// SPDX-License-Identifier: GPL-2.0
/*  SuperH Ethernet device driver
 *
 *  Copyright (C) 2014 Renesas Electronics Corporation
 *  Copyright (C) 2006-2012 Nobuhiro Iwamatsu
 *  Copyright (C) 2008-2014 Renesas Solutions Corp.
 *  Copyright (C) 2013-2017 Cogent Embedded, Inc.
 *  Copyright (C) 2014 Codethink Limited
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/sh_eth.h>
#include <linux/of_mdio.h>

#include "sh_eth.h"

#define SH_ETH_DEF_MSG_ENABLE \
		(NETIF_MSG_LINK	| \
		NETIF_MSG_TIMER	| \
		NETIF_MSG_RX_ERR| \
		NETIF_MSG_TX_ERR)

#define SH_ETH_OFFSET_INVALID	((u16)~0)

#define SH_ETH_OFFSET_DEFAULTS			\
	[0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID

/* use some intentionally tricky logic here to initialize the whole struct to
 * 0xffff, but then override certain fields, requiring us to indicate that we
 * "know" that there are overrides in this structure, and we'll need to disable
 * that warning from W=1 builds. GCC has supported this option since 4.2.X, but
 * the macros available to do this are only defined for GCC 8 and newer.
 */
__diag_push();
__diag_ignore(GCC, 8, "-Woverride-init",
	      "logic to initialize all and then override some is OK");
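/* A minimal standalone sketch (not part of the driver) of the "defaults,
 * then override" designated-initializer pattern used by the tables below:
 *
 *	static const u16 demo[4] = {
 *		[0 ... 3] = 0xffff,	// every element defaults to 0xffff
 *		[2]	  = 0x0010,	// then one entry is overridden
 *	};
 *
 * The second initializer is what triggers -Woverride-init under W=1,
 * hence the __diag_push()/__diag_ignore() bracketing around these tables.
 */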
static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
	SH_ETH_OFFSET_DEFAULTS,

	[EDSR]		= 0x0000,
	[EDMR]		= 0x0400,
	[EDTRR]		= 0x0408,
	[EDRRR]		= 0x0410,
	[EESR]		= 0x0428,
	[EESIPR]	= 0x0430,
	[TDLAR]		= 0x0010,
	[TDFAR]		= 0x0014,
	[TDFXR]		= 0x0018,
	[TDFFR]		= 0x001c,
	[RDLAR]		= 0x0030,
	[RDFAR]		= 0x0034,
	[RDFXR]		= 0x0038,
	[RDFFR]		= 0x003c,
	[TRSCER]	= 0x0438,
	[RMFCR]		= 0x0440,
	[TFTR]		= 0x0448,
	[FDR]		= 0x0450,
	[RMCR]		= 0x0458,
	[RPADIR]	= 0x0460,
	[FCFTR]		= 0x0468,
	[CSMR]		= 0x04E4,

	[ECMR]		= 0x0500,
	[ECSR]		= 0x0510,
	[ECSIPR]	= 0x0518,
	[PIR]		= 0x0520,
	[PSR]		= 0x0528,
	[PIPR]		= 0x052c,
	[RFLR]		= 0x0508,
	[APR]		= 0x0554,
	[MPR]		= 0x0558,
	[PFTCR]		= 0x055c,
	[PFRCR]		= 0x0560,
	[TPAUSER]	= 0x0564,
	[GECMR]		= 0x05b0,
	[BCULR]		= 0x05b4,
	[MAHR]		= 0x05c0,
	[MALR]		= 0x05c8,
	[TROCR]		= 0x0700,
	[CDCR]		= 0x0708,
	[LCCR]		= 0x0710,
	[CEFCR]		= 0x0740,
	[FRECR]		= 0x0748,
	[TSFRCR]	= 0x0750,
	[TLFRCR]	= 0x0758,
	[RFCR]		= 0x0760,
	[CERCR]		= 0x0768,
	[CEECR]		= 0x0770,
	[MAFCR]		= 0x0778,
	[RMII_MII]	= 0x0790,

	[ARSTR]		= 0x0000,
	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_FCM]	= 0x0018,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAGM0]	= 0x0040,
	[TSU_QTAGM1]	= 0x0044,
	[TSU_FWSR]	= 0x0050,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_VTAG0]	= 0x0058,
	[TSU_VTAG1]	= 0x005c,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_TEN]	= 0x0064,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,
	[TSU_ADRH0]	= 0x0100,

	[TXNLCR0]	= 0x0080,
	[TXALCR0]	= 0x0084,
	[RXNLCR0]	= 0x0088,
	[RXALCR0]	= 0x008c,
	[FWNLCR0]	= 0x0090,
	[FWALCR0]	= 0x0094,
	[TXNLCR1]	= 0x00a0,
	[TXALCR1]	= 0x00a4,
	[RXNLCR1]	= 0x00a8,
	[RXALCR1]	= 0x00ac,
	[FWNLCR1]	= 0x00b0,
	[FWALCR1]	= 0x00b4,
};
static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
	SH_ETH_OFFSET_DEFAULTS,

	[ECMR]		= 0x0300,
	[RFLR]		= 0x0308,
	[ECSR]		= 0x0310,
	[ECSIPR]	= 0x0318,
	[PIR]		= 0x0320,
	[PSR]		= 0x0328,
	[RDMLR]		= 0x0340,
	[IPGR]		= 0x0350,
	[APR]		= 0x0354,
	[MPR]		= 0x0358,
	[RFCF]		= 0x0360,
	[TPAUSER]	= 0x0364,
	[TPAUSECR]	= 0x0368,
	[MAHR]		= 0x03c0,
	[MALR]		= 0x03c8,
	[TROCR]		= 0x03d0,
	[CDCR]		= 0x03d4,
	[LCCR]		= 0x03d8,
	[CNDCR]		= 0x03dc,
	[CEFCR]		= 0x03e4,
	[FRECR]		= 0x03e8,
	[TSFRCR]	= 0x03ec,
	[TLFRCR]	= 0x03f0,
	[RFCR]		= 0x03f4,
	[MAFCR]		= 0x03f8,

	[EDMR]		= 0x0200,
	[EDTRR]		= 0x0208,
	[EDRRR]		= 0x0210,
	[TDLAR]		= 0x0218,
	[RDLAR]		= 0x0220,
	[EESR]		= 0x0228,
	[EESIPR]	= 0x0230,
	[TRSCER]	= 0x0238,
	[RMFCR]		= 0x0240,
	[TFTR]		= 0x0248,
	[FDR]		= 0x0250,
	[RMCR]		= 0x0258,
	[TFUCR]		= 0x0264,
	[RFOCR]		= 0x0268,
	[RMIIMODE]      = 0x026c,
	[FCFTR]		= 0x0270,
	[TRIMD]		= 0x027c,
};

static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
	SH_ETH_OFFSET_DEFAULTS,

	[ECMR]		= 0x0100,
	[RFLR]		= 0x0108,
	[ECSR]		= 0x0110,
	[ECSIPR]	= 0x0118,
	[PIR]		= 0x0120,
	[PSR]		= 0x0128,
	[RDMLR]		= 0x0140,
	[IPGR]		= 0x0150,
	[APR]		= 0x0154,
	[MPR]		= 0x0158,
	[TPAUSER]	= 0x0164,
	[RFCF]		= 0x0160,
	[TPAUSECR]	= 0x0168,
	[BCFRR]		= 0x016c,
	[MAHR]		= 0x01c0,
	[MALR]		= 0x01c8,
	[TROCR]		= 0x01d0,
	[CDCR]		= 0x01d4,
	[LCCR]		= 0x01d8,
	[CNDCR]		= 0x01dc,
	[CEFCR]		= 0x01e4,
	[FRECR]		= 0x01e8,
	[TSFRCR]	= 0x01ec,
	[TLFRCR]	= 0x01f0,
	[RFCR]		= 0x01f4,
	[MAFCR]		= 0x01f8,
	[RTRATE]	= 0x01fc,

	[EDMR]		= 0x0000,
	[EDTRR]		= 0x0008,
	[EDRRR]		= 0x0010,
	[TDLAR]		= 0x0018,
	[RDLAR]		= 0x0020,
	[EESR]		= 0x0028,
	[EESIPR]	= 0x0030,
	[TRSCER]	= 0x0038,
	[RMFCR]		= 0x0040,
	[TFTR]		= 0x0048,
	[FDR]		= 0x0050,
	[RMCR]		= 0x0058,
	[TFUCR]		= 0x0064,
	[RFOCR]		= 0x0068,
	[FCFTR]		= 0x0070,
	[RPADIR]	= 0x0078,
	[TRIMD]		= 0x007c,
	[RBWAR]		= 0x00c8,
	[RDFAR]		= 0x00cc,
	[TBRAR]		= 0x00d4,
	[TDFAR]		= 0x00d8,
};

static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
	SH_ETH_OFFSET_DEFAULTS,

	[EDMR]		= 0x0000,
	[EDTRR]		= 0x0004,
	[EDRRR]		= 0x0008,
	[TDLAR]		= 0x000c,
	[RDLAR]		= 0x0010,
	[EESR]		= 0x0014,
	[EESIPR]	= 0x0018,
	[TRSCER]	= 0x001c,
	[RMFCR]		= 0x0020,
	[TFTR]		= 0x0024,
	[FDR]		= 0x0028,
	[RMCR]		= 0x002c,
	[EDOCR]		= 0x0030,
	[FCFTR]		= 0x0034,
	[RPADIR]	= 0x0038,
	[TRIMD]		= 0x003c,
	[RBWAR]		= 0x0040,
	[RDFAR]		= 0x0044,
	[TBRAR]		= 0x004c,
	[TDFAR]		= 0x0050,

	[ECMR]		= 0x0160,
	[ECSR]		= 0x0164,
	[ECSIPR]	= 0x0168,
	[PIR]		= 0x016c,
	[MAHR]		= 0x0170,
	[MALR]		= 0x0174,
	[RFLR]		= 0x0178,
	[PSR]		= 0x017c,
	[TROCR]		= 0x0180,
	[CDCR]		= 0x0184,
	[LCCR]		= 0x0188,
	[CNDCR]		= 0x018c,
	[CEFCR]		= 0x0194,
	[FRECR]		= 0x0198,
	[TSFRCR]	= 0x019c,
	[TLFRCR]	= 0x01a0,
	[RFCR]		= 0x01a4,
	[MAFCR]		= 0x01a8,
	[IPGR]		= 0x01b4,
	[APR]		= 0x01b8,
	[MPR]		= 0x01bc,
	[TPAUSER]	= 0x01c4,
	[BCFR]		= 0x01cc,

	[ARSTR]		= 0x0000,
	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_FCM]	= 0x0018,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAGM0]	= 0x0040,
	[TSU_QTAGM1]	= 0x0044,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_FWSR]	= 0x0050,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_TEN]	= 0x0064,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,

	[TXNLCR0]	= 0x0080,
	[TXALCR0]	= 0x0084,
	[RXNLCR0]	= 0x0088,
	[RXALCR0]	= 0x008c,
	[FWNLCR0]	= 0x0090,
	[FWALCR0]	= 0x0094,
	[TXNLCR1]	= 0x00a0,
	[TXALCR1]	= 0x00a4,
	[RXNLCR1]	= 0x00a8,
	[RXALCR1]	= 0x00ac,
	[FWNLCR1]	= 0x00b0,
	[FWALCR1]	= 0x00b4,

	[TSU_ADRH0]	= 0x0100,
};
__diag_pop();

static void sh_eth_rcv_snd_disable(struct net_device *ndev);
static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);

static void sh_eth_write(struct net_device *ndev, u32 data, int enum_index)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u16 offset = mdp->reg_offset[enum_index];

	if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
		return;

	iowrite32(data, mdp->addr + offset);
}

static u32 sh_eth_read(struct net_device *ndev, int enum_index)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u16 offset = mdp->reg_offset[enum_index];

	if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
		return ~0U;

	return ioread32(mdp->addr + offset);
}

static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear,
			  u32 set)
{
	sh_eth_write(ndev, (sh_eth_read(ndev, enum_index) & ~clear) | set,
		     enum_index);
}
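
/* For reference: sh_eth_modify() is a plain read-modify-write helper, so a
 * call such as sh_eth_modify(ndev, ECMR, ECMR_DM, 0) clears the ECMR_DM bit
 * while sh_eth_modify(ndev, ECMR, ECMR_DM, ECMR_DM) sets it, leaving every
 * other ECMR bit untouched.
 */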

static u16 sh_eth_tsu_get_offset(struct sh_eth_private *mdp, int enum_index)
{
	return mdp->reg_offset[enum_index];
}

static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data,
			     int enum_index)
{
	u16 offset = sh_eth_tsu_get_offset(mdp, enum_index);

	if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
		return;

	iowrite32(data, mdp->tsu_addr + offset);
}

static u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index)
{
	u16 offset = sh_eth_tsu_get_offset(mdp, enum_index);

	if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
		return ~0U;

	return ioread32(mdp->tsu_addr + offset);
}

static void sh_eth_soft_swap(char *src, int len)
{
#ifdef __LITTLE_ENDIAN
	u32 *p = (u32 *)src;
	u32 *maxp = p + DIV_ROUND_UP(len, sizeof(u32));

	for (; p < maxp; p++)
		*p = swab32(*p);
#endif
}
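
/* Worked example: on a little-endian CPU each 32-bit word of the buffer is
 * byte-swapped, so swab32(0x11223344) yields 0x44332211; on big-endian
 * builds the #ifdef above makes this helper compile to a no-op.
 */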

static void sh_eth_select_mii(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 value;

	switch (mdp->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII ... PHY_INTERFACE_MODE_RGMII_TXID:
		value = 0x3;
		break;
	case PHY_INTERFACE_MODE_GMII:
		value = 0x2;
		break;
	case PHY_INTERFACE_MODE_MII:
		value = 0x1;
		break;
	case PHY_INTERFACE_MODE_RMII:
		value = 0x0;
		break;
	default:
		netdev_warn(ndev,
			    "PHY interface mode was not set up. Defaulting to MII.\n");
		value = 0x1;
		break;
	}

	sh_eth_write(ndev, value, RMII_MII);
}

static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	sh_eth_modify(ndev, ECMR, ECMR_DM, mdp->duplex ? ECMR_DM : 0);
}

static void sh_eth_chip_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* reset device */
	sh_eth_tsu_write(mdp, ARSTR_ARST, ARSTR);
	mdelay(1);
}

static int sh_eth_soft_reset(struct net_device *ndev)
{
	sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, EDMR_SRST_ETHER);
	mdelay(3);
	sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, 0);

	return 0;
}

static int sh_eth_check_soft_reset(struct net_device *ndev)
{
	int cnt;

	for (cnt = 100; cnt > 0; cnt--) {
		if (!(sh_eth_read(ndev, EDMR) & EDMR_SRST_GETHER))
			return 0;
		mdelay(1);
	}

	netdev_err(ndev, "Device reset failed\n");
	return -ETIMEDOUT;
}

static int sh_eth_soft_reset_gether(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	sh_eth_write(ndev, EDSR_ENALL, EDSR);
	sh_eth_modify(ndev, EDMR, EDMR_SRST_GETHER, EDMR_SRST_GETHER);

	ret = sh_eth_check_soft_reset(ndev);
	if (ret)
		return ret;

	/* Table Init */
	sh_eth_write(ndev, 0, TDLAR);
	sh_eth_write(ndev, 0, TDFAR);
	sh_eth_write(ndev, 0, TDFXR);
	sh_eth_write(ndev, 0, TDFFR);
	sh_eth_write(ndev, 0, RDLAR);
	sh_eth_write(ndev, 0, RDFAR);
	sh_eth_write(ndev, 0, RDFXR);
	sh_eth_write(ndev, 0, RDFFR);

	/* Reset HW CRC register */
	if (mdp->cd->csmr)
		sh_eth_write(ndev, 0, CSMR);

	/* Select MII mode */
	if (mdp->cd->select_mii)
		sh_eth_select_mii(ndev);

	return ret;
}

static void sh_eth_set_rate_gether(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (WARN_ON(!mdp->cd->gecmr))
		return;

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, GECMR_10, GECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, GECMR_100, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, GECMR_1000, GECMR);
		break;
	}
}

#ifdef CONFIG_OF
/* R7S72100 */
static struct sh_eth_cpu_data r7s72100_data = {
	.soft_reset	= sh_eth_soft_reset_gether,

	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,

	.register_type	= SH_ETH_REG_GIGABIT,

	.edtrr_trns	= EDTRR_TRNS_GETHER,
	.ecsr_value	= ECSR_ICD,
	.ecsipr_value	= ECSIPR_ICDIP,
	.eesipr_value	= EESIPR_TWB1IP | EESIPR_TWBIP | EESIPR_TC1IP |
			  EESIPR_TABTIP | EESIPR_RABTIP | EESIPR_RFCOFIP |
			  EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  EESIPR_RMAFIP | EESIPR_RRFIP |
			  EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE,
	.fdr_value	= 0x0000070f,

	.trscer_err_mask = TRSCER_RMAFCE | TRSCER_RRFCE,

	.no_psr		= 1,
	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.xdfar_rw	= 1,
	.csmr		= 1,
	.rx_csum	= 1,
	.tsu		= 1,
	.no_tx_cntrs	= 1,
};

static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
{
	sh_eth_chip_reset(ndev);

	sh_eth_select_mii(ndev);
}

/* R8A7740 */
static struct sh_eth_cpu_data r8a7740_data = {
	.soft_reset	= sh_eth_soft_reset_gether,

	.chip_reset	= sh_eth_chip_reset_r8a7740,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type	= SH_ETH_REG_GIGABIT,

	.edtrr_trns	= EDTRR_TRNS_GETHER,
	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
			  EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
			  EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE,
	.fdr_value	= 0x0000070f,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.gecmr		= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.xdfar_rw	= 1,
	.csmr		= 1,
	.rx_csum	= 1,
	.tsu		= 1,
	.select_mii	= 1,
	.magic		= 1,
	.cexcr		= 1,
};

/* There is CPU dependent code */
static void sh_eth_set_rate_rcar(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_modify(ndev, ECMR, ECMR_ELB, 0);
		break;
	case 100:/* 100BASE */
		sh_eth_modify(ndev, ECMR, ECMR_ELB, ECMR_ELB);
		break;
	}
}

/* R-Car Gen1 */
static struct sh_eth_cpu_data rcar_gen1_data = {
	.soft_reset	= sh_eth_soft_reset,

	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_rcar,

	.register_type	= SH_ETH_REG_FAST_RCAR,

	.edtrr_trns	= EDTRR_TRNS_ETHER,
	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  EESIPR_RMAFIP | EESIPR_RRFIP |
			  EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
	.fdr_value	= 0x00000f0f,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.no_xdfar	= 1,
};

/* R-Car Gen2 and RZ/G1 */
static struct sh_eth_cpu_data rcar_gen2_data = {
	.soft_reset	= sh_eth_soft_reset,

	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_rcar,

	.register_type	= SH_ETH_REG_FAST_RCAR,

	.edtrr_trns	= EDTRR_TRNS_ETHER,
	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP |
			  ECSIPR_MPDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  EESIPR_RMAFIP | EESIPR_RRFIP |
			  EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
	.fdr_value	= 0x00000f0f,

	.trscer_err_mask = TRSCER_RMAFCE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.no_xdfar	= 1,
	.rmiimode	= 1,
	.magic		= 1,
};

/* R8A77980 */
static struct sh_eth_cpu_data r8a77980_data = {
	.soft_reset	= sh_eth_soft_reset_gether,

	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type  = SH_ETH_REG_GIGABIT,

	.edtrr_trns	= EDTRR_TRNS_GETHER,
	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP |
			  ECSIPR_MPDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  EESIPR_RMAFIP | EESIPR_RRFIP |
			  EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check       = EESR_FTC | EESR_CD | EESR_TRO,
	.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER |
			  EESR_TFE | EESR_TDE | EESR_ECI,
	.fdr_value	= 0x0000070f,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.gecmr		= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.nbst		= 1,
	.rpadir		= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.xdfar_rw	= 1,
	.csmr		= 1,
	.rx_csum	= 1,
	.select_mii	= 1,
	.magic		= 1,
	.cexcr		= 1,
};

/* R7S9210 */
static struct sh_eth_cpu_data r7s9210_data = {
	.soft_reset	= sh_eth_soft_reset,

	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_rcar,

	.register_type	= SH_ETH_REG_FAST_SH4,

	.edtrr_trns	= EDTRR_TRNS_ETHER,
	.ecsr_value	= ECSR_ICD,
	.ecsipr_value	= ECSIPR_ICDIP,
	.eesipr_value	= EESIPR_TWBIP | EESIPR_TABTIP | EESIPR_RABTIP |
			  EESIPR_RFCOFIP | EESIPR_ECIIP | EESIPR_FTCIP |
			  EESIPR_TDEIP | EESIPR_TFUFIP | EESIPR_FRIP |
			  EESIPR_RDEIP | EESIPR_RFOFIP | EESIPR_CNDIP |
			  EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP |
			  EESIPR_RMAFIP | EESIPR_RRFIP | EESIPR_RTLFIP |
			  EESIPR_RTSFIP | EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,

	.fdr_value	= 0x0000070f,

	.trscer_err_mask = TRSCER_RMAFCE | TRSCER_RRFCE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.no_ade		= 1,
	.xdfar_rw	= 1,
};
#endif /* CONFIG_OF */

static void sh_eth_set_rate_sh7724(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_modify(ndev, ECMR, ECMR_RTM, 0);
		break;
	case 100:/* 100BASE */
		sh_eth_modify(ndev, ECMR, ECMR_RTM, ECMR_RTM);
		break;
	}
}

/* SH7724 */
static struct sh_eth_cpu_data sh7724_data = {
	.soft_reset	= sh_eth_soft_reset,

	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_sh7724,

	.register_type	= SH_ETH_REG_FAST_SH4,

	.edtrr_trns	= EDTRR_TRNS_ETHER,
	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  EESIPR_RMAFIP | EESIPR_RRFIP |
			  EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
};

static void sh_eth_set_rate_sh7757(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0, RTRATE);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, 1, RTRATE);
		break;
	}
}

/* SH7757 */
static struct sh_eth_cpu_data sh7757_data = {
	.soft_reset	= sh_eth_soft_reset,

	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_sh7757,

	.register_type	= SH_ETH_REG_FAST_SH4,

	.edtrr_trns	= EDTRR_TRNS_ETHER,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
			  EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
			  EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,

	.irq_flags	= IRQF_SHARED,
	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.no_ade		= 1,
	.rpadir		= 1,
	.rtrate		= 1,
	.dual_port	= 1,
};

#define SH_GIGA_ETH_BASE	0xfee00000UL
#define GIGA_MALR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
#define GIGA_MAHR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
static void sh_eth_chip_reset_giga(struct net_device *ndev)
{
	u32 mahr[2], malr[2];
	int i;

	/* save MAHR and MALR */
	for (i = 0; i < 2; i++) {
		malr[i] = ioread32((void *)GIGA_MALR(i));
		mahr[i] = ioread32((void *)GIGA_MAHR(i));
	}

	sh_eth_chip_reset(ndev);

	/* restore MAHR and MALR */
	for (i = 0; i < 2; i++) {
		iowrite32(malr[i], (void *)GIGA_MALR(i));
		iowrite32(mahr[i], (void *)GIGA_MAHR(i));
	}
}

static void sh_eth_set_rate_giga(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (WARN_ON(!mdp->cd->gecmr))
		return;

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0x00000000, GECMR);
		break;
	case 100:/* 100BASE */
		sh_eth_write(ndev, 0x00000010, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, 0x00000020, GECMR);
		break;
	}
}

/* SH7757(GETHERC) */
static struct sh_eth_cpu_data sh7757_data_giga = {
	.soft_reset	= sh_eth_soft_reset_gether,

	.chip_reset	= sh_eth_chip_reset_giga,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_giga,

	.register_type	= SH_ETH_REG_GIGABIT,

	.edtrr_trns	= EDTRR_TRNS_GETHER,
	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
			  EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
			  EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE,
	.fdr_value	= 0x0000072f,

	.irq_flags	= IRQF_SHARED,
	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.gecmr		= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.xdfar_rw	= 1,
	.tsu		= 1,
	.cexcr		= 1,
	.dual_port	= 1,
};

/* SH7734 */
static struct sh_eth_cpu_data sh7734_data = {
	.soft_reset	= sh_eth_soft_reset_gether,

	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type	= SH_ETH_REG_GIGABIT,

	.edtrr_trns	= EDTRR_TRNS_GETHER,
	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP |
			  EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.gecmr		= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.xdfar_rw	= 1,
	.tsu		= 1,
	.csmr		= 1,
	.rx_csum	= 1,
	.select_mii	= 1,
	.magic		= 1,
	.cexcr		= 1,
};

/* SH7763 */
static struct sh_eth_cpu_data sh7763_data = {
	.soft_reset	= sh_eth_soft_reset_gether,

	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type	= SH_ETH_REG_GIGABIT,

	.edtrr_trns	= EDTRR_TRNS_GETHER,
	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP |
			  EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.gecmr		= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.xdfar_rw	= 1,
	.tsu		= 1,
	.irq_flags	= IRQF_SHARED,
	.magic		= 1,
	.cexcr		= 1,
	.rx_csum	= 1,
	.dual_port	= 1,
};

static struct sh_eth_cpu_data sh7619_data = {
	.soft_reset	= sh_eth_soft_reset,

	.register_type	= SH_ETH_REG_FAST_SH3_SH2,

	.edtrr_trns	= EDTRR_TRNS_ETHER,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
			  EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
			  EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
};

static struct sh_eth_cpu_data sh771x_data = {
	.soft_reset	= sh_eth_soft_reset,

	.register_type	= SH_ETH_REG_FAST_SH3_SH2,

	.edtrr_trns	= EDTRR_TRNS_ETHER,
	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
			  0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
			  EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
			  EESIPR_CEEFIP | EESIPR_CELFIP |
			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
			  EESIPR_PREIP | EESIPR_CERFIP,

	.trscer_err_mask = TRSCER_RMAFCE,

	.tsu		= 1,
	.dual_port	= 1,
};

static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
{
	if (!cd->ecsr_value)
		cd->ecsr_value = DEFAULT_ECSR_INIT;

	if (!cd->ecsipr_value)
		cd->ecsipr_value = DEFAULT_ECSIPR_INIT;

	if (!cd->fcftr_value)
		cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
				  DEFAULT_FIFO_F_D_RFD;

	if (!cd->fdr_value)
		cd->fdr_value = DEFAULT_FDR_INIT;

	if (!cd->tx_check)
		cd->tx_check = DEFAULT_TX_CHECK;

	if (!cd->eesr_err_check)
		cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;

	if (!cd->trscer_err_mask)
		cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK;
}
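
/* Note: a field left zero in the per-SoC sh_eth_cpu_data structures above
 * means "use the default"; this helper fills those defaults in (it is
 * presumably run once per device at probe time), which is why the
 * structures only spell out the exceptions.
 */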

static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1);

	if (reserve)
		skb_reserve(skb, SH_ETH_RX_ALIGN - reserve);
}

/* Program the hardware MAC address from dev->dev_addr. */
static void update_mac_address(struct net_device *ndev)
{
	sh_eth_write(ndev,
		     (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		     (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
	sh_eth_write(ndev,
		     (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
}
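
/* Worked example: for the MAC address 02:11:22:33:44:55 the writes above
 * pack MAHR = 0x02112233 (first four octets) and MALR = 0x00004455 (last
 * two octets); read_mac_address() below performs the inverse unpacking.
 */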

/* Get MAC address from SuperH MAC address register
 *
 * The SuperH Ethernet device has no ROM for the MAC address; this driver
 * reads the MAC address that the bootloader (U-Boot or sh-ipl+g) has
 * programmed. To use this device, the MAC address must be set in the
 * bootloader.
 */
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
		eth_hw_addr_set(ndev, mac);
	} else {
		u32 mahr = sh_eth_read(ndev, MAHR);
		u32 malr = sh_eth_read(ndev, MALR);
		u8 addr[ETH_ALEN];

		addr[0] = (mahr >> 24) & 0xFF;
		addr[1] = (mahr >> 16) & 0xFF;
		addr[2] = (mahr >>  8) & 0xFF;
		addr[3] = (mahr >>  0) & 0xFF;
		addr[4] = (malr >>  8) & 0xFF;
		addr[5] = (malr >>  0) & 0xFF;
		eth_hw_addr_set(ndev, addr);
	}
}

struct bb_info {
	void (*set_gate)(void *addr);
	struct mdiobb_ctrl ctrl;
	void *addr;
};

static void sh_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
	u32 pir;

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	pir = ioread32(bitbang->addr);
	if (set)
		pir |=  mask;
	else
		pir &= ~mask;
	iowrite32(pir, bitbang->addr);
}

/* Data I/O pin control */
static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	sh_mdio_ctrl(ctrl, PIR_MMD, bit);
}

/* Set bit data */
static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
{
	sh_mdio_ctrl(ctrl, PIR_MDO, bit);
}

/* Get bit data */
static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	return (ioread32(bitbang->addr) & PIR_MDI) != 0;
}

/* MDC pin control */
static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	sh_mdio_ctrl(ctrl, PIR_MDC, bit);
}

/* mdio bus control struct */
static const struct mdiobb_ops bb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = sh_mdc_ctrl,
	.set_mdio_dir = sh_mmd_ctrl,
	.set_mdio_data = sh_set_mdio,
	.get_mdio_data = sh_get_mdio,
};
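
/* These callbacks toggle the MDC/MDIO pins through the PIR register one bit
 * at a time; the generic bit-banging core behind alloc_mdio_bitbang() from
 * <linux/mdio-bitbang.h> (used when the driver registers its MDIO bus)
 * clocks entire MII management frames through them, so no controller-
 * specific MDIO state machine is needed here.
 */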

/* free Tx skb function */
static int sh_eth_tx_free(struct net_device *ndev, bool sent_only)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	int free_num = 0;
	int entry;
	bool sent;

	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
		entry = mdp->dirty_tx % mdp->num_tx_ring;
		txdesc = &mdp->tx_ring[entry];
		sent = !(txdesc->status & cpu_to_le32(TD_TACT));
		if (sent_only && !sent)
			break;
		/* TACT bit must be checked before all the following reads */
		dma_rmb();
		netif_info(mdp, tx_done, ndev,
			   "tx entry %d status 0x%08x\n",
			   entry, le32_to_cpu(txdesc->status));
		/* Free the original skb. */
		if (mdp->tx_skbuff[entry]) {
			dma_unmap_single(&mdp->pdev->dev,
					 le32_to_cpu(txdesc->addr),
					 le32_to_cpu(txdesc->len) >> 16,
					 DMA_TO_DEVICE);
			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
			mdp->tx_skbuff[entry] = NULL;
			free_num++;
		}
		txdesc->status = cpu_to_le32(TD_TFP);
		if (entry >= mdp->num_tx_ring - 1)
			txdesc->status |= cpu_to_le32(TD_TDLE);

		if (sent) {
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
		}
	}
	return free_num;
}
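
/* Illustration of the ring indexing above: cur_tx and dirty_tx are
 * free-running counters, so with num_tx_ring = 64, cur_tx = 130 and
 * dirty_tx = 127, the loop runs three times and reclaims ring entries
 * 127 % 64 = 63, then 0, then 1 (assuming the hardware has cleared
 * TD_TACT on each).
 */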

/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ringsize, i;

	if (mdp->rx_ring) {
		for (i = 0; i < mdp->num_rx_ring; i++) {
			if (mdp->rx_skbuff[i]) {
				struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i];

				dma_unmap_single(&mdp->pdev->dev,
						 le32_to_cpu(rxdesc->addr),
						 ALIGN(mdp->rx_buf_sz, 32),
						 DMA_FROM_DEVICE);
			}
		}
		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
		dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->rx_ring,
				  mdp->rx_desc_dma);
		mdp->rx_ring = NULL;
	}

	/* Free Rx skb ringbuffer */
	if (mdp->rx_skbuff) {
		for (i = 0; i < mdp->num_rx_ring; i++)
			dev_kfree_skb(mdp->rx_skbuff[i]);
	}
	kfree(mdp->rx_skbuff);
	mdp->rx_skbuff = NULL;

	if (mdp->tx_ring) {
		sh_eth_tx_free(ndev, false);

		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
		dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->tx_ring,
				  mdp->tx_desc_dma);
		mdp->tx_ring = NULL;
	}

	/* Free Tx skb ringbuffer */
	kfree(mdp->tx_skbuff);
	mdp->tx_skbuff = NULL;
}

/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;
	struct sk_buff *skb;
	struct sh_eth_rxdesc *rxdesc = NULL;
	struct sh_eth_txdesc *txdesc = NULL;
	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
	dma_addr_t dma_addr;
	u32 buf_len;

	mdp->cur_rx = 0;
	mdp->cur_tx = 0;
	mdp->dirty_rx = 0;
	mdp->dirty_tx = 0;

	memset(mdp->rx_ring, 0, rx_ringsize);

	/* build Rx ring buffer */
	for (i = 0; i < mdp->num_rx_ring; i++) {
		/* skb */
		mdp->rx_skbuff[i] = NULL;
		skb = netdev_alloc_skb(ndev, skbuff_size);
		if (skb == NULL)
			break;
		sh_eth_set_receive_align(skb);

		/* The size of the buffer is a multiple of 32 bytes. */
		buf_len = ALIGN(mdp->rx_buf_sz, 32);
		dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, buf_len,
					  DMA_FROM_DEVICE);
		if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
			kfree_skb(skb);
			break;
		}
		mdp->rx_skbuff[i] = skb;

		/* RX descriptor */
		rxdesc = &mdp->rx_ring[i];
		rxdesc->len = cpu_to_le32(buf_len << 16);
		rxdesc->addr = cpu_to_le32(dma_addr);
		rxdesc->status = cpu_to_le32(RD_RACT | RD_RFP);

		/* Rx descriptor address set */
		if (i == 0) {
			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
			if (mdp->cd->xdfar_rw)
				sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
		}
	}

	mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);

	/* Mark the last entry as wrapping the ring. */
	if (rxdesc)
		rxdesc->status |= cpu_to_le32(RD_RDLE);

	memset(mdp->tx_ring, 0, tx_ringsize);

	/* build Tx ring buffer */
	for (i = 0; i < mdp->num_tx_ring; i++) {
		mdp->tx_skbuff[i] = NULL;
		txdesc = &mdp->tx_ring[i];
		txdesc->status = cpu_to_le32(TD_TFP);
		txdesc->len = cpu_to_le32(0);
		if (i == 0) {
			/* Tx descriptor address set */
			sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
			if (mdp->cd->xdfar_rw)
				sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
		}
	}

	txdesc->status |= cpu_to_le32(TD_TDLE);
}

/* Get skb and descriptor buffer */
static int sh_eth_ring_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int rx_ringsize, tx_ringsize;

	/* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
	 * card needs room to do 8 byte alignment, +2 so we can reserve
	 * the first 2 bytes, and +16 gets room for the status word from the
	 * card.
	 */
	mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
			  (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
	if (mdp->cd->rpadir)
		mdp->rx_buf_sz += NET_IP_ALIGN;
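
	/* Worked example (illustrative): with an MTU of 1500 the expression
	 * above gives ((1500 + 26 + 7) & ~7) + 2 + 16 = 1528 + 18 = 1546
	 * bytes, plus NET_IP_ALIGN (2) more when rpadir padding is in use.
	 */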

	/* Allocate RX and TX skb rings */
	mdp->rx_skbuff = kcalloc(mdp->num_rx_ring, sizeof(*mdp->rx_skbuff),
				 GFP_KERNEL);
	if (!mdp->rx_skbuff)
		return -ENOMEM;

	mdp->tx_skbuff = kcalloc(mdp->num_tx_ring, sizeof(*mdp->tx_skbuff),
				 GFP_KERNEL);
	if (!mdp->tx_skbuff)
		goto ring_free;

	/* Allocate all Rx descriptors. */
	rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
	mdp->rx_ring = dma_alloc_coherent(&mdp->pdev->dev, rx_ringsize,
					  &mdp->rx_desc_dma, GFP_KERNEL);
	if (!mdp->rx_ring)
		goto ring_free;

	mdp->dirty_rx = 0;

	/* Allocate all Tx descriptors. */
	tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
	mdp->tx_ring = dma_alloc_coherent(&mdp->pdev->dev, tx_ringsize,
					  &mdp->tx_desc_dma, GFP_KERNEL);
	if (!mdp->tx_ring)
		goto ring_free;
	return 0;

ring_free:
	/* Free Rx and Tx skb ring buffer and DMA buffer */
	sh_eth_ring_free(ndev);

	return -ENOMEM;
}

static int sh_eth_dev_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	/* Soft Reset */
	ret = mdp->cd->soft_reset(ndev);
	if (ret)
		return ret;

	if (mdp->cd->rmiimode)
		sh_eth_write(ndev, 0x1, RMIIMODE);

	/* Descriptor format */
	sh_eth_ring_format(ndev);
	if (mdp->cd->rpadir)
		sh_eth_write(ndev, NET_IP_ALIGN << 16, RPADIR);

	/* all sh_eth int mask */
	sh_eth_write(ndev, 0, EESIPR);

#if defined(__LITTLE_ENDIAN)
	if (mdp->cd->hw_swap)
		sh_eth_write(ndev, EDMR_EL, EDMR);
	else
#endif
		sh_eth_write(ndev, 0, EDMR);

	/* FIFO size set */
	sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
	sh_eth_write(ndev, 0, TFTR);

	/* Frame recv control (enable multiple-packets per rx irq) */
	sh_eth_write(ndev, RMCR_RNC, RMCR);

	sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER);

	/* DMA transfer burst mode */
	if (mdp->cd->nbst)
		sh_eth_modify(ndev, EDMR, EDMR_NBST, EDMR_NBST);

	/* Burst cycle count upper-limit */
	if (mdp->cd->bculr)
		sh_eth_write(ndev, 0x800, BCULR);

	sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);

	if (!mdp->cd->no_trimd)
		sh_eth_write(ndev, 0, TRIMD);

	/* Recv frame limit set register */
	sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
		     RFLR);

	sh_eth_modify(ndev, EESR, 0, 0);
	mdp->irq_enabled = true;
	sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);

	/* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */
	sh_eth_write(ndev, ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) |
		     (ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) |
		     ECMR_TE | ECMR_RE, ECMR);

	if (mdp->cd->set_rate)
		mdp->cd->set_rate(ndev);

	/* E-MAC Status Register clear */
	sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);

	/* E-MAC Interrupt Enable register */
	sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);

	/* Set MAC address */
	update_mac_address(ndev);

	/* mask reset */
	if (mdp->cd->apr)
		sh_eth_write(ndev, 1, APR);
	if (mdp->cd->mpr)
		sh_eth_write(ndev, 1, MPR);
	if (mdp->cd->tpauser)
		sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);

	/* Setting the Rx mode will start the Rx process. */
	sh_eth_write(ndev, EDRRR_R, EDRRR);

	return ret;
}

static void sh_eth_dev_exit(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;

	/* Deactivate all TX descriptors, so DMA should stop at next
	 * packet boundary if it's currently running
	 */
	for (i = 0; i < mdp->num_tx_ring; i++)
		mdp->tx_ring[i].status &= ~cpu_to_le32(TD_TACT);

	/* Disable TX FIFO egress to MAC */
	sh_eth_rcv_snd_disable(ndev);

	/* Stop RX DMA at next packet boundary */
	sh_eth_write(ndev, 0, EDRRR);

	/* Aside from TX DMA, we can't tell when the hardware is
	 * really stopped, so we need to reset to make sure.
	 * Before doing that, wait for long enough to *probably*
	 * finish transmitting the last packet and poll stats.
	 */
	msleep(2); /* max frame time at 10 Mbps < 1250 us */
	sh_eth_get_stats(ndev);
	mdp->cd->soft_reset(ndev);

	/* Set the RMII mode again if required */
	if (mdp->cd->rmiimode)
		sh_eth_write(ndev, 0x1, RMIIMODE);

	/* Set MAC address again */
	update_mac_address(ndev);
}

static void sh_eth_rx_csum(struct sk_buff *skb)
{
	u8 *hw_csum;

	/* The hardware checksum is 2 bytes appended to packet data */
	if (unlikely(skb->len < sizeof(__sum16)))
		return;
	hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
	skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
	skb->ip_summed = CHECKSUM_COMPLETE;
	skb_trim(skb, skb->len - sizeof(__sum16));
}
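
/* With CHECKSUM_COMPLETE the stack treats skb->csum as the raw ones'
 * complement sum over the received data and verifies L4 checksums from it;
 * here the hardware-appended 16-bit sum is unfolded into skb->csum and the
 * two trailing checksum bytes are then stripped off via skb_trim().
 */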

/* Packet receive function */
static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;

	int entry = mdp->cur_rx % mdp->num_rx_ring;
	int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
	int limit;
	struct sk_buff *skb;
	u32 desc_status;
	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
	dma_addr_t dma_addr;
	u16 pkt_len;
	u32 buf_len;

	boguscnt = min(boguscnt, *quota);
	limit = boguscnt;
	rxdesc = &mdp->rx_ring[entry];
	while (!(rxdesc->status & cpu_to_le32(RD_RACT))) {
		/* RACT bit must be checked before all the following reads */
		dma_rmb();
		desc_status = le32_to_cpu(rxdesc->status);
		pkt_len = le32_to_cpu(rxdesc->len) & RD_RFL;

		if (--boguscnt < 0)
			break;

		netif_info(mdp, rx_status, ndev,
			   "rx entry %d status 0x%08x len %d\n",
			   entry, desc_status, pkt_len);

		if (!(desc_status & RDFEND))
			ndev->stats.rx_length_errors++;

		/* On almost all GETHER/ETHER variants, the Receive Frame
		 * State (RFS) bits in Receive Descriptor 0 occupy bits 9..0.
		 * On the R8A7740 and R7S72100, however, the RFS bits occupy
		 * bits 25..16, so the driver needs to shift right by 16.
		 */
		if (mdp->cd->csmr)
			desc_status >>= 16;

		skb = mdp->rx_skbuff[entry];
		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
			ndev->stats.rx_errors++;
			if (desc_status & RD_RFS1)
				ndev->stats.rx_crc_errors++;
			if (desc_status & RD_RFS2)
				ndev->stats.rx_frame_errors++;
			if (desc_status & RD_RFS3)
				ndev->stats.rx_length_errors++;
			if (desc_status & RD_RFS4)
				ndev->stats.rx_length_errors++;
			if (desc_status & RD_RFS6)
				ndev->stats.rx_missed_errors++;
			if (desc_status & RD_RFS10)
				ndev->stats.rx_over_errors++;
		} else if (skb) {
			dma_addr = le32_to_cpu(rxdesc->addr);
			if (!mdp->cd->hw_swap)
				sh_eth_soft_swap(
					phys_to_virt(ALIGN(dma_addr, 4)),
					pkt_len + 2);
			mdp->rx_skbuff[entry] = NULL;
			if (mdp->cd->rpadir)
				skb_reserve(skb, NET_IP_ALIGN);
			dma_unmap_single(&mdp->pdev->dev, dma_addr,
					 ALIGN(mdp->rx_buf_sz, 32),
					 DMA_FROM_DEVICE);
			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, ndev);
			if (ndev->features & NETIF_F_RXCSUM)
				sh_eth_rx_csum(skb);
			netif_receive_skb(skb);
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += pkt_len;
			if (desc_status & RD_RFS8)
				ndev->stats.multicast++;
		}
		entry = (++mdp->cur_rx) % mdp->num_rx_ring;
		rxdesc = &mdp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
		entry = mdp->dirty_rx % mdp->num_rx_ring;
		rxdesc = &mdp->rx_ring[entry];
		/* The buffer size is aligned to a 32-byte boundary. */
		buf_len = ALIGN(mdp->rx_buf_sz, 32);
		rxdesc->len = cpu_to_le32(buf_len << 16);

		if (mdp->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(ndev, skbuff_size);
			if (skb == NULL)
				break;	/* Better luck next round. */
			sh_eth_set_receive_align(skb);
			dma_addr = dma_map_single(&mdp->pdev->dev, skb->data,
						  buf_len, DMA_FROM_DEVICE);
			if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
				kfree_skb(skb);
				break;
			}
			mdp->rx_skbuff[entry] = skb;

			skb_checksum_none_assert(skb);
			rxdesc->addr = cpu_to_le32(dma_addr);
		}
		dma_wmb(); /* RACT bit must be set after all the above writes */
		if (entry >= mdp->num_rx_ring - 1)
			rxdesc->status |=
				cpu_to_le32(RD_RACT | RD_RFP | RD_RDLE);
		else
			rxdesc->status |= cpu_to_le32(RD_RACT | RD_RFP);
	}

	/* Restart Rx engine if stopped. */
	/* If we don't need to check status, don't. -KDU */
	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
		/* fix the values for the next receiving if RDE is set */
		if (intr_status & EESR_RDE && !mdp->cd->no_xdfar) {
			u32 count = (sh_eth_read(ndev, RDFAR) -
				     sh_eth_read(ndev, RDLAR)) >> 4;
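			/* Presumably each Rx descriptor is 16 bytes, so the
			 * byte distance RDFAR - RDLAR shifted right by 4
			 * yields the index of the descriptor the hardware
			 * stopped at; cur_rx/dirty_rx are resynced to it.
			 */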

			mdp->cur_rx = count;
			mdp->dirty_rx = count;
		}
		sh_eth_write(ndev, EDRRR_R, EDRRR);
	}

	*quota -= limit - boguscnt - 1;

	return *quota <= 0;
}

static void sh_eth_rcv_snd_disable(struct net_device *ndev)
{
	/* disable tx and rx */
	sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0);
}

static void sh_eth_rcv_snd_enable(struct net_device *ndev)
{
	/* enable tx and rx */
	sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE);
}

/* E-MAC interrupt handler */
static void sh_eth_emac_interrupt(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 felic_stat;
	u32 link_stat;

	felic_stat = sh_eth_read(ndev, ECSR) & sh_eth_read(ndev, ECSIPR);
	sh_eth_write(ndev, felic_stat, ECSR);	/* clear int */
	if (felic_stat & ECSR_ICD)
		ndev->stats.tx_carrier_errors++;
	if (felic_stat & ECSR_MPD)
		pm_wakeup_event(&mdp->pdev->dev, 0);
	if (felic_stat & ECSR_LCHNG) {
		/* Link Changed */
		if (mdp->cd->no_psr || mdp->no_ether_link)
			return;
		link_stat = sh_eth_read(ndev, PSR);
		if (mdp->ether_link_active_low)
			link_stat = ~link_stat;
		if (!(link_stat & PSR_LMON)) {
			sh_eth_rcv_snd_disable(ndev);
		} else {
			/* Link Up */
			sh_eth_modify(ndev, EESIPR, EESIPR_ECIIP, 0);
			/* clear int */
			sh_eth_modify(ndev, ECSR, 0, 0);
			sh_eth_modify(ndev, EESIPR, EESIPR_ECIIP, EESIPR_ECIIP);
			/* enable tx and rx */
			sh_eth_rcv_snd_enable(ndev);
		}
	}
}

/* error control function */
static void sh_eth_error(struct net_device *ndev, u32 intr_status)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 mask;

	if (intr_status & EESR_TWB) {
		/* Unused write back interrupt */
		if (intr_status & EESR_TABT) {	/* Transmit Abort int */
			ndev->stats.tx_aborted_errors++;
			netif_err(mdp, tx_err, ndev, "Transmit Abort\n");
		}
	}

	if (intr_status & EESR_RABT) {
		/* Receive Abort int */
		if (intr_status & EESR_RFRMER) {
			/* Receive Frame Overflow int */
			ndev->stats.rx_frame_errors++;
		}
	}

	if (intr_status & EESR_TDE) {
		/* Transmit Descriptor Empty int */
		ndev->stats.tx_fifo_errors++;
		netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n");
	}

	if (intr_status & EESR_TFE) {
		/* FIFO under flow */
		ndev->stats.tx_fifo_errors++;
		netif_err(mdp, tx_err, ndev, "Transmit FIFO Under flow\n");
	}

	if (intr_status & EESR_RDE) {
		/* Receive Descriptor Empty int */
		ndev->stats.rx_over_errors++;
	}

	if (intr_status & EESR_RFE) {
		/* Receive FIFO Overflow int */
		ndev->stats.rx_fifo_errors++;
	}

	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
		/* Address Error */
		ndev->stats.tx_fifo_errors++;
		netif_err(mdp, tx_err, ndev, "Address Error\n");
	}

	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
	if (mdp->cd->no_ade)
		mask &= ~EESR_ADE;
	if (intr_status & mask) {
		/* Tx error */
		u32 edtrr = sh_eth_read(ndev, EDTRR);

		/* dmesg */
		netdev_err(ndev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
			   intr_status, mdp->cur_tx, mdp->dirty_tx,
			   (u32)ndev->state, edtrr);
		/* dirty buffer free */
		sh_eth_tx_free(ndev, true);

		/* SH7712 BUG */
		if (edtrr ^ mdp->cd->edtrr_trns) {
			/* tx dma start */
			sh_eth_write(ndev, mdp->cd->edtrr_trns, EDTRR);
		}
		/* wakeup */
		netif_wake_queue(ndev);
	}
}

static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
{
	struct net_device *ndev = netdev;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_cpu_data *cd = mdp->cd;
	irqreturn_t ret = IRQ_NONE;
	u32 intr_status, intr_enable;

	spin_lock(&mdp->lock);

	/* Get interrupt status */
	intr_status = sh_eth_read(ndev, EESR);
	/* Mask it with the interrupt mask, forcing the ECI interrupt to be
	 * always enabled since it's the one that comes through regardless of
	 * the mask, and we need to fully handle it in sh_eth_emac_interrupt()
	 * in order to quench it, as it doesn't get cleared by just writing 1
	 * to the ECI bit...
	 */
	intr_enable = sh_eth_read(ndev, EESIPR);
	intr_status &= intr_enable | EESIPR_ECIIP;
	if (intr_status & (EESR_RX_CHECK | cd->tx_check | EESR_ECI |
			   cd->eesr_err_check))
		ret = IRQ_HANDLED;
	else
		goto out;

	if (unlikely(!mdp->irq_enabled)) {
		sh_eth_write(ndev, 0, EESIPR);
		goto out;
	}

	if (intr_status & EESR_RX_CHECK) {
		if (napi_schedule_prep(&mdp->napi)) {
			/* Mask Rx interrupts */
			sh_eth_write(ndev, intr_enable & ~EESR_RX_CHECK,
				     EESIPR);
			__napi_schedule(&mdp->napi);
		} else {
			netdev_warn(ndev,
				    "ignoring interrupt, status 0x%08x, mask 0x%08x.\n",
				    intr_status, intr_enable);
		}
	}

	/* Tx Check */
	if (intr_status & cd->tx_check) {
		/* Clear Tx interrupts */
		sh_eth_write(ndev, intr_status & cd->tx_check, EESR);

		sh_eth_tx_free(ndev, true);
		netif_wake_queue(ndev);
	}

	/* E-MAC interrupt */
	if (intr_status & EESR_ECI)
		sh_eth_emac_interrupt(ndev);

	if (intr_status & cd->eesr_err_check) {
		/* Clear error interrupts */
		sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR);

		sh_eth_error(ndev, intr_status);
	}

out:
	spin_unlock(&mdp->lock);

	return ret;
}

static int sh_eth_poll(struct napi_struct *napi, int budget)
{
	struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private,
						  napi);
	struct net_device *ndev = napi->dev;
	int quota = budget;
	u32 intr_status;

	for (;;) {
		intr_status = sh_eth_read(ndev, EESR);
		if (!(intr_status & EESR_RX_CHECK))
			break;
		/* Clear Rx interrupts */
		sh_eth_write(ndev, intr_status & EESR_RX_CHECK, EESR);

		if (sh_eth_rx(ndev, intr_status, &quota))
			goto out;
	}

	napi_complete(napi);

	/* Reenable Rx interrupts */
	if (mdp->irq_enabled)
		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
out:
	return budget - quota;
}
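
/* NAPI contract note: sh_eth_poll() returns the number of packets processed
 * (budget - quota). When the quota is exhausted, sh_eth_rx() returns true
 * and the function jumps past napi_complete(), so the full-budget return
 * makes the NAPI core poll again; only on the non-exhausted path does
 * napi_complete() run and do Rx interrupts get re-enabled above.
 */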
1939
1940/* PHY state control function */
1941static void sh_eth_adjust_link(struct net_device *ndev)
1942{
1943	struct sh_eth_private *mdp = netdev_priv(ndev);
1944	struct phy_device *phydev = ndev->phydev;
1945	unsigned long flags;
1946	int new_state = 0;
1947
1948	spin_lock_irqsave(&mdp->lock, flags);
1949
1950	/* Disable TX and RX right over here, if E-MAC change is ignored */
1951	if (mdp->cd->no_psr || mdp->no_ether_link)
1952		sh_eth_rcv_snd_disable(ndev);
1953
1954	if (phydev->link) {
1955		if (phydev->duplex != mdp->duplex) {
1956			new_state = 1;
1957			mdp->duplex = phydev->duplex;
1958			if (mdp->cd->set_duplex)
1959				mdp->cd->set_duplex(ndev);
1960		}
1961
1962		if (phydev->speed != mdp->speed) {
1963			new_state = 1;
1964			mdp->speed = phydev->speed;
1965			if (mdp->cd->set_rate)
1966				mdp->cd->set_rate(ndev);
1967		}
1968		if (!mdp->link) {
1969			sh_eth_modify(ndev, ECMR, ECMR_TXF, 0);
1970			new_state = 1;
1971			mdp->link = phydev->link;
1972		}
1973	} else if (mdp->link) {
1974		new_state = 1;
1975		mdp->link = 0;
1976		mdp->speed = 0;
1977		mdp->duplex = -1;
1978	}
1979
1980	/* Enable TX and RX again, once the link is up, if the E-MAC link change is ignored */
1981	if ((mdp->cd->no_psr || mdp->no_ether_link) && phydev->link)
1982		sh_eth_rcv_snd_enable(ndev);
1983
1984	spin_unlock_irqrestore(&mdp->lock, flags);
1985
1986	if (new_state && netif_msg_link(mdp))
1987		phy_print_status(phydev);
1988}
1989
1990/* PHY init function */
1991static int sh_eth_phy_init(struct net_device *ndev)
1992{
1993	struct device_node *np = ndev->dev.parent->of_node;
1994	struct sh_eth_private *mdp = netdev_priv(ndev);
1995	struct phy_device *phydev;
1996
1997	mdp->link = 0;
1998	mdp->speed = 0;
1999	mdp->duplex = -1;
2000
2001	/* Try connect to PHY */
2002	if (np) {
2003		struct device_node *pn;
2004
2005		pn = of_parse_phandle(np, "phy-handle", 0);
2006		phydev = of_phy_connect(ndev, pn,
2007					sh_eth_adjust_link, 0,
2008					mdp->phy_interface);
2009
2010		of_node_put(pn);
2011		if (!phydev)
2012			phydev = ERR_PTR(-ENOENT);
2013	} else {
2014		char phy_id[MII_BUS_ID_SIZE + 3];
2015
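		/* Non-DT path: compose a PHY bus id from the MDIO bus id
		 * and the PHY address given in platform data.
		 */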
2016		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
2017			 mdp->mii_bus->id, mdp->phy_id);
2018
2019		phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
2020				     mdp->phy_interface);
2021	}
2022
2023	if (IS_ERR(phydev)) {
2024		netdev_err(ndev, "failed to connect PHY\n");
2025		return PTR_ERR(phydev);
2026	}
2027
2028	/* mask with MAC supported features */
2029	if (mdp->cd->register_type != SH_ETH_REG_GIGABIT)
2030		phy_set_max_speed(phydev, SPEED_100);
2031
2032	/* Indicate that the MAC is responsible for managing PHY PM */
2033	phydev->mac_managed_pm = true;
2034	phy_attached_info(phydev);
2035
2036	return 0;
2037}
2038
2039/* PHY control start function */
2040static int sh_eth_phy_start(struct net_device *ndev)
2041{
2042	int ret;
2043
2044	ret = sh_eth_phy_init(ndev);
2045	if (ret)
2046		return ret;
2047
2048	phy_start(ndev->phydev);
2049
2050	return 0;
2051}
2052
2053/* If it is ever necessary to increase SH_ETH_REG_DUMP_MAX_REGS, the
2054 * version must be bumped as well.  Just adding registers up to that
2055 * limit is fine, as long as the existing register indices don't
2056 * change.
2057 */
2058#define SH_ETH_REG_DUMP_VERSION		1
2059#define SH_ETH_REG_DUMP_MAX_REGS	256
2060
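/* __sh_eth_get_regs() is used in two passes: with buf == NULL it only
 * computes the dump length for sh_eth_get_regs_len(), and with a real
 * buffer it fills in the bitmap and the register values.
 */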
2061static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
2062{
2063	struct sh_eth_private *mdp = netdev_priv(ndev);
2064	struct sh_eth_cpu_data *cd = mdp->cd;
2065	u32 *valid_map;
2066	size_t len;
2067
2068	BUILD_BUG_ON(SH_ETH_MAX_REGISTER_OFFSET > SH_ETH_REG_DUMP_MAX_REGS);
2069
2070	/* Dump starts with a bitmap that tells ethtool which
2071	 * registers are defined for this chip.
2072	 */
2073	len = DIV_ROUND_UP(SH_ETH_REG_DUMP_MAX_REGS, 32);
2074	if (buf) {
2075		valid_map = buf;
2076		buf += len;
2077	} else {
2078		valid_map = NULL;
2079	}
2080
2081	/* Add a register to the dump, if it has a defined offset.
2082	 * This automatically skips most undefined registers, but for
2083	 * some it is also necessary to check a capability flag in
2084	 * struct sh_eth_cpu_data.
2085	 */
2086#define mark_reg_valid(reg) valid_map[reg / 32] |= 1U << (reg % 32)
2087#define add_reg_from(reg, read_expr) do {				\
2088		if (mdp->reg_offset[reg] != SH_ETH_OFFSET_INVALID) {	\
2089			if (buf) {					\
2090				mark_reg_valid(reg);			\
2091				*buf++ = read_expr;			\
2092			}						\
2093			++len;						\
2094		}							\
2095	} while (0)
2096#define add_reg(reg) add_reg_from(reg, sh_eth_read(ndev, reg))
2097#define add_tsu_reg(reg) add_reg_from(reg, sh_eth_tsu_read(mdp, reg))
2098
2099	add_reg(EDSR);
2100	add_reg(EDMR);
2101	add_reg(EDTRR);
2102	add_reg(EDRRR);
2103	add_reg(EESR);
2104	add_reg(EESIPR);
2105	add_reg(TDLAR);
2106	if (!cd->no_xdfar)
2107		add_reg(TDFAR);
2108	add_reg(TDFXR);
2109	add_reg(TDFFR);
2110	add_reg(RDLAR);
2111	if (!cd->no_xdfar)
2112		add_reg(RDFAR);
2113	add_reg(RDFXR);
2114	add_reg(RDFFR);
2115	add_reg(TRSCER);
2116	add_reg(RMFCR);
2117	add_reg(TFTR);
2118	add_reg(FDR);
2119	add_reg(RMCR);
2120	add_reg(TFUCR);
2121	add_reg(RFOCR);
2122	if (cd->rmiimode)
2123		add_reg(RMIIMODE);
2124	add_reg(FCFTR);
2125	if (cd->rpadir)
2126		add_reg(RPADIR);
2127	if (!cd->no_trimd)
2128		add_reg(TRIMD);
2129	add_reg(ECMR);
2130	add_reg(ECSR);
2131	add_reg(ECSIPR);
2132	add_reg(PIR);
2133	if (!cd->no_psr)
2134		add_reg(PSR);
2135	add_reg(RDMLR);
2136	add_reg(RFLR);
2137	add_reg(IPGR);
2138	if (cd->apr)
2139		add_reg(APR);
2140	if (cd->mpr)
2141		add_reg(MPR);
2142	add_reg(RFCR);
2143	add_reg(RFCF);
2144	if (cd->tpauser)
2145		add_reg(TPAUSER);
2146	add_reg(TPAUSECR);
2147	if (cd->gecmr)
2148		add_reg(GECMR);
2149	if (cd->bculr)
2150		add_reg(BCULR);
2151	add_reg(MAHR);
2152	add_reg(MALR);
2153	if (!cd->no_tx_cntrs) {
2154		add_reg(TROCR);
2155		add_reg(CDCR);
2156		add_reg(LCCR);
2157		add_reg(CNDCR);
2158	}
2159	add_reg(CEFCR);
2160	add_reg(FRECR);
2161	add_reg(TSFRCR);
2162	add_reg(TLFRCR);
2163	if (cd->cexcr) {
2164		add_reg(CERCR);
2165		add_reg(CEECR);
2166	}
2167	add_reg(MAFCR);
2168	if (cd->rtrate)
2169		add_reg(RTRATE);
2170	if (cd->csmr)
2171		add_reg(CSMR);
2172	if (cd->select_mii)
2173		add_reg(RMII_MII);
2174	if (cd->tsu) {
2175		add_tsu_reg(ARSTR);
2176		add_tsu_reg(TSU_CTRST);
2177		if (cd->dual_port) {
2178			add_tsu_reg(TSU_FWEN0);
2179			add_tsu_reg(TSU_FWEN1);
2180			add_tsu_reg(TSU_FCM);
2181			add_tsu_reg(TSU_BSYSL0);
2182			add_tsu_reg(TSU_BSYSL1);
2183			add_tsu_reg(TSU_PRISL0);
2184			add_tsu_reg(TSU_PRISL1);
2185			add_tsu_reg(TSU_FWSL0);
2186			add_tsu_reg(TSU_FWSL1);
2187		}
2188		add_tsu_reg(TSU_FWSLC);
2189		if (cd->dual_port) {
2190			add_tsu_reg(TSU_QTAGM0);
2191			add_tsu_reg(TSU_QTAGM1);
2192			add_tsu_reg(TSU_FWSR);
2193			add_tsu_reg(TSU_FWINMK);
2194			add_tsu_reg(TSU_ADQT0);
2195			add_tsu_reg(TSU_ADQT1);
2196			add_tsu_reg(TSU_VTAG0);
2197			add_tsu_reg(TSU_VTAG1);
2198		}
2199		add_tsu_reg(TSU_ADSBSY);
2200		add_tsu_reg(TSU_TEN);
2201		add_tsu_reg(TSU_POST1);
2202		add_tsu_reg(TSU_POST2);
2203		add_tsu_reg(TSU_POST3);
2204		add_tsu_reg(TSU_POST4);
2205		/* This is the start of a table, not just a single register. */
2206		if (buf) {
2207			unsigned int i;
2208
2209			mark_reg_valid(TSU_ADRH0);
2210			for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES * 2; i++)
2211				*buf++ = ioread32(mdp->tsu_addr +
2212						  mdp->reg_offset[TSU_ADRH0] +
2213						  i * 4);
2214		}
2215		len += SH_ETH_TSU_CAM_ENTRIES * 2;
2216	}
2217
2218#undef mark_reg_valid
2219#undef add_reg_from
2220#undef add_reg
2221#undef add_tsu_reg
2222
2223	return len * 4;
2224}
2225
2226static int sh_eth_get_regs_len(struct net_device *ndev)
2227{
2228	return __sh_eth_get_regs(ndev, NULL);
2229}
2230
2231static void sh_eth_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
2232			    void *buf)
2233{
2234	struct sh_eth_private *mdp = netdev_priv(ndev);
2235
2236	regs->version = SH_ETH_REG_DUMP_VERSION;
2237
2238	pm_runtime_get_sync(&mdp->pdev->dev);
2239	__sh_eth_get_regs(ndev, buf);
2240	pm_runtime_put_sync(&mdp->pdev->dev);
2241}
2242
2243static u32 sh_eth_get_msglevel(struct net_device *ndev)
2244{
2245	struct sh_eth_private *mdp = netdev_priv(ndev);
2246	return mdp->msg_enable;
2247}
2248
2249static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
2250{
2251	struct sh_eth_private *mdp = netdev_priv(ndev);
2252	mdp->msg_enable = value;
2253}
2254
2255static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
2256	"rx_current", "tx_current",
2257	"rx_dirty", "tx_dirty",
2258};
2259#define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)
2260
2261static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
2262{
2263	switch (sset) {
2264	case ETH_SS_STATS:
2265		return SH_ETH_STATS_LEN;
2266	default:
2267		return -EOPNOTSUPP;
2268	}
2269}
2270
2271static void sh_eth_get_ethtool_stats(struct net_device *ndev,
2272				     struct ethtool_stats *stats, u64 *data)
2273{
2274	struct sh_eth_private *mdp = netdev_priv(ndev);
2275	int i = 0;
2276
2277	/* device-specific stats */
2278	data[i++] = mdp->cur_rx;
2279	data[i++] = mdp->cur_tx;
2280	data[i++] = mdp->dirty_rx;
2281	data[i++] = mdp->dirty_tx;
2282}
2283
2284static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
2285{
2286	switch (stringset) {
2287	case ETH_SS_STATS:
2288		memcpy(data, sh_eth_gstrings_stats,
2289		       sizeof(sh_eth_gstrings_stats));
2290		break;
2291	}
2292}
2293
2294static void sh_eth_get_ringparam(struct net_device *ndev,
2295				 struct ethtool_ringparam *ring,
2296				 struct kernel_ethtool_ringparam *kernel_ring,
2297				 struct netlink_ext_ack *extack)
2298{
2299	struct sh_eth_private *mdp = netdev_priv(ndev);
2300
2301	ring->rx_max_pending = RX_RING_MAX;
2302	ring->tx_max_pending = TX_RING_MAX;
2303	ring->rx_pending = mdp->num_rx_ring;
2304	ring->tx_pending = mdp->num_tx_ring;
2305}
2306
2307static int sh_eth_set_ringparam(struct net_device *ndev,
2308				struct ethtool_ringparam *ring,
2309				struct kernel_ethtool_ringparam *kernel_ring,
2310				struct netlink_ext_ack *extack)
2311{
2312	struct sh_eth_private *mdp = netdev_priv(ndev);
2313	int ret;
2314
2315	if (ring->tx_pending > TX_RING_MAX ||
2316	    ring->rx_pending > RX_RING_MAX ||
2317	    ring->tx_pending < TX_RING_MIN ||
2318	    ring->rx_pending < RX_RING_MIN)
2319		return -EINVAL;
2320	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
2321		return -EINVAL;
2322
2323	if (netif_running(ndev)) {
2324		netif_device_detach(ndev);
2325		netif_tx_disable(ndev);
2326
2327		/* Serialise with the interrupt handler and NAPI, then
2328		 * disable interrupts.  We have to clear the
2329		 * irq_enabled flag first to ensure that interrupts
2330		 * won't be re-enabled.
2331		 */
2332		mdp->irq_enabled = false;
2333		synchronize_irq(ndev->irq);
2334		napi_synchronize(&mdp->napi);
2335		sh_eth_write(ndev, 0x0000, EESIPR);
2336
2337		sh_eth_dev_exit(ndev);
2338
2339		/* Free all the skbuffs in the Rx queue and the DMA buffers. */
2340		sh_eth_ring_free(ndev);
2341	}
2342
2343	/* Set new parameters */
2344	mdp->num_rx_ring = ring->rx_pending;
2345	mdp->num_tx_ring = ring->tx_pending;
2346
2347	if (netif_running(ndev)) {
2348		ret = sh_eth_ring_init(ndev);
2349		if (ret < 0) {
2350			netdev_err(ndev, "%s: sh_eth_ring_init failed.\n",
2351				   __func__);
2352			return ret;
2353		}
2354		ret = sh_eth_dev_init(ndev);
2355		if (ret < 0) {
2356			netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
2357				   __func__);
2358			return ret;
2359		}
2360
2361		netif_device_attach(ndev);
2362	}
2363
2364	return 0;
2365}
2366
2367static void sh_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2368{
2369	struct sh_eth_private *mdp = netdev_priv(ndev);
2370
2371	wol->supported = 0;
2372	wol->wolopts = 0;
2373
2374	if (mdp->cd->magic) {
2375		wol->supported = WAKE_MAGIC;
2376		wol->wolopts = mdp->wol_enabled ? WAKE_MAGIC : 0;
2377	}
2378}
2379
2380static int sh_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2381{
2382	struct sh_eth_private *mdp = netdev_priv(ndev);
2383
2384	if (!mdp->cd->magic || wol->wolopts & ~WAKE_MAGIC)
2385		return -EOPNOTSUPP;
2386
2387	mdp->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);
2388
2389	device_set_wakeup_enable(&mdp->pdev->dev, mdp->wol_enabled);
2390
2391	return 0;
2392}
2393
2394static const struct ethtool_ops sh_eth_ethtool_ops = {
2395	.get_regs_len	= sh_eth_get_regs_len,
2396	.get_regs	= sh_eth_get_regs,
2397	.nway_reset	= phy_ethtool_nway_reset,
2398	.get_msglevel	= sh_eth_get_msglevel,
2399	.set_msglevel	= sh_eth_set_msglevel,
2400	.get_link	= ethtool_op_get_link,
2401	.get_strings	= sh_eth_get_strings,
2402	.get_ethtool_stats  = sh_eth_get_ethtool_stats,
2403	.get_sset_count     = sh_eth_get_sset_count,
2404	.get_ringparam	= sh_eth_get_ringparam,
2405	.set_ringparam	= sh_eth_set_ringparam,
2406	.get_link_ksettings = phy_ethtool_get_link_ksettings,
2407	.set_link_ksettings = phy_ethtool_set_link_ksettings,
2408	.get_wol	= sh_eth_get_wol,
2409	.set_wol	= sh_eth_set_wol,
2410};
2411
2412/* network device open function */
2413static int sh_eth_open(struct net_device *ndev)
2414{
2415	struct sh_eth_private *mdp = netdev_priv(ndev);
2416	int ret;
2417
2418	pm_runtime_get_sync(&mdp->pdev->dev);
2419
2420	napi_enable(&mdp->napi);
2421
2422	ret = request_irq(ndev->irq, sh_eth_interrupt,
2423			  mdp->cd->irq_flags, ndev->name, ndev);
2424	if (ret) {
2425		netdev_err(ndev, "Can not assign IRQ number\n");
2426		goto out_napi_off;
2427	}
2428
2429	/* Descriptor set */
2430	ret = sh_eth_ring_init(ndev);
2431	if (ret)
2432		goto out_free_irq;
2433
2434	/* device init */
2435	ret = sh_eth_dev_init(ndev);
2436	if (ret)
2437		goto out_free_irq;
2438
2439	/* PHY control start */
2440	ret = sh_eth_phy_start(ndev);
2441	if (ret)
2442		goto out_free_irq;
2443
2444	netif_start_queue(ndev);
2445
2446	mdp->is_opened = 1;
2447
2448	return ret;
2449
2450out_free_irq:
2451	free_irq(ndev->irq, ndev);
2452out_napi_off:
2453	napi_disable(&mdp->napi);
2454	pm_runtime_put_sync(&mdp->pdev->dev);
2455	return ret;
2456}
2457
2458/* Timeout function */
2459static void sh_eth_tx_timeout(struct net_device *ndev, unsigned int txqueue)
2460{
2461	struct sh_eth_private *mdp = netdev_priv(ndev);
2462	struct sh_eth_rxdesc *rxdesc;
2463	int i;
2464
2465	netif_stop_queue(ndev);
2466
2467	netif_err(mdp, timer, ndev,
2468		  "transmit timed out, status %8.8x, resetting...\n",
2469		  sh_eth_read(ndev, EESR));
2470
2471	/* tx_errors count up */
2472	ndev->stats.tx_errors++;
2473
2474	/* Free all the skbuffs in the Rx queue. */
2475	for (i = 0; i < mdp->num_rx_ring; i++) {
2476		rxdesc = &mdp->rx_ring[i];
2477		rxdesc->status = cpu_to_le32(0);
2478		rxdesc->addr = cpu_to_le32(0xBADF00D0);
2479		dev_kfree_skb(mdp->rx_skbuff[i]);
2480		mdp->rx_skbuff[i] = NULL;
2481	}
2482	for (i = 0; i < mdp->num_tx_ring; i++) {
2483		dev_kfree_skb(mdp->tx_skbuff[i]);
2484		mdp->tx_skbuff[i] = NULL;
2485	}
2486
2487	/* device init */
2488	sh_eth_dev_init(ndev);
2489
2490	netif_start_queue(ndev);
2491}
2492
2493/* Packet transmit function */
2494static netdev_tx_t sh_eth_start_xmit(struct sk_buff *skb,
2495				     struct net_device *ndev)
2496{
2497	struct sh_eth_private *mdp = netdev_priv(ndev);
2498	struct sh_eth_txdesc *txdesc;
2499	dma_addr_t dma_addr;
2500	u32 entry;
2501	unsigned long flags;
2502
2503	spin_lock_irqsave(&mdp->lock, flags);
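	/* Keep a headroom of four descriptors; try to reclaim completed
	 * ones before reporting the queue as busy.
	 */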
2504	if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
2505		if (!sh_eth_tx_free(ndev, true)) {
2506			netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
2507			netif_stop_queue(ndev);
2508			spin_unlock_irqrestore(&mdp->lock, flags);
2509			return NETDEV_TX_BUSY;
2510		}
2511	}
2512	spin_unlock_irqrestore(&mdp->lock, flags);
2513
2514	if (skb_put_padto(skb, ETH_ZLEN))
2515		return NETDEV_TX_OK;
2516
2517	entry = mdp->cur_tx % mdp->num_tx_ring;
2518	mdp->tx_skbuff[entry] = skb;
2519	txdesc = &mdp->tx_ring[entry];
2520	/* soft swap. */
2521	if (!mdp->cd->hw_swap)
2522		sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2);
2523	dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, skb->len,
2524				  DMA_TO_DEVICE);
2525	if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
2526		kfree_skb(skb);
2527		return NETDEV_TX_OK;
2528	}
2529	txdesc->addr = cpu_to_le32(dma_addr);
2530	txdesc->len  = cpu_to_le32(skb->len << 16);
2531
2532	dma_wmb(); /* TACT bit must be set after all the above writes */
2533	if (entry >= mdp->num_tx_ring - 1)
2534		txdesc->status |= cpu_to_le32(TD_TACT | TD_TDLE);
2535	else
2536		txdesc->status |= cpu_to_le32(TD_TACT);
2537
2538	wmb(); /* cur_tx must be incremented after TACT bit was set */
2539	mdp->cur_tx++;
2540
2541	if (!(sh_eth_read(ndev, EDTRR) & mdp->cd->edtrr_trns))
2542		sh_eth_write(ndev, mdp->cd->edtrr_trns, EDTRR);
2543
2544	return NETDEV_TX_OK;
2545}
2546
2547/* The statistics registers have write-clear behaviour, which means we
2548 * will lose any increment between the read and write.  We mitigate
2549 * this by only clearing when we read a non-zero value, so we will
2550 * never falsely report a total of zero.
2551 */
2552static void
2553sh_eth_update_stat(struct net_device *ndev, unsigned long *stat, int reg)
2554{
2555	u32 delta = sh_eth_read(ndev, reg);
2556
2557	if (delta) {
2558		*stat += delta;
2559		sh_eth_write(ndev, 0, reg);
2560	}
2561}
2562
2563static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
2564{
2565	struct sh_eth_private *mdp = netdev_priv(ndev);
2566
2567	if (mdp->cd->no_tx_cntrs)
2568		return &ndev->stats;
2569
2570	if (!mdp->is_opened)
2571		return &ndev->stats;
2572
2573	sh_eth_update_stat(ndev, &ndev->stats.tx_dropped, TROCR);
2574	sh_eth_update_stat(ndev, &ndev->stats.collisions, CDCR);
2575	sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, LCCR);
2576
2577	if (mdp->cd->cexcr) {
2578		sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
2579				   CERCR);
2580		sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
2581				   CEECR);
2582	} else {
2583		sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
2584				   CNDCR);
2585	}
2586
2587	return &ndev->stats;
2588}
2589
2590/* device close function */
2591static int sh_eth_close(struct net_device *ndev)
2592{
2593	struct sh_eth_private *mdp = netdev_priv(ndev);
2594
2595	netif_stop_queue(ndev);
2596
2597	/* Serialise with the interrupt handler and NAPI, then disable
2598	 * interrupts.  We have to clear the irq_enabled flag first to
2599	 * ensure that interrupts won't be re-enabled.
2600	 */
2601	mdp->irq_enabled = false;
2602	synchronize_irq(ndev->irq);
2603	napi_disable(&mdp->napi);
2604	sh_eth_write(ndev, 0x0000, EESIPR);
2605
2606	sh_eth_dev_exit(ndev);
2607
2608	/* PHY Disconnect */
2609	if (ndev->phydev) {
2610		phy_stop(ndev->phydev);
2611		phy_disconnect(ndev->phydev);
2612	}
2613
2614	free_irq(ndev->irq, ndev);
2615
2616	/* Free all the skbuffs in the Rx queue and the DMA buffer. */
2617	sh_eth_ring_free(ndev);
2618
2619	mdp->is_opened = 0;
2620
2621	pm_runtime_put(&mdp->pdev->dev);
2622
2623	return 0;
2624}
2625
2626static int sh_eth_change_mtu(struct net_device *ndev, int new_mtu)
2627{
2628	if (netif_running(ndev))
2629		return -EBUSY;
2630
2631	ndev->mtu = new_mtu;
2632	netdev_update_features(ndev);
2633
2634	return 0;
2635}
2636
2637	/* For TSU_POSTn. Please refer to the manual about these (strange) bitfields */
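/* A sketch of the layout, as implied by the helpers below: each TSU_POSTn
 * register packs eight CAM entries, one nibble per entry (entry 0 in bits
 * 31:28), and within each nibble the upper bit pair belongs to port 0 and
 * the lower pair to port 1.
 */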
2638static u32 sh_eth_tsu_get_post_mask(int entry)
2639{
2640	return 0x0f << (28 - ((entry % 8) * 4));
2641}
2642
2643static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
2644{
2645	return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
2646}
2647
2648static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
2649					     int entry)
2650{
2651	struct sh_eth_private *mdp = netdev_priv(ndev);
2652	int reg = TSU_POST1 + entry / 8;
2653	u32 tmp;
2654
2655	tmp = sh_eth_tsu_read(mdp, reg);
2656	sh_eth_tsu_write(mdp, tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg);
2657}
2658
2659static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
2660					      int entry)
2661{
2662	struct sh_eth_private *mdp = netdev_priv(ndev);
2663	int reg = TSU_POST1 + entry / 8;
2664	u32 post_mask, ref_mask, tmp;
2665
2666	post_mask = sh_eth_tsu_get_post_mask(entry);
2667	ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;
2668
2669	tmp = sh_eth_tsu_read(mdp, reg);
2670	sh_eth_tsu_write(mdp, tmp & ~post_mask, reg);
2671
2672	/* Return "true" if the entry is still enabled for the other port */
2673	return tmp & ref_mask;
2674}
2675
2676static int sh_eth_tsu_busy(struct net_device *ndev)
2677{
2678	int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
2679	struct sh_eth_private *mdp = netdev_priv(ndev);
2680
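	/* Poll the address-table-busy flag in 10 us steps; the iteration
	 * count works out to SH_ETH_TSU_TIMEOUT_MS overall.
	 */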
2681	while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
2682		udelay(10);
2683		timeout--;
2684		if (timeout <= 0) {
2685			netdev_err(ndev, "%s: timeout\n", __func__);
2686			return -ETIMEDOUT;
2687		}
2688	}
2689
2690	return 0;
2691}
2692
2693static int sh_eth_tsu_write_entry(struct net_device *ndev, u16 offset,
2694				  const u8 *addr)
2695{
2696	struct sh_eth_private *mdp = netdev_priv(ndev);
2697	u32 val;
2698
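	/* A CAM entry is written as two words: the first four octets of
	 * the MAC address, then the remaining two. The TSU must go idle
	 * after each word before the write is considered complete.
	 */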
2699	val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
2700	iowrite32(val, mdp->tsu_addr + offset);
2701	if (sh_eth_tsu_busy(ndev) < 0)
2702		return -EBUSY;
2703
2704	val = addr[4] << 8 | addr[5];
2705	iowrite32(val, mdp->tsu_addr + offset + 4);
2706	if (sh_eth_tsu_busy(ndev) < 0)
2707		return -EBUSY;
2708
2709	return 0;
2710}
2711
2712static void sh_eth_tsu_read_entry(struct net_device *ndev, u16 offset, u8 *addr)
2713{
2714	struct sh_eth_private *mdp = netdev_priv(ndev);
2715	u32 val;
2716
2717	val = ioread32(mdp->tsu_addr + offset);
2718	addr[0] = (val >> 24) & 0xff;
2719	addr[1] = (val >> 16) & 0xff;
2720	addr[2] = (val >> 8) & 0xff;
2721	addr[3] = val & 0xff;
2722	val = ioread32(mdp->tsu_addr + offset + 4);
2723	addr[4] = (val >> 8) & 0xff;
2724	addr[5] = val & 0xff;
2725}
2726
2727
2728static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
2729{
2730	struct sh_eth_private *mdp = netdev_priv(ndev);
2731	u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2732	int i;
2733	u8 c_addr[ETH_ALEN];
2734
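	/* CAM entries are laid out 8 bytes apart, starting at TSU_ADRH0. */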
2735	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2736		sh_eth_tsu_read_entry(ndev, reg_offset, c_addr);
2737		if (ether_addr_equal(addr, c_addr))
2738			return i;
2739	}
2740
2741	return -ENOENT;
2742}
2743
2744static int sh_eth_tsu_find_empty(struct net_device *ndev)
2745{
2746	u8 blank[ETH_ALEN];
2747	int entry;
2748
2749	memset(blank, 0, sizeof(blank));
2750	entry = sh_eth_tsu_find_entry(ndev, blank);
2751	return (entry < 0) ? -ENOMEM : entry;
2752}
2753
2754static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
2755					      int entry)
2756{
2757	struct sh_eth_private *mdp = netdev_priv(ndev);
2758	u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2759	int ret;
2760	u8 blank[ETH_ALEN];
2761
2762	sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
2763			 ~(1 << (31 - entry)), TSU_TEN);
2764
2765	memset(blank, 0, sizeof(blank));
2766	ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
2767	if (ret < 0)
2768		return ret;
2769	return 0;
2770}
2771
2772static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
2773{
2774	struct sh_eth_private *mdp = netdev_priv(ndev);
2775	u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2776	int i, ret;
2777
2778	if (!mdp->cd->tsu)
2779		return 0;
2780
2781	i = sh_eth_tsu_find_entry(ndev, addr);
2782	if (i < 0) {
2783		/* No entry found, create one */
2784		i = sh_eth_tsu_find_empty(ndev);
2785		if (i < 0)
2786			return -ENOMEM;
2787		ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
2788		if (ret < 0)
2789			return ret;
2790
2791		/* Enable the entry */
2792		sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
2793				 (1 << (31 - i)), TSU_TEN);
2794	}
2795
2796	/* Entry found or created, enable POST */
2797	sh_eth_tsu_enable_cam_entry_post(ndev, i);
2798
2799	return 0;
2800}
2801
2802static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
2803{
2804	struct sh_eth_private *mdp = netdev_priv(ndev);
2805	int i, ret;
2806
2807	if (!mdp->cd->tsu)
2808		return 0;
2809
2810	i = sh_eth_tsu_find_entry(ndev, addr);
2811	if (i >= 0) {
2812		/* Entry found */
2813		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2814			goto done;
2815
2816		/* Disable the entry once it is disabled for both ports */
2817		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2818		if (ret < 0)
2819			return ret;
2820	}
2821done:
2822	return 0;
2823}
2824
2825static int sh_eth_tsu_purge_all(struct net_device *ndev)
2826{
2827	struct sh_eth_private *mdp = netdev_priv(ndev);
2828	int i, ret;
2829
2830	if (!mdp->cd->tsu)
2831		return 0;
2832
2833	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
2834		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2835			continue;
2836
2837		/* Disable the entry once it is disabled for both ports */
2838		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2839		if (ret < 0)
2840			return ret;
2841	}
2842
2843	return 0;
2844}
2845
2846static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
2847{
2848	struct sh_eth_private *mdp = netdev_priv(ndev);
2849	u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2850	u8 addr[ETH_ALEN];
2851	int i;
2852
2853	if (!mdp->cd->tsu)
2854		return;
2855
2856	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2857		sh_eth_tsu_read_entry(ndev, reg_offset, addr);
2858		if (is_multicast_ether_addr(addr))
2859			sh_eth_tsu_del_entry(ndev, addr);
2860	}
2861}
2862
2863/* Update promiscuous flag and multicast filter */
2864static void sh_eth_set_rx_mode(struct net_device *ndev)
2865{
2866	struct sh_eth_private *mdp = netdev_priv(ndev);
2867	u32 ecmr_bits;
2868	int mcast_all = 0;
2869	unsigned long flags;
2870
2871	spin_lock_irqsave(&mdp->lock, flags);
2872	/* Initial condition is MCT = 1, PRM = 0.
2873	 * Depending on ndev->flags, set PRM or clear MCT
2874	 */
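	/* (MCT selects CAM-based multicast filtering; PRM enables
	 *  promiscuous reception.)
	 */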
2875	ecmr_bits = sh_eth_read(ndev, ECMR) & ~ECMR_PRM;
2876	if (mdp->cd->tsu)
2877		ecmr_bits |= ECMR_MCT;
2878
2879	if (!(ndev->flags & IFF_MULTICAST)) {
2880		sh_eth_tsu_purge_mcast(ndev);
2881		mcast_all = 1;
2882	}
2883	if (ndev->flags & IFF_ALLMULTI) {
2884		sh_eth_tsu_purge_mcast(ndev);
2885		ecmr_bits &= ~ECMR_MCT;
2886		mcast_all = 1;
2887	}
2888
2889	if (ndev->flags & IFF_PROMISC) {
2890		sh_eth_tsu_purge_all(ndev);
2891		ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
2892	} else if (mdp->cd->tsu) {
2893		struct netdev_hw_addr *ha;
2894		netdev_for_each_mc_addr(ha, ndev) {
2895			if (mcast_all && is_multicast_ether_addr(ha->addr))
2896				continue;
2897
2898			if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
2899				if (!mcast_all) {
2900					sh_eth_tsu_purge_mcast(ndev);
2901					ecmr_bits &= ~ECMR_MCT;
2902					mcast_all = 1;
2903				}
2904			}
2905		}
2906	}
2907
2908	/* update the ethernet mode */
2909	sh_eth_write(ndev, ecmr_bits, ECMR);
2910
2911	spin_unlock_irqrestore(&mdp->lock, flags);
2912}
2913
2914static void sh_eth_set_rx_csum(struct net_device *ndev, bool enable)
2915{
2916	struct sh_eth_private *mdp = netdev_priv(ndev);
2917	unsigned long flags;
2918
2919	spin_lock_irqsave(&mdp->lock, flags);
2920
2921	/* Disable TX and RX */
2922	sh_eth_rcv_snd_disable(ndev);
2923
2924	/* Modify RX Checksum setting */
2925	sh_eth_modify(ndev, ECMR, ECMR_RCSC, enable ? ECMR_RCSC : 0);
2926
2927	/* Enable TX and RX */
2928	sh_eth_rcv_snd_enable(ndev);
2929
2930	spin_unlock_irqrestore(&mdp->lock, flags);
2931}
2932
2933static int sh_eth_set_features(struct net_device *ndev,
2934			       netdev_features_t features)
2935{
2936	netdev_features_t changed = ndev->features ^ features;
2937	struct sh_eth_private *mdp = netdev_priv(ndev);
2938
2939	if (changed & NETIF_F_RXCSUM && mdp->cd->rx_csum)
2940		sh_eth_set_rx_csum(ndev, features & NETIF_F_RXCSUM);
2941
2942	ndev->features = features;
2943
2944	return 0;
2945}
2946
2947static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
2948{
2949	if (!mdp->port)
2950		return TSU_VTAG0;
2951	else
2952		return TSU_VTAG1;
2953}
2954
2955static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
2956				  __be16 proto, u16 vid)
2957{
2958	struct sh_eth_private *mdp = netdev_priv(ndev);
2959	int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2960
2961	if (unlikely(!mdp->cd->tsu))
2962		return -EPERM;
2963
2964	/* No filtering if vid = 0 */
2965	if (!vid)
2966		return 0;
2967
2968	mdp->vlan_num_ids++;
2969
2970	/* The controller has one VLAN tag HW filter. So, if the filter is
2971	 * already enabled, the driver disables it and stops filtering.
2972	 */
2973	if (mdp->vlan_num_ids > 1) {
2974		/* disable VLAN filter */
2975		sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2976		return 0;
2977	}
2978
2979	sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
2980			 vtag_reg_index);
2981
2982	return 0;
2983}
2984
2985static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
2986				   __be16 proto, u16 vid)
2987{
2988	struct sh_eth_private *mdp = netdev_priv(ndev);
2989	int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2990
2991	if (unlikely(!mdp->cd->tsu))
2992		return -EPERM;
2993
2994	/* No filtering if vid = 0 */
2995	if (!vid)
2996		return 0;
2997
2998	mdp->vlan_num_ids--;
2999	sh_eth_tsu_write(mdp, 0, vtag_reg_index);
3000
3001	return 0;
3002}
3003
3004/* SuperH's TSU register init function */
3005static void sh_eth_tsu_init(struct sh_eth_private *mdp)
3006{
3007	if (!mdp->cd->dual_port) {
3008		sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
3009		sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL,
3010				 TSU_FWSLC);	/* Enable POST registers */
3011		return;
3012	}
3013
3014	sh_eth_tsu_write(mdp, 0, TSU_FWEN0);	/* Disable forward(0->1) */
3015	sh_eth_tsu_write(mdp, 0, TSU_FWEN1);	/* Disable forward(1->0) */
3016	sh_eth_tsu_write(mdp, 0, TSU_FCM);	/* forward fifo 3k-3k */
3017	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
3018	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
3019	sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
3020	sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
3021	sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
3022	sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
3023	sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
3024	sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);	/* Disable QTAG(0->1) */
3025	sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);	/* Disable QTAG(1->0) */
3026	sh_eth_tsu_write(mdp, 0, TSU_FWSR);	/* all interrupt status clear */
3027	sh_eth_tsu_write(mdp, 0, TSU_FWINMK);	/* Disable all interrupt */
3028	sh_eth_tsu_write(mdp, 0, TSU_TEN);	/* Disable all CAM entry */
3029	sh_eth_tsu_write(mdp, 0, TSU_POST1);	/* Disable CAM entry [ 0- 7] */
3030	sh_eth_tsu_write(mdp, 0, TSU_POST2);	/* Disable CAM entry [ 8-15] */
3031	sh_eth_tsu_write(mdp, 0, TSU_POST3);	/* Disable CAM entry [16-23] */
3032	sh_eth_tsu_write(mdp, 0, TSU_POST4);	/* Disable CAM entry [24-31] */
3033}
3034
3035/* MDIO bus release function */
3036static int sh_mdio_release(struct sh_eth_private *mdp)
3037{
3038	/* unregister mdio bus */
3039	mdiobus_unregister(mdp->mii_bus);
3040
3041	/* free bitbang info */
3042	free_mdio_bitbang(mdp->mii_bus);
3043
3044	return 0;
3045}
3046
3047static int sh_mdiobb_read(struct mii_bus *bus, int phy, int reg)
3048{
3049	int res;
3050
3051	pm_runtime_get_sync(bus->parent);
3052	res = mdiobb_read(bus, phy, reg);
3053	pm_runtime_put(bus->parent);
3054
3055	return res;
3056}
3057
3058static int sh_mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
3059{
3060	int res;
3061
3062	pm_runtime_get_sync(bus->parent);
3063	res = mdiobb_write(bus, phy, reg, val);
3064	pm_runtime_put(bus->parent);
3065
3066	return res;
3067}
3068
3069/* MDIO bus init function */
3070static int sh_mdio_init(struct sh_eth_private *mdp,
3071			struct sh_eth_plat_data *pd)
3072{
3073	int ret;
3074	struct bb_info *bitbang;
3075	struct platform_device *pdev = mdp->pdev;
3076	struct device *dev = &mdp->pdev->dev;
3077
3078	/* create bit control struct for PHY */
3079	bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL);
3080	if (!bitbang)
3081		return -ENOMEM;
3082
3083	/* bitbang init */
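	/* MDIO is bit-banged through the PIR (PHY interface) register. */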
3084	bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
3085	bitbang->set_gate = pd->set_mdio_gate;
3086	bitbang->ctrl.ops = &bb_ops;
3087
3088	/* MII controller setting */
3089	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
3090	if (!mdp->mii_bus)
3091		return -ENOMEM;
3092
3093	/* Wrap accessors with Runtime PM-aware ops */
3094	mdp->mii_bus->read = sh_mdiobb_read;
3095	mdp->mii_bus->write = sh_mdiobb_write;
3096
3097	/* Hook up MII support for ethtool */
3098	mdp->mii_bus->name = "sh_mii";
3099	mdp->mii_bus->parent = dev;
3100	snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
3101		 pdev->name, pdev->id);
3102
3103	/* register MDIO bus */
3104	if (pd->phy_irq > 0)
3105		mdp->mii_bus->irq[pd->phy] = pd->phy_irq;
3106
3107	ret = of_mdiobus_register(mdp->mii_bus, dev->of_node);
3108	if (ret)
3109		goto out_free_bus;
3110
3111	return 0;
3112
3113out_free_bus:
3114	free_mdio_bitbang(mdp->mii_bus);
3115	return ret;
3116}
3117
3118static const u16 *sh_eth_get_register_offset(int register_type)
3119{
3120	const u16 *reg_offset = NULL;
3121
3122	switch (register_type) {
3123	case SH_ETH_REG_GIGABIT:
3124		reg_offset = sh_eth_offset_gigabit;
3125		break;
3126	case SH_ETH_REG_FAST_RCAR:
3127		reg_offset = sh_eth_offset_fast_rcar;
3128		break;
3129	case SH_ETH_REG_FAST_SH4:
3130		reg_offset = sh_eth_offset_fast_sh4;
3131		break;
3132	case SH_ETH_REG_FAST_SH3_SH2:
3133		reg_offset = sh_eth_offset_fast_sh3_sh2;
3134		break;
3135	}
3136
3137	return reg_offset;
3138}
3139
3140static const struct net_device_ops sh_eth_netdev_ops = {
3141	.ndo_open		= sh_eth_open,
3142	.ndo_stop		= sh_eth_close,
3143	.ndo_start_xmit		= sh_eth_start_xmit,
3144	.ndo_get_stats		= sh_eth_get_stats,
3145	.ndo_set_rx_mode	= sh_eth_set_rx_mode,
3146	.ndo_tx_timeout		= sh_eth_tx_timeout,
3147	.ndo_eth_ioctl		= phy_do_ioctl_running,
3148	.ndo_change_mtu		= sh_eth_change_mtu,
3149	.ndo_validate_addr	= eth_validate_addr,
3150	.ndo_set_mac_address	= eth_mac_addr,
3151	.ndo_set_features	= sh_eth_set_features,
3152};
3153
3154static const struct net_device_ops sh_eth_netdev_ops_tsu = {
3155	.ndo_open		= sh_eth_open,
3156	.ndo_stop		= sh_eth_close,
3157	.ndo_start_xmit		= sh_eth_start_xmit,
3158	.ndo_get_stats		= sh_eth_get_stats,
3159	.ndo_set_rx_mode	= sh_eth_set_rx_mode,
3160	.ndo_vlan_rx_add_vid	= sh_eth_vlan_rx_add_vid,
3161	.ndo_vlan_rx_kill_vid	= sh_eth_vlan_rx_kill_vid,
3162	.ndo_tx_timeout		= sh_eth_tx_timeout,
3163	.ndo_eth_ioctl		= phy_do_ioctl_running,
3164	.ndo_change_mtu		= sh_eth_change_mtu,
3165	.ndo_validate_addr	= eth_validate_addr,
3166	.ndo_set_mac_address	= eth_mac_addr,
3167	.ndo_set_features	= sh_eth_set_features,
3168};
3169
3170#ifdef CONFIG_OF
3171static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
3172{
3173	struct device_node *np = dev->of_node;
3174	struct sh_eth_plat_data *pdata;
3175	phy_interface_t interface;
3176	int ret;
3177
3178	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3179	if (!pdata)
3180		return NULL;
3181
3182	ret = of_get_phy_mode(np, &interface);
3183	if (ret)
3184		return NULL;
3185	pdata->phy_interface = interface;
3186
3187	of_get_mac_address(np, pdata->mac_addr);
3188
3189	pdata->no_ether_link =
3190		of_property_read_bool(np, "renesas,no-ether-link");
3191	pdata->ether_link_active_low =
3192		of_property_read_bool(np, "renesas,ether-link-active-low");
3193
3194	return pdata;
3195}
3196
3197static const struct of_device_id sh_eth_match_table[] = {
3198	{ .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data },
3199	{ .compatible = "renesas,ether-r8a7743", .data = &rcar_gen2_data },
3200	{ .compatible = "renesas,ether-r8a7745", .data = &rcar_gen2_data },
3201	{ .compatible = "renesas,ether-r8a7778", .data = &rcar_gen1_data },
3202	{ .compatible = "renesas,ether-r8a7779", .data = &rcar_gen1_data },
3203	{ .compatible = "renesas,ether-r8a7790", .data = &rcar_gen2_data },
3204	{ .compatible = "renesas,ether-r8a7791", .data = &rcar_gen2_data },
3205	{ .compatible = "renesas,ether-r8a7793", .data = &rcar_gen2_data },
3206	{ .compatible = "renesas,ether-r8a7794", .data = &rcar_gen2_data },
3207	{ .compatible = "renesas,gether-r8a77980", .data = &r8a77980_data },
3208	{ .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
3209	{ .compatible = "renesas,ether-r7s9210", .data = &r7s9210_data },
3210	{ .compatible = "renesas,rcar-gen1-ether", .data = &rcar_gen1_data },
3211	{ .compatible = "renesas,rcar-gen2-ether", .data = &rcar_gen2_data },
3212	{ }
3213};
3214MODULE_DEVICE_TABLE(of, sh_eth_match_table);
3215#else
3216static inline struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
3217{
3218	return NULL;
3219}
3220#endif
3221
3222static int sh_eth_drv_probe(struct platform_device *pdev)
3223{
3224	struct resource *res;
3225	struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev);
3226	const struct platform_device_id *id = platform_get_device_id(pdev);
3227	struct sh_eth_private *mdp;
3228	struct net_device *ndev;
3229	int ret;
3230
3231	ndev = alloc_etherdev(sizeof(struct sh_eth_private));
3232	if (!ndev)
3233		return -ENOMEM;
3234
3235	pm_runtime_enable(&pdev->dev);
3236	pm_runtime_get_sync(&pdev->dev);
3237
3238	ret = platform_get_irq(pdev, 0);
3239	if (ret < 0)
3240		goto out_release;
3241	ndev->irq = ret;
3242
3243	SET_NETDEV_DEV(ndev, &pdev->dev);
3244
3245	mdp = netdev_priv(ndev);
3246	mdp->num_tx_ring = TX_RING_SIZE;
3247	mdp->num_rx_ring = RX_RING_SIZE;
3248	mdp->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
3249	if (IS_ERR(mdp->addr)) {
3250		ret = PTR_ERR(mdp->addr);
3251		goto out_release;
3252	}
3253
3254	ndev->base_addr = res->start;
3255
3256	spin_lock_init(&mdp->lock);
3257	mdp->pdev = pdev;
3258
3259	if (pdev->dev.of_node)
3260		pd = sh_eth_parse_dt(&pdev->dev);
3261	if (!pd) {
3262		dev_err(&pdev->dev, "no platform data\n");
3263		ret = -EINVAL;
3264		goto out_release;
3265	}
3266
3267	/* get PHY ID */
3268	mdp->phy_id = pd->phy;
3269	mdp->phy_interface = pd->phy_interface;
3270	mdp->no_ether_link = pd->no_ether_link;
3271	mdp->ether_link_active_low = pd->ether_link_active_low;
3272
3273	/* set cpu data */
3274	if (id)
3275		mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
3276	else
3277		mdp->cd = (struct sh_eth_cpu_data *)of_device_get_match_data(&pdev->dev);
3278
3279	mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
3280	if (!mdp->reg_offset) {
3281		dev_err(&pdev->dev, "Unknown register type (%d)\n",
3282			mdp->cd->register_type);
3283		ret = -EINVAL;
3284		goto out_release;
3285	}
3286	sh_eth_set_default_cpu_data(mdp->cd);
3287
3288	/* User's manual states max MTU should be 2048 but due to the
3289	 * alignment calculations in sh_eth_ring_init() the practical
3290	 * MTU is a bit less. Maybe this can be optimized some more.
3291	 */
3292	ndev->max_mtu = 2000 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
3293	ndev->min_mtu = ETH_MIN_MTU;
3294
3295	if (mdp->cd->rx_csum) {
3296		ndev->features = NETIF_F_RXCSUM;
3297		ndev->hw_features = NETIF_F_RXCSUM;
3298	}
3299
3300	/* set function */
3301	if (mdp->cd->tsu)
3302		ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
3303	else
3304		ndev->netdev_ops = &sh_eth_netdev_ops;
3305	ndev->ethtool_ops = &sh_eth_ethtool_ops;
3306	ndev->watchdog_timeo = TX_TIMEOUT;
3307
3308	/* debug message level */
3309	mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
3310
3311	/* read and set MAC address */
3312	read_mac_address(ndev, pd->mac_addr);
3313	if (!is_valid_ether_addr(ndev->dev_addr)) {
3314		dev_warn(&pdev->dev,
3315			 "no valid MAC address supplied, using a random one.\n");
3316		eth_hw_addr_random(ndev);
3317	}
3318
3319	if (mdp->cd->tsu) {
3320		int port = pdev->id < 0 ? 0 : pdev->id % 2;
3321		struct resource *rtsu;
3322
3323		rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
3324		if (!rtsu) {
3325			dev_err(&pdev->dev, "no TSU resource\n");
3326			ret = -ENODEV;
3327			goto out_release;
3328		}
3329	/* We can only request the TSU region for the first of the two
3330	 * ports sharing this TSU if the probe is to succeed...
3331	 */
3332		if (port == 0 &&
3333		    !devm_request_mem_region(&pdev->dev, rtsu->start,
3334					     resource_size(rtsu),
3335					     dev_name(&pdev->dev))) {
3336			dev_err(&pdev->dev, "can't request TSU resource.\n");
3337			ret = -EBUSY;
3338			goto out_release;
3339		}
3340		/* ioremap the TSU registers */
3341		mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start,
3342					     resource_size(rtsu));
3343		if (!mdp->tsu_addr) {
3344			dev_err(&pdev->dev, "TSU region ioremap() failed.\n");
3345			ret = -ENOMEM;
3346			goto out_release;
3347		}
3348		mdp->port = port;
3349		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3350
3351		/* Need to init only the first port of the two sharing a TSU */
3352		if (port == 0) {
3353			if (mdp->cd->chip_reset)
3354				mdp->cd->chip_reset(ndev);
3355
3356			/* TSU init (Init only)*/
3357			sh_eth_tsu_init(mdp);
3358		}
3359	}
3360
3361	if (mdp->cd->rmiimode)
3362		sh_eth_write(ndev, 0x1, RMIIMODE);
3363
3364	/* MDIO bus init */
3365	ret = sh_mdio_init(mdp, pd);
3366	if (ret) {
3367		dev_err_probe(&pdev->dev, ret, "MDIO init failed\n");
3368		goto out_release;
3369	}
3370
3371	netif_napi_add(ndev, &mdp->napi, sh_eth_poll);
3372
3373	/* network device register */
3374	ret = register_netdev(ndev);
3375	if (ret)
3376		goto out_napi_del;
3377
3378	if (mdp->cd->magic)
3379		device_set_wakeup_capable(&pdev->dev, 1);
3380
3381	/* print device information */
3382	netdev_info(ndev, "Base address at 0x%x, %pM, IRQ %d.\n",
3383		    (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
3384
3385	pm_runtime_put(&pdev->dev);
3386	platform_set_drvdata(pdev, ndev);
3387
3388	return ret;
3389
3390out_napi_del:
3391	netif_napi_del(&mdp->napi);
3392	sh_mdio_release(mdp);
3393
3394out_release:
3395	/* net_dev free */
3396	free_netdev(ndev);
3397
3398	pm_runtime_put(&pdev->dev);
3399	pm_runtime_disable(&pdev->dev);
3400	return ret;
3401}
3402
3403static int sh_eth_drv_remove(struct platform_device *pdev)
3404{
3405	struct net_device *ndev = platform_get_drvdata(pdev);
3406	struct sh_eth_private *mdp = netdev_priv(ndev);
3407
3408	unregister_netdev(ndev);
3409	netif_napi_del(&mdp->napi);
3410	sh_mdio_release(mdp);
3411	pm_runtime_disable(&pdev->dev);
3412	free_netdev(ndev);
3413
3414	return 0;
3415}
3416
3417#ifdef CONFIG_PM
3418#ifdef CONFIG_PM_SLEEP
3419static int sh_eth_wol_setup(struct net_device *ndev)
3420{
3421	struct sh_eth_private *mdp = netdev_priv(ndev);
3422
3423	/* Only allow ECI interrupts */
3424	synchronize_irq(ndev->irq);
3425	napi_disable(&mdp->napi);
3426	sh_eth_write(ndev, EESIPR_ECIIP, EESIPR);
3427
3428	/* Enable MagicPacket */
3429	sh_eth_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);
3430
3431	return enable_irq_wake(ndev->irq);
3432}
3433
3434static int sh_eth_wol_restore(struct net_device *ndev)
3435{
3436	struct sh_eth_private *mdp = netdev_priv(ndev);
3437	int ret;
3438
3439	napi_enable(&mdp->napi);
3440
3441	/* Disable MagicPacket */
3442	sh_eth_modify(ndev, ECMR, ECMR_MPDE, 0);
3443
3444	/* The device needs to be reset to restore MagicPacket logic
3445	 * for next wakeup. If we close and open the device it will
3446	 * both be reset and all registers restored. This is what
3447	 * happens during suspend and resume without WoL enabled.
3448	 */
3449	sh_eth_close(ndev);
3450	ret = sh_eth_open(ndev);
3451	if (ret < 0)
3452		return ret;
3453
3454	return disable_irq_wake(ndev->irq);
3455}
3456
3457static int sh_eth_suspend(struct device *dev)
3458{
3459	struct net_device *ndev = dev_get_drvdata(dev);
3460	struct sh_eth_private *mdp = netdev_priv(ndev);
3461	int ret;
3462
3463	if (!netif_running(ndev))
3464		return 0;
3465
3466	netif_device_detach(ndev);
3467
3468	if (mdp->wol_enabled)
3469		ret = sh_eth_wol_setup(ndev);
3470	else
3471		ret = sh_eth_close(ndev);
3472
3473	return ret;
3474}
3475
3476static int sh_eth_resume(struct device *dev)
3477{
3478	struct net_device *ndev = dev_get_drvdata(dev);
3479	struct sh_eth_private *mdp = netdev_priv(ndev);
3480	int ret;
3481
3482	if (!netif_running(ndev))
3483		return 0;
3484
3485	if (mdp->wol_enabled)
3486		ret = sh_eth_wol_restore(ndev);
3487	else
3488		ret = sh_eth_open(ndev);
3489
3490	if (ret < 0)
3491		return ret;
3492
3493	netif_device_attach(ndev);
3494
3495	return ret;
3496}
3497#endif
3498
3499static int sh_eth_runtime_nop(struct device *dev)
3500{
3501	/* Runtime PM callback shared between ->runtime_suspend()
3502	 * and ->runtime_resume(). Simply returns success.
3503	 *
3504	 * This driver re-initializes all registers after
3505	 * pm_runtime_get_sync() anyway so there is no need
3506	 * to save and restore registers here.
3507	 */
3508	return 0;
3509}
3510
3511static const struct dev_pm_ops sh_eth_dev_pm_ops = {
3512	SET_SYSTEM_SLEEP_PM_OPS(sh_eth_suspend, sh_eth_resume)
3513	SET_RUNTIME_PM_OPS(sh_eth_runtime_nop, sh_eth_runtime_nop, NULL)
3514};
3515#define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
3516#else
3517#define SH_ETH_PM_OPS NULL
3518#endif
3519
3520static const struct platform_device_id sh_eth_id_table[] = {
3521	{ "sh7619-ether", (kernel_ulong_t)&sh7619_data },
3522	{ "sh771x-ether", (kernel_ulong_t)&sh771x_data },
3523	{ "sh7724-ether", (kernel_ulong_t)&sh7724_data },
3524	{ "sh7734-gether", (kernel_ulong_t)&sh7734_data },
3525	{ "sh7757-ether", (kernel_ulong_t)&sh7757_data },
3526	{ "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga },
3527	{ "sh7763-gether", (kernel_ulong_t)&sh7763_data },
3528	{ }
3529};
3530MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
3531
3532static struct platform_driver sh_eth_driver = {
3533	.probe = sh_eth_drv_probe,
3534	.remove = sh_eth_drv_remove,
3535	.id_table = sh_eth_id_table,
3536	.driver = {
3537		   .name = CARDNAME,
3538		   .pm = SH_ETH_PM_OPS,
3539		   .of_match_table = of_match_ptr(sh_eth_match_table),
3540	},
3541};
3542
3543module_platform_driver(sh_eth_driver);
3544
3545MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
3546MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
3547MODULE_LICENSE("GPL v2");
v4.17
 
   1/*  SuperH Ethernet device driver
   2 *
   3 *  Copyright (C) 2014 Renesas Electronics Corporation
   4 *  Copyright (C) 2006-2012 Nobuhiro Iwamatsu
   5 *  Copyright (C) 2008-2014 Renesas Solutions Corp.
   6 *  Copyright (C) 2013-2017 Cogent Embedded, Inc.
   7 *  Copyright (C) 2014 Codethink Limited
   8 *
   9 *  This program is free software; you can redistribute it and/or modify it
  10 *  under the terms and conditions of the GNU General Public License,
  11 *  version 2, as published by the Free Software Foundation.
  12 *
  13 *  This program is distributed in the hope it will be useful, but WITHOUT
  14 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  15 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  16 *  more details.
  17 *
  18 *  The full GNU General Public License is included in this distribution in
  19 *  the file called "COPYING".
  20 */
  21
  22#include <linux/module.h>
  23#include <linux/kernel.h>
  24#include <linux/spinlock.h>
  25#include <linux/interrupt.h>
  26#include <linux/dma-mapping.h>
  27#include <linux/etherdevice.h>
  28#include <linux/delay.h>
  29#include <linux/platform_device.h>
  30#include <linux/mdio-bitbang.h>
  31#include <linux/netdevice.h>
  32#include <linux/of.h>
  33#include <linux/of_device.h>
  34#include <linux/of_irq.h>
  35#include <linux/of_net.h>
  36#include <linux/phy.h>
  37#include <linux/cache.h>
  38#include <linux/io.h>
  39#include <linux/pm_runtime.h>
  40#include <linux/slab.h>
  41#include <linux/ethtool.h>
  42#include <linux/if_vlan.h>
  43#include <linux/sh_eth.h>
  44#include <linux/of_mdio.h>
  45
  46#include "sh_eth.h"
  47
  48#define SH_ETH_DEF_MSG_ENABLE \
  49		(NETIF_MSG_LINK	| \
  50		NETIF_MSG_TIMER	| \
  51		NETIF_MSG_RX_ERR| \
  52		NETIF_MSG_TX_ERR)
  53
  54#define SH_ETH_OFFSET_INVALID	((u16)~0)
  55
  56#define SH_ETH_OFFSET_DEFAULTS			\
  57	[0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID
  58
 
 
 
 
 
 
 
 
 
  59static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
  60	SH_ETH_OFFSET_DEFAULTS,
  61
  62	[EDSR]		= 0x0000,
  63	[EDMR]		= 0x0400,
  64	[EDTRR]		= 0x0408,
  65	[EDRRR]		= 0x0410,
  66	[EESR]		= 0x0428,
  67	[EESIPR]	= 0x0430,
  68	[TDLAR]		= 0x0010,
  69	[TDFAR]		= 0x0014,
  70	[TDFXR]		= 0x0018,
  71	[TDFFR]		= 0x001c,
  72	[RDLAR]		= 0x0030,
  73	[RDFAR]		= 0x0034,
  74	[RDFXR]		= 0x0038,
  75	[RDFFR]		= 0x003c,
  76	[TRSCER]	= 0x0438,
  77	[RMFCR]		= 0x0440,
  78	[TFTR]		= 0x0448,
  79	[FDR]		= 0x0450,
  80	[RMCR]		= 0x0458,
  81	[RPADIR]	= 0x0460,
  82	[FCFTR]		= 0x0468,
  83	[CSMR]		= 0x04E4,
  84
  85	[ECMR]		= 0x0500,
  86	[ECSR]		= 0x0510,
  87	[ECSIPR]	= 0x0518,
  88	[PIR]		= 0x0520,
  89	[PSR]		= 0x0528,
  90	[PIPR]		= 0x052c,
  91	[RFLR]		= 0x0508,
  92	[APR]		= 0x0554,
  93	[MPR]		= 0x0558,
  94	[PFTCR]		= 0x055c,
  95	[PFRCR]		= 0x0560,
  96	[TPAUSER]	= 0x0564,
  97	[GECMR]		= 0x05b0,
  98	[BCULR]		= 0x05b4,
  99	[MAHR]		= 0x05c0,
 100	[MALR]		= 0x05c8,
 101	[TROCR]		= 0x0700,
 102	[CDCR]		= 0x0708,
 103	[LCCR]		= 0x0710,
 104	[CEFCR]		= 0x0740,
 105	[FRECR]		= 0x0748,
 106	[TSFRCR]	= 0x0750,
 107	[TLFRCR]	= 0x0758,
 108	[RFCR]		= 0x0760,
 109	[CERCR]		= 0x0768,
 110	[CEECR]		= 0x0770,
 111	[MAFCR]		= 0x0778,
 112	[RMII_MII]	= 0x0790,
 113
 114	[ARSTR]		= 0x0000,
 115	[TSU_CTRST]	= 0x0004,
 116	[TSU_FWEN0]	= 0x0010,
 117	[TSU_FWEN1]	= 0x0014,
 118	[TSU_FCM]	= 0x0018,
 119	[TSU_BSYSL0]	= 0x0020,
 120	[TSU_BSYSL1]	= 0x0024,
 121	[TSU_PRISL0]	= 0x0028,
 122	[TSU_PRISL1]	= 0x002c,
 123	[TSU_FWSL0]	= 0x0030,
 124	[TSU_FWSL1]	= 0x0034,
 125	[TSU_FWSLC]	= 0x0038,
 126	[TSU_QTAGM0]	= 0x0040,
 127	[TSU_QTAGM1]	= 0x0044,
 128	[TSU_FWSR]	= 0x0050,
 129	[TSU_FWINMK]	= 0x0054,
 130	[TSU_ADQT0]	= 0x0048,
 131	[TSU_ADQT1]	= 0x004c,
 132	[TSU_VTAG0]	= 0x0058,
 133	[TSU_VTAG1]	= 0x005c,
 134	[TSU_ADSBSY]	= 0x0060,
 135	[TSU_TEN]	= 0x0064,
 136	[TSU_POST1]	= 0x0070,
 137	[TSU_POST2]	= 0x0074,
 138	[TSU_POST3]	= 0x0078,
 139	[TSU_POST4]	= 0x007c,
 140	[TSU_ADRH0]	= 0x0100,
 141
 142	[TXNLCR0]	= 0x0080,
 143	[TXALCR0]	= 0x0084,
 144	[RXNLCR0]	= 0x0088,
 145	[RXALCR0]	= 0x008c,
 146	[FWNLCR0]	= 0x0090,
 147	[FWALCR0]	= 0x0094,
 148	[TXNLCR1]	= 0x00a0,
 149	[TXALCR1]	= 0x00a4,
 150	[RXNLCR1]	= 0x00a8,
 151	[RXALCR1]	= 0x00ac,
 152	[FWNLCR1]	= 0x00b0,
 153	[FWALCR1]	= 0x00b4,
 154};
 155
 156static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
 157	SH_ETH_OFFSET_DEFAULTS,
 158
 159	[EDSR]		= 0x0000,
 160	[EDMR]		= 0x0400,
 161	[EDTRR]		= 0x0408,
 162	[EDRRR]		= 0x0410,
 163	[EESR]		= 0x0428,
 164	[EESIPR]	= 0x0430,
 165	[TDLAR]		= 0x0010,
 166	[TDFAR]		= 0x0014,
 167	[TDFXR]		= 0x0018,
 168	[TDFFR]		= 0x001c,
 169	[RDLAR]		= 0x0030,
 170	[RDFAR]		= 0x0034,
 171	[RDFXR]		= 0x0038,
 172	[RDFFR]		= 0x003c,
 173	[TRSCER]	= 0x0438,
 174	[RMFCR]		= 0x0440,
 175	[TFTR]		= 0x0448,
 176	[FDR]		= 0x0450,
 177	[RMCR]		= 0x0458,
 178	[RPADIR]	= 0x0460,
 179	[FCFTR]		= 0x0468,
 180	[CSMR]		= 0x04E4,
 181
 182	[ECMR]		= 0x0500,
 183	[RFLR]		= 0x0508,
 184	[ECSR]		= 0x0510,
 185	[ECSIPR]	= 0x0518,
 186	[PIR]		= 0x0520,
 187	[APR]		= 0x0554,
 188	[MPR]		= 0x0558,
 189	[PFTCR]		= 0x055c,
 190	[PFRCR]		= 0x0560,
 191	[TPAUSER]	= 0x0564,
 192	[MAHR]		= 0x05c0,
 193	[MALR]		= 0x05c8,
 194	[CEFCR]		= 0x0740,
 195	[FRECR]		= 0x0748,
 196	[TSFRCR]	= 0x0750,
 197	[TLFRCR]	= 0x0758,
 198	[RFCR]		= 0x0760,
 199	[MAFCR]		= 0x0778,
 200
 201	[ARSTR]		= 0x0000,
 202	[TSU_CTRST]	= 0x0004,
 203	[TSU_FWSLC]	= 0x0038,
 204	[TSU_VTAG0]	= 0x0058,
 205	[TSU_ADSBSY]	= 0x0060,
 206	[TSU_TEN]	= 0x0064,
 207	[TSU_POST1]	= 0x0070,
 208	[TSU_POST2]	= 0x0074,
 209	[TSU_POST3]	= 0x0078,
 210	[TSU_POST4]	= 0x007c,
 211	[TSU_ADRH0]	= 0x0100,
 212
 213	[TXNLCR0]	= 0x0080,
 214	[TXALCR0]	= 0x0084,
 215	[RXNLCR0]	= 0x0088,
 216	[RXALCR0]	= 0x008C,
 217};
 218
 219static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
 220	SH_ETH_OFFSET_DEFAULTS,
 221
 222	[ECMR]		= 0x0300,
 223	[RFLR]		= 0x0308,
 224	[ECSR]		= 0x0310,
 225	[ECSIPR]	= 0x0318,
 226	[PIR]		= 0x0320,
 227	[PSR]		= 0x0328,
 228	[RDMLR]		= 0x0340,
 229	[IPGR]		= 0x0350,
 230	[APR]		= 0x0354,
 231	[MPR]		= 0x0358,
 232	[RFCF]		= 0x0360,
 233	[TPAUSER]	= 0x0364,
 234	[TPAUSECR]	= 0x0368,
 235	[MAHR]		= 0x03c0,
 236	[MALR]		= 0x03c8,
 237	[TROCR]		= 0x03d0,
 238	[CDCR]		= 0x03d4,
 239	[LCCR]		= 0x03d8,
 240	[CNDCR]		= 0x03dc,
 241	[CEFCR]		= 0x03e4,
 242	[FRECR]		= 0x03e8,
 243	[TSFRCR]	= 0x03ec,
 244	[TLFRCR]	= 0x03f0,
 245	[RFCR]		= 0x03f4,
 246	[MAFCR]		= 0x03f8,
 247
 248	[EDMR]		= 0x0200,
 249	[EDTRR]		= 0x0208,
 250	[EDRRR]		= 0x0210,
 251	[TDLAR]		= 0x0218,
 252	[RDLAR]		= 0x0220,
 253	[EESR]		= 0x0228,
 254	[EESIPR]	= 0x0230,
 255	[TRSCER]	= 0x0238,
 256	[RMFCR]		= 0x0240,
 257	[TFTR]		= 0x0248,
 258	[FDR]		= 0x0250,
 259	[RMCR]		= 0x0258,
 260	[TFUCR]		= 0x0264,
 261	[RFOCR]		= 0x0268,
 262	[RMIIMODE]      = 0x026c,
 263	[FCFTR]		= 0x0270,
 264	[TRIMD]		= 0x027c,
 265};
 266
 267static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
 268	SH_ETH_OFFSET_DEFAULTS,
 269
 270	[ECMR]		= 0x0100,
 271	[RFLR]		= 0x0108,
 272	[ECSR]		= 0x0110,
 273	[ECSIPR]	= 0x0118,
 274	[PIR]		= 0x0120,
 275	[PSR]		= 0x0128,
 276	[RDMLR]		= 0x0140,
 277	[IPGR]		= 0x0150,
 278	[APR]		= 0x0154,
 279	[MPR]		= 0x0158,
 280	[TPAUSER]	= 0x0164,
 281	[RFCF]		= 0x0160,
 282	[TPAUSECR]	= 0x0168,
 283	[BCFRR]		= 0x016c,
 284	[MAHR]		= 0x01c0,
 285	[MALR]		= 0x01c8,
 286	[TROCR]		= 0x01d0,
 287	[CDCR]		= 0x01d4,
 288	[LCCR]		= 0x01d8,
 289	[CNDCR]		= 0x01dc,
 290	[CEFCR]		= 0x01e4,
 291	[FRECR]		= 0x01e8,
 292	[TSFRCR]	= 0x01ec,
 293	[TLFRCR]	= 0x01f0,
 294	[RFCR]		= 0x01f4,
 295	[MAFCR]		= 0x01f8,
 296	[RTRATE]	= 0x01fc,
 297
 298	[EDMR]		= 0x0000,
 299	[EDTRR]		= 0x0008,
 300	[EDRRR]		= 0x0010,
 301	[TDLAR]		= 0x0018,
 302	[RDLAR]		= 0x0020,
 303	[EESR]		= 0x0028,
 304	[EESIPR]	= 0x0030,
 305	[TRSCER]	= 0x0038,
 306	[RMFCR]		= 0x0040,
 307	[TFTR]		= 0x0048,
 308	[FDR]		= 0x0050,
 309	[RMCR]		= 0x0058,
 310	[TFUCR]		= 0x0064,
 311	[RFOCR]		= 0x0068,
 312	[FCFTR]		= 0x0070,
 313	[RPADIR]	= 0x0078,
 314	[TRIMD]		= 0x007c,
 315	[RBWAR]		= 0x00c8,
 316	[RDFAR]		= 0x00cc,
 317	[TBRAR]		= 0x00d4,
 318	[TDFAR]		= 0x00d8,
 319};
 320
 321static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
 322	SH_ETH_OFFSET_DEFAULTS,
 323
 324	[EDMR]		= 0x0000,
 325	[EDTRR]		= 0x0004,
 326	[EDRRR]		= 0x0008,
 327	[TDLAR]		= 0x000c,
 328	[RDLAR]		= 0x0010,
 329	[EESR]		= 0x0014,
 330	[EESIPR]	= 0x0018,
 331	[TRSCER]	= 0x001c,
 332	[RMFCR]		= 0x0020,
 333	[TFTR]		= 0x0024,
 334	[FDR]		= 0x0028,
 335	[RMCR]		= 0x002c,
 336	[EDOCR]		= 0x0030,
 337	[FCFTR]		= 0x0034,
 338	[RPADIR]	= 0x0038,
 339	[TRIMD]		= 0x003c,
 340	[RBWAR]		= 0x0040,
 341	[RDFAR]		= 0x0044,
 342	[TBRAR]		= 0x004c,
 343	[TDFAR]		= 0x0050,
 344
 345	[ECMR]		= 0x0160,
 346	[ECSR]		= 0x0164,
 347	[ECSIPR]	= 0x0168,
 348	[PIR]		= 0x016c,
 349	[MAHR]		= 0x0170,
 350	[MALR]		= 0x0174,
 351	[RFLR]		= 0x0178,
 352	[PSR]		= 0x017c,
 353	[TROCR]		= 0x0180,
 354	[CDCR]		= 0x0184,
 355	[LCCR]		= 0x0188,
 356	[CNDCR]		= 0x018c,
 357	[CEFCR]		= 0x0194,
 358	[FRECR]		= 0x0198,
 359	[TSFRCR]	= 0x019c,
 360	[TLFRCR]	= 0x01a0,
 361	[RFCR]		= 0x01a4,
 362	[MAFCR]		= 0x01a8,
 363	[IPGR]		= 0x01b4,
 364	[APR]		= 0x01b8,
 365	[MPR]		= 0x01bc,
 366	[TPAUSER]	= 0x01c4,
 367	[BCFR]		= 0x01cc,
 368
 369	[ARSTR]		= 0x0000,
 370	[TSU_CTRST]	= 0x0004,
 371	[TSU_FWEN0]	= 0x0010,
 372	[TSU_FWEN1]	= 0x0014,
 373	[TSU_FCM]	= 0x0018,
 374	[TSU_BSYSL0]	= 0x0020,
 375	[TSU_BSYSL1]	= 0x0024,
 376	[TSU_PRISL0]	= 0x0028,
 377	[TSU_PRISL1]	= 0x002c,
 378	[TSU_FWSL0]	= 0x0030,
 379	[TSU_FWSL1]	= 0x0034,
 380	[TSU_FWSLC]	= 0x0038,
 381	[TSU_QTAGM0]	= 0x0040,
 382	[TSU_QTAGM1]	= 0x0044,
 383	[TSU_ADQT0]	= 0x0048,
 384	[TSU_ADQT1]	= 0x004c,
 385	[TSU_FWSR]	= 0x0050,
 386	[TSU_FWINMK]	= 0x0054,
 387	[TSU_ADSBSY]	= 0x0060,
 388	[TSU_TEN]	= 0x0064,
 389	[TSU_POST1]	= 0x0070,
 390	[TSU_POST2]	= 0x0074,
 391	[TSU_POST3]	= 0x0078,
 392	[TSU_POST4]	= 0x007c,
 393
 394	[TXNLCR0]	= 0x0080,
 395	[TXALCR0]	= 0x0084,
 396	[RXNLCR0]	= 0x0088,
 397	[RXALCR0]	= 0x008c,
 398	[FWNLCR0]	= 0x0090,
 399	[FWALCR0]	= 0x0094,
 400	[TXNLCR1]	= 0x00a0,
 401	[TXALCR1]	= 0x00a4,
 402	[RXNLCR1]	= 0x00a8,
 403	[RXALCR1]	= 0x00ac,
 404	[FWNLCR1]	= 0x00b0,
 405	[FWALCR1]	= 0x00b4,
 406
 407	[TSU_ADRH0]	= 0x0100,
 408};
 
 409
 410static void sh_eth_rcv_snd_disable(struct net_device *ndev);
 411static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);
 412
 413static void sh_eth_write(struct net_device *ndev, u32 data, int enum_index)
 414{
 415	struct sh_eth_private *mdp = netdev_priv(ndev);
 416	u16 offset = mdp->reg_offset[enum_index];
 417
 418	if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
 419		return;
 420
 421	iowrite32(data, mdp->addr + offset);
 422}
 423
 424static u32 sh_eth_read(struct net_device *ndev, int enum_index)
 425{
 426	struct sh_eth_private *mdp = netdev_priv(ndev);
 427	u16 offset = mdp->reg_offset[enum_index];
 428
 429	if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
 430		return ~0U;
 431
 432	return ioread32(mdp->addr + offset);
 433}
 434
 435static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear,
 436			  u32 set)
 437{
 438	sh_eth_write(ndev, (sh_eth_read(ndev, enum_index) & ~clear) | set,
 439		     enum_index);
 440}
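/* Usage sketch (illustrative, assuming the ECMR_RE/ECMR_TE bit values
 * from sh_eth.h, 0x40 and 0x20 respectively): a call such as
 *
 *	sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_TE);
 *
 * reads ECMR, clears both RE and TE, then sets TE again, so a starting
 * value of 0x00000060 is written back as 0x00000020 while every other
 * ECMR bit is preserved by the read-modify-write.
 */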
 441
 442static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data,
 443			     int enum_index)
 444{
 445	iowrite32(data, mdp->tsu_addr + mdp->reg_offset[enum_index]);
 446}
 447
 448static u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index)
 449{
 450	return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]);
 451}
 452
 453static void sh_eth_select_mii(struct net_device *ndev)
 454{
 455	struct sh_eth_private *mdp = netdev_priv(ndev);
 456	u32 value;
 457
 458	switch (mdp->phy_interface) {
 459	case PHY_INTERFACE_MODE_GMII:
 460		value = 0x2;
 461		break;
 462	case PHY_INTERFACE_MODE_MII:
 463		value = 0x1;
 464		break;
 465	case PHY_INTERFACE_MODE_RMII:
 466		value = 0x0;
 467		break;
 468	default:
 469		netdev_warn(ndev,
 470			    "PHY interface mode was not set up. Set to MII.\n");
 471		value = 0x1;
 472		break;
 473	}
 474
 475	sh_eth_write(ndev, value, RMII_MII);
 476}
 477
 478static void sh_eth_set_duplex(struct net_device *ndev)
 479{
 480	struct sh_eth_private *mdp = netdev_priv(ndev);
 481
 482	sh_eth_modify(ndev, ECMR, ECMR_DM, mdp->duplex ? ECMR_DM : 0);
 483}
 484
 485static void sh_eth_chip_reset(struct net_device *ndev)
 486{
 487	struct sh_eth_private *mdp = netdev_priv(ndev);
 488
 489	/* reset device */
 490	sh_eth_tsu_write(mdp, ARSTR_ARST, ARSTR);
 491	mdelay(1);
 492}
 493
 494static int sh_eth_soft_reset(struct net_device *ndev)
 495{
 496	sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, EDMR_SRST_ETHER);
 497	mdelay(3);
 498	sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, 0);
 499
 500	return 0;
 501}
 502
 503static int sh_eth_check_soft_reset(struct net_device *ndev)
 504{
 505	int cnt;
 506
 507	for (cnt = 100; cnt > 0; cnt--) {
 508		if (!(sh_eth_read(ndev, EDMR) & EDMR_SRST_GETHER))
 509			return 0;
 510		mdelay(1);
 511	}
 512
 513	netdev_err(ndev, "Device reset failed\n");
 514	return -ETIMEDOUT;
 515}
 516
 517static int sh_eth_soft_reset_gether(struct net_device *ndev)
 518{
 519	struct sh_eth_private *mdp = netdev_priv(ndev);
 520	int ret;
 521
 522	sh_eth_write(ndev, EDSR_ENALL, EDSR);
 523	sh_eth_modify(ndev, EDMR, EDMR_SRST_GETHER, EDMR_SRST_GETHER);
 524
 525	ret = sh_eth_check_soft_reset(ndev);
 526	if (ret)
 527		return ret;
 528
 529	/* Table Init */
 530	sh_eth_write(ndev, 0, TDLAR);
 531	sh_eth_write(ndev, 0, TDFAR);
 532	sh_eth_write(ndev, 0, TDFXR);
 533	sh_eth_write(ndev, 0, TDFFR);
 534	sh_eth_write(ndev, 0, RDLAR);
 535	sh_eth_write(ndev, 0, RDFAR);
 536	sh_eth_write(ndev, 0, RDFXR);
 537	sh_eth_write(ndev, 0, RDFFR);
 538
 539	/* Reset HW CRC register */
 540	if (mdp->cd->hw_checksum)
 541		sh_eth_write(ndev, 0, CSMR);
 542
 543	/* Select MII mode */
 544	if (mdp->cd->select_mii)
 545		sh_eth_select_mii(ndev);
 546
 547	return ret;
 548}
 549
 550static void sh_eth_set_rate_gether(struct net_device *ndev)
 551{
 552	struct sh_eth_private *mdp = netdev_priv(ndev);
 553
 554	switch (mdp->speed) {
 555	case 10: /* 10BASE */
 556		sh_eth_write(ndev, GECMR_10, GECMR);
 557		break;
 558	case 100: /* 100BASE */
 559		sh_eth_write(ndev, GECMR_100, GECMR);
 560		break;
 561	case 1000: /* 1000BASE */
 562		sh_eth_write(ndev, GECMR_1000, GECMR);
 563		break;
 564	}
 565}
 566
 567#ifdef CONFIG_OF
 568/* R7S72100 */
 569static struct sh_eth_cpu_data r7s72100_data = {
 570	.soft_reset	= sh_eth_soft_reset_gether,
 571
 572	.chip_reset	= sh_eth_chip_reset,
 573	.set_duplex	= sh_eth_set_duplex,
 574
 575	.register_type	= SH_ETH_REG_FAST_RZ,
 576
 577	.edtrr_trns	= EDTRR_TRNS_GETHER,
 578	.ecsr_value	= ECSR_ICD,
 579	.ecsipr_value	= ECSIPR_ICDIP,
 580	.eesipr_value	= EESIPR_TWB1IP | EESIPR_TWBIP | EESIPR_TC1IP |
 581			  EESIPR_TABTIP | EESIPR_RABTIP | EESIPR_RFCOFIP |
 582			  EESIPR_ECIIP |
 583			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
 584			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
 585			  EESIPR_RMAFIP | EESIPR_RRFIP |
 586			  EESIPR_RTLFIP | EESIPR_RTSFIP |
 587			  EESIPR_PREIP | EESIPR_CERFIP,
 588
 589	.tx_check	= EESR_TC1 | EESR_FTC,
 590	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
 591			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
 592			  EESR_TDE,
 593	.fdr_value	= 0x0000070f,
 594
 595	.no_psr		= 1,
 596	.apr		= 1,
 597	.mpr		= 1,
 598	.tpauser	= 1,
 599	.hw_swap	= 1,
 600	.rpadir		= 1,
 601	.rpadir_value   = 2 << 16,
 602	.no_trimd	= 1,
 603	.no_ade		= 1,
 604	.xdfar_rw	= 1,
 605	.hw_checksum	= 1,
 606	.tsu		= 1,
 607	.no_tx_cntrs	= 1,
 608};
 609
 610static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
 611{
 612	sh_eth_chip_reset(ndev);
 613
 614	sh_eth_select_mii(ndev);
 615}
 616
 617/* R8A7740 */
 618static struct sh_eth_cpu_data r8a7740_data = {
 619	.soft_reset	= sh_eth_soft_reset_gether,
 620
 621	.chip_reset	= sh_eth_chip_reset_r8a7740,
 622	.set_duplex	= sh_eth_set_duplex,
 623	.set_rate	= sh_eth_set_rate_gether,
 624
 625	.register_type	= SH_ETH_REG_GIGABIT,
 626
 627	.edtrr_trns	= EDTRR_TRNS_GETHER,
 628	.ecsr_value	= ECSR_ICD | ECSR_MPD,
 629	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
 630	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
 631			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
 632			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
 633			  0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
 634			  EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
 635			  EESIPR_CEEFIP | EESIPR_CELFIP |
 636			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
 637			  EESIPR_PREIP | EESIPR_CERFIP,
 638
 639	.tx_check	= EESR_TC1 | EESR_FTC,
 640	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
 641			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
 642			  EESR_TDE,
 643	.fdr_value	= 0x0000070f,
 644
 645	.apr		= 1,
 646	.mpr		= 1,
 647	.tpauser	= 1,
 648	.bculr		= 1,
 649	.hw_swap	= 1,
 650	.rpadir		= 1,
 651	.rpadir_value   = 2 << 16,
 652	.no_trimd	= 1,
 653	.no_ade		= 1,
 654	.xdfar_rw	= 1,
 655	.hw_checksum	= 1,
 656	.tsu		= 1,
 657	.select_mii	= 1,
 658	.magic		= 1,
 659	.cexcr		= 1,
 660};
 661
 662/* There is CPU-dependent code */
 663static void sh_eth_set_rate_rcar(struct net_device *ndev)
 664{
 665	struct sh_eth_private *mdp = netdev_priv(ndev);
 666
 667	switch (mdp->speed) {
 668	case 10: /* 10BASE */
 669		sh_eth_modify(ndev, ECMR, ECMR_ELB, 0);
 670		break;
 671	case 100: /* 100BASE */
 672		sh_eth_modify(ndev, ECMR, ECMR_ELB, ECMR_ELB);
 673		break;
 674	}
 675}
 676
 677/* R-Car Gen1 */
 678static struct sh_eth_cpu_data rcar_gen1_data = {
 679	.soft_reset	= sh_eth_soft_reset,
 680
 681	.set_duplex	= sh_eth_set_duplex,
 682	.set_rate	= sh_eth_set_rate_rcar,
 683
 684	.register_type	= SH_ETH_REG_FAST_RCAR,
 685
 686	.edtrr_trns	= EDTRR_TRNS_ETHER,
 687	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
 688	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
 689	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP |
 690			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
 691			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
 692			  EESIPR_RMAFIP | EESIPR_RRFIP |
 693			  EESIPR_RTLFIP | EESIPR_RTSFIP |
 694			  EESIPR_PREIP | EESIPR_CERFIP,
 695
 696	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
 697	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
 698			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
 699	.fdr_value	= 0x00000f0f,
 700
 701	.apr		= 1,
 702	.mpr		= 1,
 703	.tpauser	= 1,
 704	.hw_swap	= 1,
 705	.no_xdfar	= 1,
 706};
 707
 708/* R-Car Gen2 and RZ/G1 */
 709static struct sh_eth_cpu_data rcar_gen2_data = {
 710	.soft_reset	= sh_eth_soft_reset,
 711
 712	.set_duplex	= sh_eth_set_duplex,
 713	.set_rate	= sh_eth_set_rate_rcar,
 714
 715	.register_type	= SH_ETH_REG_FAST_RCAR,
 716
 717	.edtrr_trns	= EDTRR_TRNS_ETHER,
 718	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD | ECSR_MPD,
 719	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP |
 720			  ECSIPR_MPDIP,
 721	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP |
 722			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
 723			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
 724			  EESIPR_RMAFIP | EESIPR_RRFIP |
 725			  EESIPR_RTLFIP | EESIPR_RTSFIP |
 726			  EESIPR_PREIP | EESIPR_CERFIP,
 727
 728	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
 729	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
 730			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
 731	.fdr_value	= 0x00000f0f,
 732
 733	.trscer_err_mask = DESC_I_RINT8,
 734
 735	.apr		= 1,
 736	.mpr		= 1,
 737	.tpauser	= 1,
 738	.hw_swap	= 1,
 739	.no_xdfar	= 1,
 740	.rmiimode	= 1,
 741	.magic		= 1,
 742};
 743#endif /* CONFIG_OF */
 744
 745static void sh_eth_set_rate_sh7724(struct net_device *ndev)
 746{
 747	struct sh_eth_private *mdp = netdev_priv(ndev);
 748
 749	switch (mdp->speed) {
 750	case 10: /* 10BASE */
 751		sh_eth_modify(ndev, ECMR, ECMR_RTM, 0);
 752		break;
 753	case 100: /* 100BASE */
 754		sh_eth_modify(ndev, ECMR, ECMR_RTM, ECMR_RTM);
 755		break;
 756	}
 757}
 758
 759/* SH7724 */
 760static struct sh_eth_cpu_data sh7724_data = {
 761	.soft_reset	= sh_eth_soft_reset,
 762
 763	.set_duplex	= sh_eth_set_duplex,
 764	.set_rate	= sh_eth_set_rate_sh7724,
 765
 766	.register_type	= SH_ETH_REG_FAST_SH4,
 767
 768	.edtrr_trns	= EDTRR_TRNS_ETHER,
 769	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
 770	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
 771	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP |
 772			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
 773			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
 774			  EESIPR_RMAFIP | EESIPR_RRFIP |
 775			  EESIPR_RTLFIP | EESIPR_RTSFIP |
 776			  EESIPR_PREIP | EESIPR_CERFIP,
 777
 778	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
 779	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
 780			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
 781
 782	.apr		= 1,
 783	.mpr		= 1,
 784	.tpauser	= 1,
 785	.hw_swap	= 1,
 786	.rpadir		= 1,
 787	.rpadir_value	= 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
 788};
 789
 790static void sh_eth_set_rate_sh7757(struct net_device *ndev)
 791{
 792	struct sh_eth_private *mdp = netdev_priv(ndev);
 793
 794	switch (mdp->speed) {
 795	case 10: /* 10BASE */
 796		sh_eth_write(ndev, 0, RTRATE);
 797		break;
 798	case 100: /* 100BASE */
 799		sh_eth_write(ndev, 1, RTRATE);
 800		break;
 801	}
 802}
 803
 804/* SH7757 */
 805static struct sh_eth_cpu_data sh7757_data = {
 806	.soft_reset	= sh_eth_soft_reset,
 807
 808	.set_duplex	= sh_eth_set_duplex,
 809	.set_rate	= sh_eth_set_rate_sh7757,
 810
 811	.register_type	= SH_ETH_REG_FAST_SH4,
 812
 813	.edtrr_trns	= EDTRR_TRNS_ETHER,
 814	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
 815			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
 816			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
 817			  0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
 818			  EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
 819			  EESIPR_CEEFIP | EESIPR_CELFIP |
 820			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
 821			  EESIPR_PREIP | EESIPR_CERFIP,
 822
 823	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
 824	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
 825			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
 826
 827	.irq_flags	= IRQF_SHARED,
 828	.apr		= 1,
 829	.mpr		= 1,
 830	.tpauser	= 1,
 831	.hw_swap	= 1,
 832	.no_ade		= 1,
 833	.rpadir		= 1,
 834	.rpadir_value   = 2 << 16,
 835	.rtrate		= 1,
 836	.dual_port	= 1,
 837};
 838
 839#define SH_GIGA_ETH_BASE	0xfee00000UL
 840#define GIGA_MALR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
 841#define GIGA_MAHR(port)		(SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
 842static void sh_eth_chip_reset_giga(struct net_device *ndev)
 843{
 844	u32 mahr[2], malr[2];
 845	int i;
 846
 847	/* save MAHR and MALR */
 848	for (i = 0; i < 2; i++) {
 849		malr[i] = ioread32((void *)GIGA_MALR(i));
 850		mahr[i] = ioread32((void *)GIGA_MAHR(i));
 851	}
 852
 853	sh_eth_chip_reset(ndev);
 854
 855	/* restore MAHR and MALR */
 856	for (i = 0; i < 2; i++) {
 857		iowrite32(malr[i], (void *)GIGA_MALR(i));
 858		iowrite32(mahr[i], (void *)GIGA_MAHR(i));
 859	}
 860}
 861
 862static void sh_eth_set_rate_giga(struct net_device *ndev)
 863{
 864	struct sh_eth_private *mdp = netdev_priv(ndev);
 865
 866	switch (mdp->speed) {
 867	case 10: /* 10BASE */
 868		sh_eth_write(ndev, 0x00000000, GECMR);
 869		break;
 870	case 100: /* 100BASE */
 871		sh_eth_write(ndev, 0x00000010, GECMR);
 872		break;
 873	case 1000: /* 1000BASE */
 874		sh_eth_write(ndev, 0x00000020, GECMR);
 875		break;
 876	}
 877}
 878
 879/* SH7757(GETHERC) */
 880static struct sh_eth_cpu_data sh7757_data_giga = {
 881	.soft_reset	= sh_eth_soft_reset_gether,
 882
 883	.chip_reset	= sh_eth_chip_reset_giga,
 884	.set_duplex	= sh_eth_set_duplex,
 885	.set_rate	= sh_eth_set_rate_giga,
 886
 887	.register_type	= SH_ETH_REG_GIGABIT,
 888
 889	.edtrr_trns	= EDTRR_TRNS_GETHER,
 890	.ecsr_value	= ECSR_ICD | ECSR_MPD,
 891	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
 892	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
 893			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
 894			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
 895			  0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
 896			  EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
 897			  EESIPR_CEEFIP | EESIPR_CELFIP |
 898			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
 899			  EESIPR_PREIP | EESIPR_CERFIP,
 900
 901	.tx_check	= EESR_TC1 | EESR_FTC,
 902	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
 903			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
 904			  EESR_TDE,
 905	.fdr_value	= 0x0000072f,
 906
 907	.irq_flags	= IRQF_SHARED,
 908	.apr		= 1,
 909	.mpr		= 1,
 910	.tpauser	= 1,
 911	.bculr		= 1,
 912	.hw_swap	= 1,
 913	.rpadir		= 1,
 914	.rpadir_value   = 2 << 16,
 915	.no_trimd	= 1,
 916	.no_ade		= 1,
 917	.xdfar_rw	= 1,
 918	.tsu		= 1,
 919	.cexcr		= 1,
 920	.dual_port	= 1,
 921};
 922
 923/* SH7734 */
 924static struct sh_eth_cpu_data sh7734_data = {
 925	.soft_reset	= sh_eth_soft_reset_gether,
 926
 927	.chip_reset	= sh_eth_chip_reset,
 928	.set_duplex	= sh_eth_set_duplex,
 929	.set_rate	= sh_eth_set_rate_gether,
 930
 931	.register_type	= SH_ETH_REG_GIGABIT,
 932
 933	.edtrr_trns	= EDTRR_TRNS_GETHER,
 934	.ecsr_value	= ECSR_ICD | ECSR_MPD,
 935	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
 936	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
 937			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
 938			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
 939			  EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP |
 940			  EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP |
 941			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
 942			  EESIPR_PREIP | EESIPR_CERFIP,
 943
 944	.tx_check	= EESR_TC1 | EESR_FTC,
 945	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
 946			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
 947			  EESR_TDE,
 948
 949	.apr		= 1,
 950	.mpr		= 1,
 951	.tpauser	= 1,
 952	.bculr		= 1,
 953	.hw_swap	= 1,
 954	.no_trimd	= 1,
 955	.no_ade		= 1,
 956	.xdfar_rw	= 1,
 957	.tsu		= 1,
 958	.hw_checksum	= 1,
 959	.select_mii	= 1,
 960	.magic		= 1,
 961	.cexcr		= 1,
 962};
 963
 964/* SH7763 */
 965static struct sh_eth_cpu_data sh7763_data = {
 966	.soft_reset	= sh_eth_soft_reset_gether,
 967
 968	.chip_reset	= sh_eth_chip_reset,
 969	.set_duplex	= sh_eth_set_duplex,
 970	.set_rate	= sh_eth_set_rate_gether,
 971
 972	.register_type	= SH_ETH_REG_GIGABIT,
 973
 974	.edtrr_trns	= EDTRR_TRNS_GETHER,
 975	.ecsr_value	= ECSR_ICD | ECSR_MPD,
 976	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
 977	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
 978			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
 979			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
 980			  EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP |
 981			  EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP |
 982			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
 983			  EESIPR_PREIP | EESIPR_CERFIP,
 984
 985	.tx_check	= EESR_TC1 | EESR_FTC,
 986	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
 987			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
 988
 989	.apr		= 1,
 990	.mpr		= 1,
 991	.tpauser	= 1,
 992	.bculr		= 1,
 993	.hw_swap	= 1,
 994	.no_trimd	= 1,
 995	.no_ade		= 1,
 996	.xdfar_rw	= 1,
 997	.tsu		= 1,
 998	.irq_flags	= IRQF_SHARED,
 999	.magic		= 1,
1000	.cexcr		= 1,
1001	.dual_port	= 1,
1002};
1003
1004static struct sh_eth_cpu_data sh7619_data = {
1005	.soft_reset	= sh_eth_soft_reset,
1006
1007	.register_type	= SH_ETH_REG_FAST_SH3_SH2,
1008
1009	.edtrr_trns	= EDTRR_TRNS_ETHER,
1010	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
1011			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
1012			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
1013			  0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
1014			  EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
1015			  EESIPR_CEEFIP | EESIPR_CELFIP |
1016			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
1017			  EESIPR_PREIP | EESIPR_CERFIP,
1018
1019	.apr		= 1,
1020	.mpr		= 1,
1021	.tpauser	= 1,
1022	.hw_swap	= 1,
1023};
1024
1025static struct sh_eth_cpu_data sh771x_data = {
1026	.soft_reset	= sh_eth_soft_reset,
1027
1028	.register_type	= SH_ETH_REG_FAST_SH3_SH2,
1029
1030	.edtrr_trns	= EDTRR_TRNS_ETHER,
1031	.eesipr_value	= EESIPR_RFCOFIP | EESIPR_ECIIP |
1032			  EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
1033			  EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
1034			  0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
1035			  EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
1036			  EESIPR_CEEFIP | EESIPR_CELFIP |
1037			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
1038			  EESIPR_PREIP | EESIPR_CERFIP,
1039	.tsu		= 1,
1040	.dual_port	= 1,
1041};
1042
1043static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
1044{
1045	if (!cd->ecsr_value)
1046		cd->ecsr_value = DEFAULT_ECSR_INIT;
1047
1048	if (!cd->ecsipr_value)
1049		cd->ecsipr_value = DEFAULT_ECSIPR_INIT;
1050
1051	if (!cd->fcftr_value)
1052		cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
1053				  DEFAULT_FIFO_F_D_RFD;
1054
1055	if (!cd->fdr_value)
1056		cd->fdr_value = DEFAULT_FDR_INIT;
1057
1058	if (!cd->tx_check)
1059		cd->tx_check = DEFAULT_TX_CHECK;
1060
1061	if (!cd->eesr_err_check)
1062		cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
1063
1064	if (!cd->trscer_err_mask)
1065		cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK;
1066}
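/* Illustrative sketch (example_data is hypothetical, not a real chip):
 * a board descriptor only has to fill in what has no usable default,
 * e.g.
 *
 *	static struct sh_eth_cpu_data example_data = {
 *		.soft_reset	= sh_eth_soft_reset,
 *		.register_type	= SH_ETH_REG_FAST_SH4,
 *		.edtrr_trns	= EDTRR_TRNS_ETHER,
 *	};
 *
 * After sh_eth_set_default_cpu_data() runs at probe time, the zero
 * fields above are backfilled with DEFAULT_ECSR_INIT, DEFAULT_FDR_INIT
 * and the other DEFAULT_* values checked for in the function.
 */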
1067
1068static void sh_eth_set_receive_align(struct sk_buff *skb)
1069{
1070	uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1);
1071
1072	if (reserve)
1073		skb_reserve(skb, SH_ETH_RX_ALIGN - reserve);
1074}
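/* Worked example (illustrative only, with SH_ETH_RX_ALIGN == 32): an skb
 * whose skb->data address ends in 0x06 gives reserve = 6, so
 * skb_reserve(skb, 32 - 6) advances the data pointer by 26 bytes onto a
 * 32-byte boundary; an already-aligned buffer (reserve == 0) is left
 * untouched.
 */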
1075
1076/* Program the hardware MAC address from dev->dev_addr. */
1077static void update_mac_address(struct net_device *ndev)
1078{
1079	sh_eth_write(ndev,
1080		     (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
1081		     (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
1082	sh_eth_write(ndev,
1083		     (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
1084}
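/* Worked example (illustrative only): for dev_addr 00:11:22:33:44:55 the
 * two writes above program MAHR = 0x00112233 (the four high bytes) and
 * MALR = 0x00004455 (the two low bytes), the same layout that
 * read_mac_address() below unpacks.
 */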
1085
1086/* Get MAC address from the SuperH MAC address registers
1087 *
1088 * The SuperH Ethernet device has no ROM to hold a MAC address.
1089 * This driver reads back the MAC address that the bootloader (U-Boot
1090 * or sh-ipl+g) programmed into the device, so the bootloader must set
1091 * a MAC address before this device is used.
1092 */
1093static void read_mac_address(struct net_device *ndev, unsigned char *mac)
1094{
1095	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
1096		memcpy(ndev->dev_addr, mac, ETH_ALEN);
1097	} else {
1098		u32 mahr = sh_eth_read(ndev, MAHR);
1099		u32 malr = sh_eth_read(ndev, MALR);
1100
1101		ndev->dev_addr[0] = (mahr >> 24) & 0xFF;
1102		ndev->dev_addr[1] = (mahr >> 16) & 0xFF;
1103		ndev->dev_addr[2] = (mahr >>  8) & 0xFF;
1104		ndev->dev_addr[3] = (mahr >>  0) & 0xFF;
1105		ndev->dev_addr[4] = (malr >>  8) & 0xFF;
1106		ndev->dev_addr[5] = (malr >>  0) & 0xFF;
1107	}
1108}
1109
1110struct bb_info {
1111	void (*set_gate)(void *addr);
1112	struct mdiobb_ctrl ctrl;
1113	void *addr;
1114};
1115
1116static void sh_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
1117{
1118	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
1119	u32 pir;
1120
1121	if (bitbang->set_gate)
1122		bitbang->set_gate(bitbang->addr);
1123
1124	pir = ioread32(bitbang->addr);
1125	if (set)
1126		pir |=  mask;
1127	else
1128		pir &= ~mask;
1129	iowrite32(pir, bitbang->addr);
1130}
1131
1132/* Data I/O pin control */
1133static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
1134{
1135	sh_mdio_ctrl(ctrl, PIR_MMD, bit);
1136}
1137
1138/* Set bit data */
1139static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
1140{
1141	sh_mdio_ctrl(ctrl, PIR_MDO, bit);
1142}
1143
1144/* Get bit data */
1145static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
1146{
1147	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
1148
1149	if (bitbang->set_gate)
1150		bitbang->set_gate(bitbang->addr);
1151
1152	return (ioread32(bitbang->addr) & PIR_MDI) != 0;
1153}
1154
1155/* MDC pin control */
1156static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
1157{
1158	sh_mdio_ctrl(ctrl, PIR_MDC, bit);
1159}
1160
1161/* mdio bus control struct */
1162static struct mdiobb_ops bb_ops = {
1163	.owner = THIS_MODULE,
1164	.set_mdc = sh_mdc_ctrl,
1165	.set_mdio_dir = sh_mmd_ctrl,
1166	.set_mdio_data = sh_set_mdio,
1167	.get_mdio_data = sh_get_mdio,
1168};
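/* Sketch of how bb_ops is consumed (the actual wiring lives in the
 * driver's MDIO setup code, not shown in this excerpt):
 *
 *	bitbang->ctrl.ops = &bb_ops;
 *	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
 *	if (!mdp->mii_bus)
 *		return -ENOMEM;
 *
 * alloc_mdio_bitbang() then drives the four callbacks above to clock
 * MDIO frames through the PIR register one bit at a time.
 */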
1169
1170/* free Tx skb function */
1171static int sh_eth_tx_free(struct net_device *ndev, bool sent_only)
1172{
1173	struct sh_eth_private *mdp = netdev_priv(ndev);
1174	struct sh_eth_txdesc *txdesc;
1175	int free_num = 0;
1176	int entry;
1177	bool sent;
1178
1179	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
1180		entry = mdp->dirty_tx % mdp->num_tx_ring;
1181		txdesc = &mdp->tx_ring[entry];
1182		sent = !(txdesc->status & cpu_to_le32(TD_TACT));
1183		if (sent_only && !sent)
1184			break;
1185		/* TACT bit must be checked before all the following reads */
1186		dma_rmb();
1187		netif_info(mdp, tx_done, ndev,
1188			   "tx entry %d status 0x%08x\n",
1189			   entry, le32_to_cpu(txdesc->status));
1190		/* Free the original skb. */
1191		if (mdp->tx_skbuff[entry]) {
1192			dma_unmap_single(&mdp->pdev->dev,
1193					 le32_to_cpu(txdesc->addr),
1194					 le32_to_cpu(txdesc->len) >> 16,
1195					 DMA_TO_DEVICE);
1196			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
1197			mdp->tx_skbuff[entry] = NULL;
1198			free_num++;
1199		}
1200		txdesc->status = cpu_to_le32(TD_TFP);
1201		if (entry >= mdp->num_tx_ring - 1)
1202			txdesc->status |= cpu_to_le32(TD_TDLE);
1203
1204		if (sent) {
1205			ndev->stats.tx_packets++;
1206			ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
1207		}
1208	}
1209	return free_num;
1210}
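/* Illustrative note: cur_tx and dirty_tx are free-running counters and a
 * ring slot is always (counter % num_tx_ring). For example, with
 * num_tx_ring = 4, dirty_tx = 5 and cur_tx = 8, the loop above reclaims
 * ring entries 1, 2 and 3 in that order and leaves dirty_tx == cur_tx
 * once every sent descriptor has been freed.
 */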
1211
1212/* free skb and descriptor buffer */
1213static void sh_eth_ring_free(struct net_device *ndev)
1214{
1215	struct sh_eth_private *mdp = netdev_priv(ndev);
1216	int ringsize, i;
1217
1218	if (mdp->rx_ring) {
1219		for (i = 0; i < mdp->num_rx_ring; i++) {
1220			if (mdp->rx_skbuff[i]) {
1221				struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i];
1222
1223				dma_unmap_single(&mdp->pdev->dev,
1224						 le32_to_cpu(rxdesc->addr),
1225						 ALIGN(mdp->rx_buf_sz, 32),
1226						 DMA_FROM_DEVICE);
1227			}
1228		}
1229		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
1230		dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->rx_ring,
1231				  mdp->rx_desc_dma);
1232		mdp->rx_ring = NULL;
1233	}
1234
1235	/* Free Rx skb ringbuffer */
1236	if (mdp->rx_skbuff) {
1237		for (i = 0; i < mdp->num_rx_ring; i++)
1238			dev_kfree_skb(mdp->rx_skbuff[i]);
1239	}
1240	kfree(mdp->rx_skbuff);
1241	mdp->rx_skbuff = NULL;
1242
1243	if (mdp->tx_ring) {
1244		sh_eth_tx_free(ndev, false);
1245
1246		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
1247		dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->tx_ring,
1248				  mdp->tx_desc_dma);
1249		mdp->tx_ring = NULL;
1250	}
1251
1252	/* Free Tx skb ringbuffer */
1253	kfree(mdp->tx_skbuff);
1254	mdp->tx_skbuff = NULL;
1255}
1256
1257/* format skb and descriptor buffer */
1258static void sh_eth_ring_format(struct net_device *ndev)
1259{
1260	struct sh_eth_private *mdp = netdev_priv(ndev);
1261	int i;
1262	struct sk_buff *skb;
1263	struct sh_eth_rxdesc *rxdesc = NULL;
1264	struct sh_eth_txdesc *txdesc = NULL;
1265	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
1266	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
1267	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
1268	dma_addr_t dma_addr;
1269	u32 buf_len;
1270
1271	mdp->cur_rx = 0;
1272	mdp->cur_tx = 0;
1273	mdp->dirty_rx = 0;
1274	mdp->dirty_tx = 0;
1275
1276	memset(mdp->rx_ring, 0, rx_ringsize);
1277
1278	/* build Rx ring buffer */
1279	for (i = 0; i < mdp->num_rx_ring; i++) {
1280		/* skb */
1281		mdp->rx_skbuff[i] = NULL;
1282		skb = netdev_alloc_skb(ndev, skbuff_size);
1283		if (skb == NULL)
1284			break;
1285		sh_eth_set_receive_align(skb);
1286
1287		/* The size of the buffer is a multiple of 32 bytes. */
1288		buf_len = ALIGN(mdp->rx_buf_sz, 32);
1289		dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, buf_len,
1290					  DMA_FROM_DEVICE);
1291		if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
1292			kfree_skb(skb);
1293			break;
1294		}
1295		mdp->rx_skbuff[i] = skb;
1296
1297		/* RX descriptor */
1298		rxdesc = &mdp->rx_ring[i];
1299		rxdesc->len = cpu_to_le32(buf_len << 16);
1300		rxdesc->addr = cpu_to_le32(dma_addr);
1301		rxdesc->status = cpu_to_le32(RD_RACT | RD_RFP);
1302
1303		/* Rx descriptor address set */
1304		if (i == 0) {
1305			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
1306			if (mdp->cd->xdfar_rw)
1307				sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
1308		}
1309	}
1310
1311	mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
1312
1313	/* Mark the last entry as wrapping the ring. */
1314	if (rxdesc)
1315		rxdesc->status |= cpu_to_le32(RD_RDLE);
1316
1317	memset(mdp->tx_ring, 0, tx_ringsize);
1318
1319	/* build Tx ring buffer */
1320	for (i = 0; i < mdp->num_tx_ring; i++) {
1321		mdp->tx_skbuff[i] = NULL;
1322		txdesc = &mdp->tx_ring[i];
1323		txdesc->status = cpu_to_le32(TD_TFP);
1324		txdesc->len = cpu_to_le32(0);
1325		if (i == 0) {
1326			/* Tx descriptor address set */
1327			sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
1328			if (mdp->cd->xdfar_rw)
1329				sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
1330		}
1331	}
1332
1333	txdesc->status |= cpu_to_le32(TD_TDLE);
1334}
1335
1336/* Get skb and descriptor buffer */
1337static int sh_eth_ring_init(struct net_device *ndev)
1338{
1339	struct sh_eth_private *mdp = netdev_priv(ndev);
1340	int rx_ringsize, tx_ringsize;
1341
1342	/* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
1343	 * card needs room to do 8 byte alignment, +2 so we can reserve
1344	 * the first 2 bytes, and +16 gets room for the status word from the
1345	 * card.
1346	 */
1347	mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
1348			  (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
1349	if (mdp->cd->rpadir)
1350		mdp->rx_buf_sz += NET_IP_ALIGN;
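	/* Worked example (illustrative only): with the common MTU of 1500,
	 * 1500 + 26 = 1526 rounds up to 1528 (a multiple of 8), and adding
	 * 2 + 16 gives rx_buf_sz = 1546; with rpadir set, NET_IP_ALIGN
	 * (normally 2) brings it to 1548 bytes per Rx buffer.
	 */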
1351
1352	/* Allocate RX and TX skb rings */
1353	mdp->rx_skbuff = kcalloc(mdp->num_rx_ring, sizeof(*mdp->rx_skbuff),
1354				 GFP_KERNEL);
1355	if (!mdp->rx_skbuff)
1356		return -ENOMEM;
1357
1358	mdp->tx_skbuff = kcalloc(mdp->num_tx_ring, sizeof(*mdp->tx_skbuff),
1359				 GFP_KERNEL);
1360	if (!mdp->tx_skbuff)
1361		goto ring_free;
1362
1363	/* Allocate all Rx descriptors. */
1364	rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
1365	mdp->rx_ring = dma_alloc_coherent(&mdp->pdev->dev, rx_ringsize,
1366					  &mdp->rx_desc_dma, GFP_KERNEL);
1367	if (!mdp->rx_ring)
1368		goto ring_free;
1369
1370	mdp->dirty_rx = 0;
1371
1372	/* Allocate all Tx descriptors. */
1373	tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
1374	mdp->tx_ring = dma_alloc_coherent(&mdp->pdev->dev, tx_ringsize,
1375					  &mdp->tx_desc_dma, GFP_KERNEL);
1376	if (!mdp->tx_ring)
1377		goto ring_free;
1378	return 0;
1379
1380ring_free:
1381	/* Free Rx and Tx skb ring buffer and DMA buffer */
1382	sh_eth_ring_free(ndev);
1383
1384	return -ENOMEM;
1385}
1386
1387static int sh_eth_dev_init(struct net_device *ndev)
1388{
1389	struct sh_eth_private *mdp = netdev_priv(ndev);
1390	int ret;
1391
1392	/* Soft Reset */
1393	ret = mdp->cd->soft_reset(ndev);
1394	if (ret)
1395		return ret;
1396
1397	if (mdp->cd->rmiimode)
1398		sh_eth_write(ndev, 0x1, RMIIMODE);
1399
1400	/* Descriptor format */
1401	sh_eth_ring_format(ndev);
1402	if (mdp->cd->rpadir)
1403		sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);
1404
1405	/* Mask all sh_eth interrupts */
1406	sh_eth_write(ndev, 0, EESIPR);
1407
1408#if defined(__LITTLE_ENDIAN)
1409	if (mdp->cd->hw_swap)
1410		sh_eth_write(ndev, EDMR_EL, EDMR);
1411	else
1412#endif
1413		sh_eth_write(ndev, 0, EDMR);
1414
1415	/* FIFO size set */
1416	sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
1417	sh_eth_write(ndev, 0, TFTR);
1418
1419	/* Frame recv control (enable multiple-packets per rx irq) */
1420	sh_eth_write(ndev, RMCR_RNC, RMCR);
1421
1422	sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER);
1423
1424	if (mdp->cd->bculr)
1425		sh_eth_write(ndev, 0x800, BCULR);	/* Burst cycle set */
1426
1427	sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);
1428
1429	if (!mdp->cd->no_trimd)
1430		sh_eth_write(ndev, 0, TRIMD);
1431
1432	/* Recv frame limit set register */
1433	sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
1434		     RFLR);
1435
1436	sh_eth_modify(ndev, EESR, 0, 0);
1437	mdp->irq_enabled = true;
1438	sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1439
1440	/* PAUSE Prohibition */
1441	sh_eth_write(ndev, ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) |
1442		     ECMR_TE | ECMR_RE, ECMR);
1443
1444	if (mdp->cd->set_rate)
1445		mdp->cd->set_rate(ndev);
1446
1447	/* E-MAC Status Register clear */
1448	sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
1449
1450	/* E-MAC Interrupt Enable register */
1451	sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
1452
1453	/* Set MAC address */
1454	update_mac_address(ndev);
1455
1456	/* mask reset */
1457	if (mdp->cd->apr)
1458		sh_eth_write(ndev, APR_AP, APR);
1459	if (mdp->cd->mpr)
1460		sh_eth_write(ndev, MPR_MP, MPR);
1461	if (mdp->cd->tpauser)
1462		sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
1463
1464	/* Setting the Rx mode will start the Rx process. */
1465	sh_eth_write(ndev, EDRRR_R, EDRRR);
1466
1467	return ret;
1468}
1469
1470static void sh_eth_dev_exit(struct net_device *ndev)
1471{
1472	struct sh_eth_private *mdp = netdev_priv(ndev);
1473	int i;
1474
1475	/* Deactivate all TX descriptors, so DMA should stop at next
1476	 * packet boundary if it's currently running
1477	 */
1478	for (i = 0; i < mdp->num_tx_ring; i++)
1479		mdp->tx_ring[i].status &= ~cpu_to_le32(TD_TACT);
1480
1481	/* Disable TX FIFO egress to MAC */
1482	sh_eth_rcv_snd_disable(ndev);
1483
1484	/* Stop RX DMA at next packet boundary */
1485	sh_eth_write(ndev, 0, EDRRR);
1486
1487	/* Aside from TX DMA, we can't tell when the hardware is
1488	 * really stopped, so we need to reset to make sure.
1489	 * Before doing that, wait for long enough to *probably*
1490	 * finish transmitting the last packet and poll stats.
1491	 */
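	/* Illustrative arithmetic behind the bound cited below: a maximal
	 * frame of 1518 octets plus 8 octets of preamble and a 12-octet
	 * inter-frame gap is (1518 + 8 + 12) * 8 = 12304 bit times, about
	 * 1230 us at 10 Mbps, so sleeping for at least 2 ms is ample.
	 */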
1492	msleep(2); /* max frame time at 10 Mbps < 1250 us */
1493	sh_eth_get_stats(ndev);
1494	mdp->cd->soft_reset(ndev);
1495
1496	/* Set MAC address again */
1497	update_mac_address(ndev);
1498}
1499
1500/* Packet receive function */
1501static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1502{
1503	struct sh_eth_private *mdp = netdev_priv(ndev);
1504	struct sh_eth_rxdesc *rxdesc;
1505
1506	int entry = mdp->cur_rx % mdp->num_rx_ring;
1507	int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
1508	int limit;
1509	struct sk_buff *skb;
1510	u32 desc_status;
1511	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
1512	dma_addr_t dma_addr;
1513	u16 pkt_len;
1514	u32 buf_len;
1515
1516	boguscnt = min(boguscnt, *quota);
1517	limit = boguscnt;
1518	rxdesc = &mdp->rx_ring[entry];
1519	while (!(rxdesc->status & cpu_to_le32(RD_RACT))) {
1520		/* RACT bit must be checked before all the following reads */
1521		dma_rmb();
1522		desc_status = le32_to_cpu(rxdesc->status);
1523		pkt_len = le32_to_cpu(rxdesc->len) & RD_RFL;
1524
1525		if (--boguscnt < 0)
1526			break;
1527
1528		netif_info(mdp, rx_status, ndev,
1529			   "rx entry %d status 0x%08x len %d\n",
1530			   entry, desc_status, pkt_len);
1531
1532		if (!(desc_status & RDFEND))
1533			ndev->stats.rx_length_errors++;
1534
1535		/* On almost all GETHER/ETHER controllers, the Receive Frame
1536		 * State (RFS) bits in Receive Descriptor 0 occupy bits 9 to 0.
1537		 * However, on the R8A7740 and the R7S72100 the RFS bits occupy
1538		 * bits 25 to 16, so the driver needs to shift the status right
1539		 * by 16 to line them up.
1540		 */
1541		if (mdp->cd->hw_checksum)
1542			desc_status >>= 16;
1543
1544		skb = mdp->rx_skbuff[entry];
1545		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
1546				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
1547			ndev->stats.rx_errors++;
1548			if (desc_status & RD_RFS1)
1549				ndev->stats.rx_crc_errors++;
1550			if (desc_status & RD_RFS2)
1551				ndev->stats.rx_frame_errors++;
1552			if (desc_status & RD_RFS3)
1553				ndev->stats.rx_length_errors++;
1554			if (desc_status & RD_RFS4)
1555				ndev->stats.rx_length_errors++;
1556			if (desc_status & RD_RFS6)
1557				ndev->stats.rx_missed_errors++;
1558			if (desc_status & RD_RFS10)
1559				ndev->stats.rx_over_errors++;
1560		} else	if (skb) {
1561			dma_addr = le32_to_cpu(rxdesc->addr);
1562			if (!mdp->cd->hw_swap)
1563				sh_eth_soft_swap(
1564					phys_to_virt(ALIGN(dma_addr, 4)),
1565					pkt_len + 2);
1566			mdp->rx_skbuff[entry] = NULL;
1567			if (mdp->cd->rpadir)
1568				skb_reserve(skb, NET_IP_ALIGN);
1569			dma_unmap_single(&mdp->pdev->dev, dma_addr,
1570					 ALIGN(mdp->rx_buf_sz, 32),
1571					 DMA_FROM_DEVICE);
1572			skb_put(skb, pkt_len);
1573			skb->protocol = eth_type_trans(skb, ndev);
1574			netif_receive_skb(skb);
1575			ndev->stats.rx_packets++;
1576			ndev->stats.rx_bytes += pkt_len;
1577			if (desc_status & RD_RFS8)
1578				ndev->stats.multicast++;
1579		}
1580		entry = (++mdp->cur_rx) % mdp->num_rx_ring;
1581		rxdesc = &mdp->rx_ring[entry];
1582	}
1583
1584	/* Refill the Rx ring buffers. */
1585	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
1586		entry = mdp->dirty_rx % mdp->num_rx_ring;
1587		rxdesc = &mdp->rx_ring[entry];
1588		/* The buffer size is aligned to a 32-byte boundary. */
1589		buf_len = ALIGN(mdp->rx_buf_sz, 32);
1590		rxdesc->len = cpu_to_le32(buf_len << 16);
1591
1592		if (mdp->rx_skbuff[entry] == NULL) {
1593			skb = netdev_alloc_skb(ndev, skbuff_size);
1594			if (skb == NULL)
1595				break;	/* Better luck next round. */
1596			sh_eth_set_receive_align(skb);
1597			dma_addr = dma_map_single(&mdp->pdev->dev, skb->data,
1598						  buf_len, DMA_FROM_DEVICE);
1599			if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
1600				kfree_skb(skb);
1601				break;
1602			}
1603			mdp->rx_skbuff[entry] = skb;
1604
1605			skb_checksum_none_assert(skb);
1606			rxdesc->addr = cpu_to_le32(dma_addr);
1607		}
1608		dma_wmb(); /* RACT bit must be set after all the above writes */
1609		if (entry >= mdp->num_rx_ring - 1)
1610			rxdesc->status |=
1611				cpu_to_le32(RD_RACT | RD_RFP | RD_RDLE);
1612		else
1613			rxdesc->status |= cpu_to_le32(RD_RACT | RD_RFP);
1614	}
1615
1616	/* Restart Rx engine if stopped. */
1617	/* If we don't need to check status, don't. -KDU */
1618	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
1619		/* fix the values for the next receiving if RDE is set */
1620		if (intr_status & EESR_RDE && !mdp->cd->no_xdfar) {
1621			u32 count = (sh_eth_read(ndev, RDFAR) -
1622				     sh_eth_read(ndev, RDLAR)) >> 4;
1623
1624			mdp->cur_rx = count;
1625			mdp->dirty_rx = count;
1626		}
1627		sh_eth_write(ndev, EDRRR_R, EDRRR);
1628	}
1629
1630	*quota -= limit - boguscnt - 1;
1631
1632	return *quota <= 0;
1633}
1634
1635static void sh_eth_rcv_snd_disable(struct net_device *ndev)
1636{
1637	/* disable tx and rx */
1638	sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0);
1639}
1640
1641static void sh_eth_rcv_snd_enable(struct net_device *ndev)
1642{
1643	/* enable tx and rx */
1644	sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE);
1645}
1646
1647/* E-MAC interrupt handler */
1648static void sh_eth_emac_interrupt(struct net_device *ndev)
1649{
1650	struct sh_eth_private *mdp = netdev_priv(ndev);
1651	u32 felic_stat;
1652	u32 link_stat;
1653
1654	felic_stat = sh_eth_read(ndev, ECSR) & sh_eth_read(ndev, ECSIPR);
1655	sh_eth_write(ndev, felic_stat, ECSR);	/* clear int */
1656	if (felic_stat & ECSR_ICD)
1657		ndev->stats.tx_carrier_errors++;
1658	if (felic_stat & ECSR_MPD)
1659		pm_wakeup_event(&mdp->pdev->dev, 0);
1660	if (felic_stat & ECSR_LCHNG) {
1661		/* Link Changed */
1662		if (mdp->cd->no_psr || mdp->no_ether_link)
1663			return;
1664		link_stat = sh_eth_read(ndev, PSR);
1665		if (mdp->ether_link_active_low)
1666			link_stat = ~link_stat;
1667		if (!(link_stat & PHY_ST_LINK)) {
1668			sh_eth_rcv_snd_disable(ndev);
1669		} else {
1670			/* Link Up */
1671			sh_eth_modify(ndev, EESIPR, EESIPR_ECIIP, 0);
1672			/* clear int */
1673			sh_eth_modify(ndev, ECSR, 0, 0);
1674			sh_eth_modify(ndev, EESIPR, EESIPR_ECIIP, EESIPR_ECIIP);
1675			/* enable tx and rx */
1676			sh_eth_rcv_snd_enable(ndev);
1677		}
1678	}
1679}
1680
1681/* error control function */
1682static void sh_eth_error(struct net_device *ndev, u32 intr_status)
1683{
1684	struct sh_eth_private *mdp = netdev_priv(ndev);
1685	u32 mask;
1686
1687	if (intr_status & EESR_TWB) {
1688		/* Unused write back interrupt */
1689		if (intr_status & EESR_TABT) {	/* Transmit Abort int */
1690			ndev->stats.tx_aborted_errors++;
1691			netif_err(mdp, tx_err, ndev, "Transmit Abort\n");
1692		}
1693	}
1694
1695	if (intr_status & EESR_RABT) {
1696		/* Receive Abort int */
1697		if (intr_status & EESR_RFRMER) {
1698			/* Receive Frame Overflow int */
1699			ndev->stats.rx_frame_errors++;
1700		}
1701	}
1702
1703	if (intr_status & EESR_TDE) {
1704		/* Transmit Descriptor Empty int */
1705		ndev->stats.tx_fifo_errors++;
1706		netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n");
1707	}
1708
1709	if (intr_status & EESR_TFE) {
1710		/* FIFO under flow */
1711		ndev->stats.tx_fifo_errors++;
1712		netif_err(mdp, tx_err, ndev, "Transmit FIFO Under flow\n");
1713	}
1714
1715	if (intr_status & EESR_RDE) {
1716		/* Receive Descriptor Empty int */
1717		ndev->stats.rx_over_errors++;
1718	}
1719
1720	if (intr_status & EESR_RFE) {
1721		/* Receive FIFO Overflow int */
1722		ndev->stats.rx_fifo_errors++;
1723	}
1724
1725	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
1726		/* Address Error */
1727		ndev->stats.tx_fifo_errors++;
1728		netif_err(mdp, tx_err, ndev, "Address Error\n");
1729	}
1730
1731	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
1732	if (mdp->cd->no_ade)
1733		mask &= ~EESR_ADE;
1734	if (intr_status & mask) {
1735		/* Tx error */
1736		u32 edtrr = sh_eth_read(ndev, EDTRR);
1737
1738		/* dmesg */
1739		netdev_err(ndev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
1740			   intr_status, mdp->cur_tx, mdp->dirty_tx,
1741			   (u32)ndev->state, edtrr);
1742		/* dirty buffer free */
1743		sh_eth_tx_free(ndev, true);
1744
1745		/* SH7712 BUG */
1746		if (edtrr ^ mdp->cd->edtrr_trns) {
1747			/* tx dma start */
1748			sh_eth_write(ndev, mdp->cd->edtrr_trns, EDTRR);
1749		}
1750		/* wakeup */
1751		netif_wake_queue(ndev);
1752	}
1753}
1754
1755static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
1756{
1757	struct net_device *ndev = netdev;
1758	struct sh_eth_private *mdp = netdev_priv(ndev);
1759	struct sh_eth_cpu_data *cd = mdp->cd;
1760	irqreturn_t ret = IRQ_NONE;
1761	u32 intr_status, intr_enable;
1762
1763	spin_lock(&mdp->lock);
1764
1765	/* Get interrupt status */
1766	intr_status = sh_eth_read(ndev, EESR);
1767	/* Mask it with the interrupt mask, forcing the ECI interrupt to be
1768	 * always enabled since it is the one that comes through regardless
1769	 * of the mask, and we need to fully handle it in
1770	 * sh_eth_emac_interrupt() in order to quench it, as it doesn't get
1771	 * cleared by just writing 1 to the ECI bit.
1772	 */
1773	intr_enable = sh_eth_read(ndev, EESIPR);
1774	intr_status &= intr_enable | EESIPR_ECIIP;
1775	if (intr_status & (EESR_RX_CHECK | cd->tx_check | EESR_ECI |
1776			   cd->eesr_err_check))
1777		ret = IRQ_HANDLED;
1778	else
1779		goto out;
1780
1781	if (unlikely(!mdp->irq_enabled)) {
1782		sh_eth_write(ndev, 0, EESIPR);
1783		goto out;
1784	}
1785
1786	if (intr_status & EESR_RX_CHECK) {
1787		if (napi_schedule_prep(&mdp->napi)) {
1788			/* Mask Rx interrupts */
1789			sh_eth_write(ndev, intr_enable & ~EESR_RX_CHECK,
1790				     EESIPR);
1791			__napi_schedule(&mdp->napi);
1792		} else {
1793			netdev_warn(ndev,
1794				    "ignoring interrupt, status 0x%08x, mask 0x%08x.\n",
1795				    intr_status, intr_enable);
1796		}
1797	}
1798
1799	/* Tx Check */
1800	if (intr_status & cd->tx_check) {
1801		/* Clear Tx interrupts */
1802		sh_eth_write(ndev, intr_status & cd->tx_check, EESR);
1803
1804		sh_eth_tx_free(ndev, true);
1805		netif_wake_queue(ndev);
1806	}
1807
1808	/* E-MAC interrupt */
1809	if (intr_status & EESR_ECI)
1810		sh_eth_emac_interrupt(ndev);
1811
1812	if (intr_status & cd->eesr_err_check) {
1813		/* Clear error interrupts */
1814		sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR);
1815
1816		sh_eth_error(ndev, intr_status);
1817	}
1818
1819out:
1820	spin_unlock(&mdp->lock);
1821
1822	return ret;
1823}
1824
1825static int sh_eth_poll(struct napi_struct *napi, int budget)
1826{
1827	struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private,
1828						  napi);
1829	struct net_device *ndev = napi->dev;
1830	int quota = budget;
1831	u32 intr_status;
1832
1833	for (;;) {
1834		intr_status = sh_eth_read(ndev, EESR);
1835		if (!(intr_status & EESR_RX_CHECK))
1836			break;
1837		/* Clear Rx interrupts */
1838		sh_eth_write(ndev, intr_status & EESR_RX_CHECK, EESR);
1839
1840		if (sh_eth_rx(ndev, intr_status, &quota))
1841			goto out;
1842	}
1843
1844	napi_complete(napi);
1845
1846	/* Reenable Rx interrupts */
1847	if (mdp->irq_enabled)
1848		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1849out:
1850	return budget - quota;
1851}
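/* Usage note (general NAPI contract, not driver-specific): the return
 * value is the number of Rx packets processed in this poll. For example,
 * with budget = 64, if sh_eth_rx() consumed 4 units of quota the function
 * returns 4; only when the return value is less than budget has
 * napi_complete() run and the Rx interrupt been re-enabled.
 */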
1852
1853/* PHY state control function */
1854static void sh_eth_adjust_link(struct net_device *ndev)
1855{
1856	struct sh_eth_private *mdp = netdev_priv(ndev);
1857	struct phy_device *phydev = ndev->phydev;
1858	int new_state = 0;
1859
1860	if (phydev->link) {
1861		if (phydev->duplex != mdp->duplex) {
1862			new_state = 1;
1863			mdp->duplex = phydev->duplex;
1864			if (mdp->cd->set_duplex)
1865				mdp->cd->set_duplex(ndev);
1866		}
1867
1868		if (phydev->speed != mdp->speed) {
1869			new_state = 1;
1870			mdp->speed = phydev->speed;
1871			if (mdp->cd->set_rate)
1872				mdp->cd->set_rate(ndev);
1873		}
1874		if (!mdp->link) {
1875			sh_eth_modify(ndev, ECMR, ECMR_TXF, 0);
1876			new_state = 1;
1877			mdp->link = phydev->link;
1878			if (mdp->cd->no_psr || mdp->no_ether_link)
1879				sh_eth_rcv_snd_enable(ndev);
1880		}
1881	} else if (mdp->link) {
1882		new_state = 1;
1883		mdp->link = 0;
1884		mdp->speed = 0;
1885		mdp->duplex = -1;
1886		if (mdp->cd->no_psr || mdp->no_ether_link)
1887			sh_eth_rcv_snd_disable(ndev);
1888	}
1889
1890	if (new_state && netif_msg_link(mdp))
1891		phy_print_status(phydev);
1892}
1893
1894/* PHY init function */
1895static int sh_eth_phy_init(struct net_device *ndev)
1896{
1897	struct device_node *np = ndev->dev.parent->of_node;
1898	struct sh_eth_private *mdp = netdev_priv(ndev);
1899	struct phy_device *phydev;
1900
1901	mdp->link = 0;
1902	mdp->speed = 0;
1903	mdp->duplex = -1;
1904
1905	/* Try connect to PHY */
1906	if (np) {
1907		struct device_node *pn;
1908
1909		pn = of_parse_phandle(np, "phy-handle", 0);
1910		phydev = of_phy_connect(ndev, pn,
1911					sh_eth_adjust_link, 0,
1912					mdp->phy_interface);
1913
1914		of_node_put(pn);
1915		if (!phydev)
1916			phydev = ERR_PTR(-ENOENT);
1917	} else {
1918		char phy_id[MII_BUS_ID_SIZE + 3];
1919
1920		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
1921			 mdp->mii_bus->id, mdp->phy_id);
1922
1923		phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
1924				     mdp->phy_interface);
1925	}
1926
1927	if (IS_ERR(phydev)) {
1928		netdev_err(ndev, "failed to connect PHY\n");
1929		return PTR_ERR(phydev);
1930	}
1931
1932	/* mask with MAC supported features */
1933	if (mdp->cd->register_type != SH_ETH_REG_GIGABIT) {
1934		int err = phy_set_max_speed(phydev, SPEED_100);
1935		if (err) {
1936			netdev_err(ndev, "failed to limit PHY to 100 Mbit/s\n");
1937			phy_disconnect(phydev);
1938			return err;
1939		}
1940	}
1941
1942	phy_attached_info(phydev);
1943
1944	return 0;
1945}
1946
1947/* PHY control start function */
1948static int sh_eth_phy_start(struct net_device *ndev)
1949{
1950	int ret;
1951
1952	ret = sh_eth_phy_init(ndev);
1953	if (ret)
1954		return ret;
1955
1956	phy_start(ndev->phydev);
1957
1958	return 0;
1959}
1960
1961static int sh_eth_get_link_ksettings(struct net_device *ndev,
1962				     struct ethtool_link_ksettings *cmd)
1963{
1964	struct sh_eth_private *mdp = netdev_priv(ndev);
1965	unsigned long flags;
1966
1967	if (!ndev->phydev)
1968		return -ENODEV;
1969
1970	spin_lock_irqsave(&mdp->lock, flags);
1971	phy_ethtool_ksettings_get(ndev->phydev, cmd);
1972	spin_unlock_irqrestore(&mdp->lock, flags);
1973
1974	return 0;
1975}
1976
1977static int sh_eth_set_link_ksettings(struct net_device *ndev,
1978				     const struct ethtool_link_ksettings *cmd)
1979{
1980	struct sh_eth_private *mdp = netdev_priv(ndev);
1981	unsigned long flags;
1982	int ret;
1983
1984	if (!ndev->phydev)
1985		return -ENODEV;
1986
1987	spin_lock_irqsave(&mdp->lock, flags);
1988
1989	/* disable tx and rx */
1990	sh_eth_rcv_snd_disable(ndev);
1991
1992	ret = phy_ethtool_ksettings_set(ndev->phydev, cmd);
1993	if (ret)
1994		goto error_exit;
1995
1996	if (cmd->base.duplex == DUPLEX_FULL)
1997		mdp->duplex = 1;
1998	else
1999		mdp->duplex = 0;
2000
2001	if (mdp->cd->set_duplex)
2002		mdp->cd->set_duplex(ndev);
2003
2004error_exit:
2005	mdelay(1);
2006
2007	/* enable tx and rx */
2008	sh_eth_rcv_snd_enable(ndev);
2009
2010	spin_unlock_irqrestore(&mdp->lock, flags);
2011
2012	return ret;
2013}
2014
2015/* If it is ever necessary to increase SH_ETH_REG_DUMP_MAX_REGS, the
2016 * version must be bumped as well.  Just adding registers up to that
2017 * limit is fine, as long as the existing register indices don't
2018 * change.
2019 */
2020#define SH_ETH_REG_DUMP_VERSION		1
2021#define SH_ETH_REG_DUMP_MAX_REGS	256
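/* Resulting dump layout (a sketch of the format produced below): with
 * SH_ETH_REG_DUMP_MAX_REGS = 256 the dump starts with 256 / 32 = 8
 * bitmap words, one bit per possible register index, followed by one
 * u32 value per register whose bit is set, in index order. The byte
 * length reported to ethtool is simply len * 4.
 */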
2022
2023static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
2024{
2025	struct sh_eth_private *mdp = netdev_priv(ndev);
2026	struct sh_eth_cpu_data *cd = mdp->cd;
2027	u32 *valid_map;
2028	size_t len;
2029
2030	BUILD_BUG_ON(SH_ETH_MAX_REGISTER_OFFSET > SH_ETH_REG_DUMP_MAX_REGS);
2031
2032	/* Dump starts with a bitmap that tells ethtool which
2033	 * registers are defined for this chip.
2034	 */
2035	len = DIV_ROUND_UP(SH_ETH_REG_DUMP_MAX_REGS, 32);
2036	if (buf) {
2037		valid_map = buf;
2038		buf += len;
2039	} else {
2040		valid_map = NULL;
2041	}
2042
2043	/* Add a register to the dump, if it has a defined offset.
2044	 * This automatically skips most undefined registers, but for
2045	 * some it is also necessary to check a capability flag in
2046	 * struct sh_eth_cpu_data.
2047	 */
2048#define mark_reg_valid(reg) valid_map[reg / 32] |= 1U << (reg % 32)
2049#define add_reg_from(reg, read_expr) do {				\
2050		if (mdp->reg_offset[reg] != SH_ETH_OFFSET_INVALID) {	\
2051			if (buf) {					\
2052				mark_reg_valid(reg);			\
2053				*buf++ = read_expr;			\
2054			}						\
2055			++len;						\
2056		}							\
2057	} while (0)
2058#define add_reg(reg) add_reg_from(reg, sh_eth_read(ndev, reg))
2059#define add_tsu_reg(reg) add_reg_from(reg, sh_eth_tsu_read(mdp, reg))
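/* Illustrative expansion: add_reg(EDSR) becomes, roughly,
 *
 *	if (mdp->reg_offset[EDSR] != SH_ETH_OFFSET_INVALID) {
 *		if (buf) {
 *			valid_map[EDSR / 32] |= 1U << (EDSR % 32);
 *			*buf++ = sh_eth_read(ndev, EDSR);
 *		}
 *		++len;
 *	}
 *
 * Note that len is counted even when buf is NULL, which is how
 * sh_eth_get_regs_len() sizes the dump without touching the hardware.
 */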
2060
2061	add_reg(EDSR);
2062	add_reg(EDMR);
2063	add_reg(EDTRR);
2064	add_reg(EDRRR);
2065	add_reg(EESR);
2066	add_reg(EESIPR);
2067	add_reg(TDLAR);
2068	add_reg(TDFAR);
2069	add_reg(TDFXR);
2070	add_reg(TDFFR);
2071	add_reg(RDLAR);
2072	add_reg(RDFAR);
2073	add_reg(RDFXR);
2074	add_reg(RDFFR);
2075	add_reg(TRSCER);
2076	add_reg(RMFCR);
2077	add_reg(TFTR);
2078	add_reg(FDR);
2079	add_reg(RMCR);
2080	add_reg(TFUCR);
2081	add_reg(RFOCR);
2082	if (cd->rmiimode)
2083		add_reg(RMIIMODE);
2084	add_reg(FCFTR);
2085	if (cd->rpadir)
2086		add_reg(RPADIR);
2087	if (!cd->no_trimd)
2088		add_reg(TRIMD);
2089	add_reg(ECMR);
2090	add_reg(ECSR);
2091	add_reg(ECSIPR);
2092	add_reg(PIR);
2093	if (!cd->no_psr)
2094		add_reg(PSR);
2095	add_reg(RDMLR);
2096	add_reg(RFLR);
2097	add_reg(IPGR);
2098	if (cd->apr)
2099		add_reg(APR);
2100	if (cd->mpr)
2101		add_reg(MPR);
2102	add_reg(RFCR);
2103	add_reg(RFCF);
2104	if (cd->tpauser)
2105		add_reg(TPAUSER);
2106	add_reg(TPAUSECR);
2107	add_reg(GECMR);
2108	if (cd->bculr)
2109		add_reg(BCULR);
2110	add_reg(MAHR);
2111	add_reg(MALR);
2112	add_reg(TROCR);
2113	add_reg(CDCR);
2114	add_reg(LCCR);
2115	add_reg(CNDCR);
2116	add_reg(CEFCR);
2117	add_reg(FRECR);
2118	add_reg(TSFRCR);
2119	add_reg(TLFRCR);
2120	add_reg(CERCR);
2121	add_reg(CEECR);
2122	add_reg(MAFCR);
2123	if (cd->rtrate)
2124		add_reg(RTRATE);
2125	if (cd->hw_checksum)
2126		add_reg(CSMR);
2127	if (cd->select_mii)
2128		add_reg(RMII_MII);
2129	if (cd->tsu) {
2130		add_tsu_reg(ARSTR);
2131		add_tsu_reg(TSU_CTRST);
2132		add_tsu_reg(TSU_FWEN0);
2133		add_tsu_reg(TSU_FWEN1);
2134		add_tsu_reg(TSU_FCM);
2135		add_tsu_reg(TSU_BSYSL0);
2136		add_tsu_reg(TSU_BSYSL1);
2137		add_tsu_reg(TSU_PRISL0);
2138		add_tsu_reg(TSU_PRISL1);
2139		add_tsu_reg(TSU_FWSL0);
2140		add_tsu_reg(TSU_FWSL1);
2141		add_tsu_reg(TSU_FWSLC);
2142		add_tsu_reg(TSU_QTAGM0);
2143		add_tsu_reg(TSU_QTAGM1);
2144		add_tsu_reg(TSU_FWSR);
2145		add_tsu_reg(TSU_FWINMK);
2146		add_tsu_reg(TSU_ADQT0);
2147		add_tsu_reg(TSU_ADQT1);
2148		add_tsu_reg(TSU_VTAG0);
2149		add_tsu_reg(TSU_VTAG1);
2150		add_tsu_reg(TSU_ADSBSY);
2151		add_tsu_reg(TSU_TEN);
2152		add_tsu_reg(TSU_POST1);
2153		add_tsu_reg(TSU_POST2);
2154		add_tsu_reg(TSU_POST3);
2155		add_tsu_reg(TSU_POST4);
2156		/* This is the start of a table, not just a single register. */
2157		if (buf) {
2158			unsigned int i;
2159
2160			mark_reg_valid(TSU_ADRH0);
2161			for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES * 2; i++)
2162				*buf++ = ioread32(mdp->tsu_addr +
2163						  mdp->reg_offset[TSU_ADRH0] +
2164						  i * 4);
2165		}
2166		len += SH_ETH_TSU_CAM_ENTRIES * 2;
2167	}
2168
2169#undef mark_reg_valid
2170#undef add_reg_from
2171#undef add_reg
2172#undef add_tsu_reg
2173
2174	return len * 4;
2175}
2176
2177static int sh_eth_get_regs_len(struct net_device *ndev)
2178{
2179	return __sh_eth_get_regs(ndev, NULL);
2180}
2181
2182static void sh_eth_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
2183			    void *buf)
2184{
2185	struct sh_eth_private *mdp = netdev_priv(ndev);
2186
2187	regs->version = SH_ETH_REG_DUMP_VERSION;
2188
2189	pm_runtime_get_sync(&mdp->pdev->dev);
2190	__sh_eth_get_regs(ndev, buf);
2191	pm_runtime_put_sync(&mdp->pdev->dev);
2192}
2193
2194static int sh_eth_nway_reset(struct net_device *ndev)
2195{
2196	struct sh_eth_private *mdp = netdev_priv(ndev);
2197	unsigned long flags;
2198	int ret;
2199
2200	if (!ndev->phydev)
2201		return -ENODEV;
2202
2203	spin_lock_irqsave(&mdp->lock, flags);
2204	ret = phy_start_aneg(ndev->phydev);
2205	spin_unlock_irqrestore(&mdp->lock, flags);
2206
2207	return ret;
2208}
2209
2210static u32 sh_eth_get_msglevel(struct net_device *ndev)
2211{
2212	struct sh_eth_private *mdp = netdev_priv(ndev);
2213	return mdp->msg_enable;
2214}
2215
2216static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
2217{
2218	struct sh_eth_private *mdp = netdev_priv(ndev);
2219	mdp->msg_enable = value;
2220}
2221
2222static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
2223	"rx_current", "tx_current",
2224	"rx_dirty", "tx_dirty",
2225};
2226#define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)
2227
2228static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
2229{
2230	switch (sset) {
2231	case ETH_SS_STATS:
2232		return SH_ETH_STATS_LEN;
2233	default:
2234		return -EOPNOTSUPP;
2235	}
2236}
2237
2238static void sh_eth_get_ethtool_stats(struct net_device *ndev,
2239				     struct ethtool_stats *stats, u64 *data)
2240{
2241	struct sh_eth_private *mdp = netdev_priv(ndev);
2242	int i = 0;
2243
2244	/* device-specific stats */
2245	data[i++] = mdp->cur_rx;
2246	data[i++] = mdp->cur_tx;
2247	data[i++] = mdp->dirty_rx;
2248	data[i++] = mdp->dirty_tx;
2249}
2250
2251static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
2252{
2253	switch (stringset) {
2254	case ETH_SS_STATS:
2255		memcpy(data, *sh_eth_gstrings_stats,
2256		       sizeof(sh_eth_gstrings_stats));
2257		break;
2258	}
2259}
2260
2261static void sh_eth_get_ringparam(struct net_device *ndev,
2262				 struct ethtool_ringparam *ring)
2263{
2264	struct sh_eth_private *mdp = netdev_priv(ndev);
2265
2266	ring->rx_max_pending = RX_RING_MAX;
2267	ring->tx_max_pending = TX_RING_MAX;
2268	ring->rx_pending = mdp->num_rx_ring;
2269	ring->tx_pending = mdp->num_tx_ring;
2270}
2271
2272static int sh_eth_set_ringparam(struct net_device *ndev,
2273				struct ethtool_ringparam *ring)
2274{
2275	struct sh_eth_private *mdp = netdev_priv(ndev);
2276	int ret;
2277
2278	if (ring->tx_pending > TX_RING_MAX ||
2279	    ring->rx_pending > RX_RING_MAX ||
2280	    ring->tx_pending < TX_RING_MIN ||
2281	    ring->rx_pending < RX_RING_MIN)
2282		return -EINVAL;
2283	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
2284		return -EINVAL;
2285
2286	if (netif_running(ndev)) {
2287		netif_device_detach(ndev);
2288		netif_tx_disable(ndev);
2289
2290		/* Serialise with the interrupt handler and NAPI, then
2291		 * disable interrupts.  We have to clear the
2292		 * irq_enabled flag first to ensure that interrupts
2293		 * won't be re-enabled.
2294		 */
2295		mdp->irq_enabled = false;
2296		synchronize_irq(ndev->irq);
2297		napi_synchronize(&mdp->napi);
2298		sh_eth_write(ndev, 0x0000, EESIPR);
2299
2300		sh_eth_dev_exit(ndev);
2301
2302		/* Free all the skbuffs in the Rx queue and the DMA buffers. */
2303		sh_eth_ring_free(ndev);
2304	}
2305
2306	/* Set new parameters */
2307	mdp->num_rx_ring = ring->rx_pending;
2308	mdp->num_tx_ring = ring->tx_pending;
2309
2310	if (netif_running(ndev)) {
2311		ret = sh_eth_ring_init(ndev);
2312		if (ret < 0) {
2313			netdev_err(ndev, "%s: sh_eth_ring_init failed.\n",
2314				   __func__);
2315			return ret;
2316		}
2317		ret = sh_eth_dev_init(ndev);
2318		if (ret < 0) {
2319			netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
2320				   __func__);
2321			return ret;
2322		}
2323
2324		netif_device_attach(ndev);
2325	}
2326
2327	return 0;
2328}
2329
2330static void sh_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2331{
2332	struct sh_eth_private *mdp = netdev_priv(ndev);
2333
2334	wol->supported = 0;
2335	wol->wolopts = 0;
2336
2337	if (mdp->cd->magic) {
2338		wol->supported = WAKE_MAGIC;
2339		wol->wolopts = mdp->wol_enabled ? WAKE_MAGIC : 0;
2340	}
2341}
2342
2343static int sh_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2344{
2345	struct sh_eth_private *mdp = netdev_priv(ndev);
2346
2347	if (!mdp->cd->magic || wol->wolopts & ~WAKE_MAGIC)
2348		return -EOPNOTSUPP;
2349
2350	mdp->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);
2351
2352	device_set_wakeup_enable(&mdp->pdev->dev, mdp->wol_enabled);
2353
2354	return 0;
2355}
2356
2357static const struct ethtool_ops sh_eth_ethtool_ops = {
2358	.get_regs_len	= sh_eth_get_regs_len,
2359	.get_regs	= sh_eth_get_regs,
2360	.nway_reset	= sh_eth_nway_reset,
2361	.get_msglevel	= sh_eth_get_msglevel,
2362	.set_msglevel	= sh_eth_set_msglevel,
2363	.get_link	= ethtool_op_get_link,
2364	.get_strings	= sh_eth_get_strings,
2365	.get_ethtool_stats  = sh_eth_get_ethtool_stats,
2366	.get_sset_count     = sh_eth_get_sset_count,
2367	.get_ringparam	= sh_eth_get_ringparam,
2368	.set_ringparam	= sh_eth_set_ringparam,
2369	.get_link_ksettings = sh_eth_get_link_ksettings,
2370	.set_link_ksettings = sh_eth_set_link_ksettings,
2371	.get_wol	= sh_eth_get_wol,
2372	.set_wol	= sh_eth_set_wol,
2373};
2374
2375/* network device open function */
2376static int sh_eth_open(struct net_device *ndev)
2377{
2378	struct sh_eth_private *mdp = netdev_priv(ndev);
2379	int ret;
2380
2381	pm_runtime_get_sync(&mdp->pdev->dev);
2382
2383	napi_enable(&mdp->napi);
2384
2385	ret = request_irq(ndev->irq, sh_eth_interrupt,
2386			  mdp->cd->irq_flags, ndev->name, ndev);
2387	if (ret) {
2388		netdev_err(ndev, "Cannot assign IRQ number\n");
2389		goto out_napi_off;
2390	}
2391
2392	/* Descriptor set */
2393	ret = sh_eth_ring_init(ndev);
2394	if (ret)
2395		goto out_free_irq;
2396
2397	/* device init */
2398	ret = sh_eth_dev_init(ndev);
2399	if (ret)
2400		goto out_free_irq;
2401
2402	/* PHY control start */
2403	ret = sh_eth_phy_start(ndev);
2404	if (ret)
2405		goto out_free_irq;
2406
2407	netif_start_queue(ndev);
2408
2409	mdp->is_opened = 1;
2410
2411	return ret;
2412
2413out_free_irq:
2414	free_irq(ndev->irq, ndev);
2415out_napi_off:
2416	napi_disable(&mdp->napi);
2417	pm_runtime_put_sync(&mdp->pdev->dev);
2418	return ret;
2419}
2420
2421/* Timeout function */
2422static void sh_eth_tx_timeout(struct net_device *ndev)
2423{
2424	struct sh_eth_private *mdp = netdev_priv(ndev);
2425	struct sh_eth_rxdesc *rxdesc;
2426	int i;
2427
2428	netif_stop_queue(ndev);
2429
2430	netif_err(mdp, timer, ndev,
2431		  "transmit timed out, status %8.8x, resetting...\n",
2432		  sh_eth_read(ndev, EESR));
2433
2434	/* tx_errors count up */
2435	ndev->stats.tx_errors++;
2436
2437	/* Free all the skbuffs in the Rx and Tx queues. */
2438	for (i = 0; i < mdp->num_rx_ring; i++) {
2439		rxdesc = &mdp->rx_ring[i];
2440		rxdesc->status = cpu_to_le32(0);
2441		rxdesc->addr = cpu_to_le32(0xBADF00D0);
2442		dev_kfree_skb(mdp->rx_skbuff[i]);
2443		mdp->rx_skbuff[i] = NULL;
2444	}
2445	for (i = 0; i < mdp->num_tx_ring; i++) {
2446		dev_kfree_skb(mdp->tx_skbuff[i]);
2447		mdp->tx_skbuff[i] = NULL;
2448	}
2449
2450	/* device init */
2451	sh_eth_dev_init(ndev);
2452
2453	netif_start_queue(ndev);
2454}
2455
2456/* Packet transmit function */
2457static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2458{
2459	struct sh_eth_private *mdp = netdev_priv(ndev);
2460	struct sh_eth_txdesc *txdesc;
2461	dma_addr_t dma_addr;
2462	u32 entry;
2463	unsigned long flags;
2464
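	/* Note: cur_tx and dirty_tx are free-running counters, so their
	 * difference below is the number of descriptors still in flight.
	 * E.g. with num_tx_ring == 64, cur_tx == 70 and dirty_tx == 9 leave
	 * 61 in flight (>= 64 - 4), so the queue is stopped until
	 * sh_eth_tx_free() reclaims completed slots.
	 */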
2465	spin_lock_irqsave(&mdp->lock, flags);
2466	if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
2467		if (!sh_eth_tx_free(ndev, true)) {
2468			netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
2469			netif_stop_queue(ndev);
2470			spin_unlock_irqrestore(&mdp->lock, flags);
2471			return NETDEV_TX_BUSY;
2472		}
2473	}
2474	spin_unlock_irqrestore(&mdp->lock, flags);
2475
2476	if (skb_put_padto(skb, ETH_ZLEN))
2477		return NETDEV_TX_OK;
2478
2479	entry = mdp->cur_tx % mdp->num_tx_ring;
2480	mdp->tx_skbuff[entry] = skb;
2481	txdesc = &mdp->tx_ring[entry];
2482	/* soft swap. */
2483	if (!mdp->cd->hw_swap)
2484		sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2);
2485	dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, skb->len,
2486				  DMA_TO_DEVICE);
2487	if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
2488		kfree_skb(skb);
2489		return NETDEV_TX_OK;
2490	}
2491	txdesc->addr = cpu_to_le32(dma_addr);
2492	txdesc->len  = cpu_to_le32(skb->len << 16);
2493
2494	dma_wmb(); /* TACT bit must be set after all the above writes */
2495	if (entry >= mdp->num_tx_ring - 1)
2496		txdesc->status |= cpu_to_le32(TD_TACT | TD_TDLE);
2497	else
2498		txdesc->status |= cpu_to_le32(TD_TACT);
2499
2500	mdp->cur_tx++;
2501
2502	if (!(sh_eth_read(ndev, EDTRR) & mdp->cd->edtrr_trns))
2503		sh_eth_write(ndev, mdp->cd->edtrr_trns, EDTRR);
2504
2505	return NETDEV_TX_OK;
2506}
2507
2508/* The statistics registers have write-clear behaviour, which means we
2509 * will lose any increment between the read and write.  We mitigate
2510 * this by only clearing when we read a non-zero value, so we will
2511 * never falsely report a total of zero.
2512 */
2513static void
2514sh_eth_update_stat(struct net_device *ndev, unsigned long *stat, int reg)
2515{
2516	u32 delta = sh_eth_read(ndev, reg);
2517
2518	if (delta) {
2519		*stat += delta;
2520		sh_eth_write(ndev, 0, reg);
2521	}
2522}
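/* Worked example: a counter register reading 5 adds 5 to the running total
 * and is then cleared; an increment landing between the read and the write
 * is lost, but a register reading 0 is left untouched, so the total can
 * never be falsely reported as zero.
 */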
2523
2524static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
2525{
2526	struct sh_eth_private *mdp = netdev_priv(ndev);
2527
2528	if (mdp->cd->no_tx_cntrs)
2529		return &ndev->stats;
2530
2531	if (!mdp->is_opened)
2532		return &ndev->stats;
2533
2534	sh_eth_update_stat(ndev, &ndev->stats.tx_dropped, TROCR);
2535	sh_eth_update_stat(ndev, &ndev->stats.collisions, CDCR);
2536	sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors, LCCR);
2537
2538	if (mdp->cd->cexcr) {
2539		sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
2540				   CERCR);
2541		sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
2542				   CEECR);
2543	} else {
2544		sh_eth_update_stat(ndev, &ndev->stats.tx_carrier_errors,
2545				   CNDCR);
2546	}
2547
2548	return &ndev->stats;
2549}
2550
2551/* device close function */
2552static int sh_eth_close(struct net_device *ndev)
2553{
2554	struct sh_eth_private *mdp = netdev_priv(ndev);
2555
2556	netif_stop_queue(ndev);
2557
2558	/* Serialise with the interrupt handler and NAPI, then disable
2559	 * interrupts.  We have to clear the irq_enabled flag first to
2560	 * ensure that interrupts won't be re-enabled.
2561	 */
2562	mdp->irq_enabled = false;
2563	synchronize_irq(ndev->irq);
2564	napi_disable(&mdp->napi);
2565	sh_eth_write(ndev, 0x0000, EESIPR);
2566
2567	sh_eth_dev_exit(ndev);
2568
2569	/* PHY Disconnect */
2570	if (ndev->phydev) {
2571		phy_stop(ndev->phydev);
2572		phy_disconnect(ndev->phydev);
2573	}
2574
2575	free_irq(ndev->irq, ndev);
2576
2577	/* Free all the skbuffs in the Rx queue and the DMA buffer. */
2578	sh_eth_ring_free(ndev);
2579
2580	pm_runtime_put_sync(&mdp->pdev->dev);
2581
2582	mdp->is_opened = 0;
2583
2584	return 0;
2585}
2586
2587/* ioctl to device function */
2588static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
2589{
2590	struct phy_device *phydev = ndev->phydev;
2591
2592	if (!netif_running(ndev))
2593		return -EINVAL;
2594
2595	if (!phydev)
2596		return -ENODEV;
2597
2598	return phy_mii_ioctl(phydev, rq, cmd);
2599}
2600
2601static int sh_eth_change_mtu(struct net_device *ndev, int new_mtu)
2602{
2603	if (netif_running(ndev))
2604		return -EBUSY;
2605
2606	ndev->mtu = new_mtu;
2607	netdev_update_features(ndev);
2608
2609	return 0;
2610}
2611
2612/* For TSU_POSTn. Please refer to the manual about these (strange) bitfields */
2613static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
2614					    int entry)
2615{
2616	return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4);
2617}
2618
2619static u32 sh_eth_tsu_get_post_mask(int entry)
2620{
2621	return 0x0f << (28 - ((entry % 8) * 4));
2622}
2623
2624static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
2625{
2626	return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
2627}
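/* Layout sketch, inferred from the helpers above: each TSU_POSTn register
 * packs eight CAM entries at four bits per entry, with entry 0 in bits
 * 31..28. E.g. entry 10 on port 1: the field sits in TSU_POST2
 * (10 / 8 == 1), post_mask == 0x0f << 20 and the port-1 enable bit is
 * 0x02 << 20.
 */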
2628
2629static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
2630					     int entry)
2631{
2632	struct sh_eth_private *mdp = netdev_priv(ndev);
2633	u32 tmp;
2634	void *reg_offset;
2635
2636	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
2637	tmp = ioread32(reg_offset);
2638	iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset);
2639}
2640
2641static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
2642					      int entry)
2643{
2644	struct sh_eth_private *mdp = netdev_priv(ndev);
2645	u32 own_bit, ref_mask, tmp;
2646	void *reg_offset;
2647
2648	reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
2649	own_bit = sh_eth_tsu_get_post_bit(mdp, entry);
2650	ref_mask = sh_eth_tsu_get_post_mask(entry) & ~own_bit;
2651
2652	tmp = ioread32(reg_offset);
2653	iowrite32(tmp & ~own_bit, reg_offset);
2654
2655	/* If the other port still enables the entry, return "true" */
2656	return tmp & ref_mask;
2657}
2658
2659static int sh_eth_tsu_busy(struct net_device *ndev)
2660{
2661	int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
2662	struct sh_eth_private *mdp = netdev_priv(ndev);
2663
2664	while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
2665		udelay(10);
2666		timeout--;
2667		if (timeout <= 0) {
2668			netdev_err(ndev, "%s: timeout\n", __func__);
2669			return -ETIMEDOUT;
2670		}
2671	}
2672
2673	return 0;
2674}
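/* The poll budget above works out to SH_ETH_TSU_TIMEOUT_MS milliseconds:
 * SH_ETH_TSU_TIMEOUT_MS * 100 iterations of udelay(10), i.e.
 * 100 * 10 us == 1 ms per unit.
 */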
2675
2676static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
2677				  const u8 *addr)
2678{
2679	u32 val;
2680
2681	val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
2682	iowrite32(val, reg);
2683	if (sh_eth_tsu_busy(ndev) < 0)
2684		return -EBUSY;
2685
2686	val = addr[4] << 8 | addr[5];
2687	iowrite32(val, reg + 4);
2688	if (sh_eth_tsu_busy(ndev) < 0)
2689		return -EBUSY;
2690
2691	return 0;
2692}
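/* Example: for the MAC address 00:11:22:33:44:55 the two writes above are
 * 0x00112233 (upper four octets) to reg and 0x00004455 (remaining two
 * octets) to reg + 4.
 */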
2693
2694static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
2695{
2696	u32 val;
2697
2698	val = ioread32(reg);
2699	addr[0] = (val >> 24) & 0xff;
2700	addr[1] = (val >> 16) & 0xff;
2701	addr[2] = (val >> 8) & 0xff;
2702	addr[3] = val & 0xff;
2703	val = ioread32(reg + 4);
2704	addr[4] = (val >> 8) & 0xff;
2705	addr[5] = val & 0xff;
2706}
2707
2708
2709static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
2710{
2711	struct sh_eth_private *mdp = netdev_priv(ndev);
2712	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2713	int i;
2714	u8 c_addr[ETH_ALEN];
2715
2716	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2717		sh_eth_tsu_read_entry(reg_offset, c_addr);
2718		if (ether_addr_equal(addr, c_addr))
2719			return i;
2720	}
2721
2722	return -ENOENT;
2723}
2724
2725static int sh_eth_tsu_find_empty(struct net_device *ndev)
2726{
2727	u8 blank[ETH_ALEN];
2728	int entry;
2729
2730	memset(blank, 0, sizeof(blank));
2731	entry = sh_eth_tsu_find_entry(ndev, blank);
2732	return (entry < 0) ? -ENOMEM : entry;
2733}
2734
2735static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
2736					      int entry)
2737{
2738	struct sh_eth_private *mdp = netdev_priv(ndev);
2739	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2740	int ret;
2741	u8 blank[ETH_ALEN];
2742
2743	sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
2744			 ~(1 << (31 - entry)), TSU_TEN);
2745
2746	memset(blank, 0, sizeof(blank));
2747	ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
2748	if (ret < 0)
2749		return ret;
2750	return 0;
2751}
2752
2753static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
2754{
2755	struct sh_eth_private *mdp = netdev_priv(ndev);
2756	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2757	int i, ret;
2758
2759	if (!mdp->cd->tsu)
2760		return 0;
2761
2762	i = sh_eth_tsu_find_entry(ndev, addr);
2763	if (i < 0) {
2764		/* No entry found, create one */
2765		i = sh_eth_tsu_find_empty(ndev);
2766		if (i < 0)
2767			return -ENOMEM;
2768		ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
2769		if (ret < 0)
2770			return ret;
2771
2772		/* Enable the entry */
2773		sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
2774				 (1 << (31 - i)), TSU_TEN);
2775	}
2776
2777	/* Entry found or created, enable POST */
2778	sh_eth_tsu_enable_cam_entry_post(ndev, i);
2779
2780	return 0;
2781}
2782
2783static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
2784{
2785	struct sh_eth_private *mdp = netdev_priv(ndev);
2786	int i, ret;
2787
2788	if (!mdp->cd->tsu)
2789		return 0;
2790
2791	i = sh_eth_tsu_find_entry(ndev, addr);
2792	if (i >= 0) {
2793		/* Entry found */
2794		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2795			goto done;
2796
2797		/* Disable the entry if both ports have disabled it */
2798		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2799		if (ret < 0)
2800			return ret;
2801	}
2802done:
2803	return 0;
2804}
2805
2806static int sh_eth_tsu_purge_all(struct net_device *ndev)
2807{
2808	struct sh_eth_private *mdp = netdev_priv(ndev);
2809	int i, ret;
2810
2811	if (!mdp->cd->tsu)
2812		return 0;
2813
2814	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
2815		if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2816			continue;
2817
2818		/* Disable the entry if both ports have disabled it */
2819		ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2820		if (ret < 0)
2821			return ret;
2822	}
2823
2824	return 0;
2825}
2826
2827static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
2828{
2829	struct sh_eth_private *mdp = netdev_priv(ndev);
2830	u8 addr[ETH_ALEN];
2831	void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2832	int i;
2833
2834	if (!mdp->cd->tsu)
2835		return;
2836
2837	for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2838		sh_eth_tsu_read_entry(reg_offset, addr);
2839		if (is_multicast_ether_addr(addr))
2840			sh_eth_tsu_del_entry(ndev, addr);
2841	}
2842}
2843
2844/* Update promiscuous flag and multicast filter */
2845static void sh_eth_set_rx_mode(struct net_device *ndev)
2846{
2847	struct sh_eth_private *mdp = netdev_priv(ndev);
2848	u32 ecmr_bits;
2849	int mcast_all = 0;
2850	unsigned long flags;
2851
2852	spin_lock_irqsave(&mdp->lock, flags);
2853	/* Initial condition is MCT = 1, PRM = 0.
2854	 * Depending on ndev->flags, set PRM or clear MCT
2855	 */
2856	ecmr_bits = sh_eth_read(ndev, ECMR) & ~ECMR_PRM;
2857	if (mdp->cd->tsu)
2858		ecmr_bits |= ECMR_MCT;
2859
2860	if (!(ndev->flags & IFF_MULTICAST)) {
2861		sh_eth_tsu_purge_mcast(ndev);
2862		mcast_all = 1;
2863	}
2864	if (ndev->flags & IFF_ALLMULTI) {
2865		sh_eth_tsu_purge_mcast(ndev);
2866		ecmr_bits &= ~ECMR_MCT;
2867		mcast_all = 1;
2868	}
2869
2870	if (ndev->flags & IFF_PROMISC) {
2871		sh_eth_tsu_purge_all(ndev);
2872		ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
2873	} else if (mdp->cd->tsu) {
2874		struct netdev_hw_addr *ha;
2875		netdev_for_each_mc_addr(ha, ndev) {
2876			if (mcast_all && is_multicast_ether_addr(ha->addr))
2877				continue;
2878
2879			if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
2880				if (!mcast_all) {
2881					sh_eth_tsu_purge_mcast(ndev);
2882					ecmr_bits &= ~ECMR_MCT;
2883					mcast_all = 1;
2884				}
2885			}
2886		}
2887	}
2888
2889	/* update the ethernet mode */
2890	sh_eth_write(ndev, ecmr_bits, ECMR);
2891
2892	spin_unlock_irqrestore(&mdp->lock, flags);
2893}
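/* Net effect on ECMR for a TSU-capable chip (sketch):
 *   default:      MCT=1, PRM=0 - multicast filtered through the CAM
 *   IFF_ALLMULTI: MCT=0, PRM=0 - all multicast accepted
 *   IFF_PROMISC:  MCT=0, PRM=1 - everything accepted
 */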
2894
2895static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
2896{
2897	if (!mdp->port)
2898		return TSU_VTAG0;
2899	else
2900		return TSU_VTAG1;
2901}
2902
2903static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
2904				  __be16 proto, u16 vid)
2905{
2906	struct sh_eth_private *mdp = netdev_priv(ndev);
2907	int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2908
2909	if (unlikely(!mdp->cd->tsu))
2910		return -EPERM;
2911
2912	/* No filtering if vid = 0 */
2913	if (!vid)
2914		return 0;
2915
2916	mdp->vlan_num_ids++;
2917
2918	/* The controller has one VLAN tag HW filter. So, if the filter is
2919	 * already enabled, the driver disables it and lets all VLAN IDs pass
2920	 */
2921	if (mdp->vlan_num_ids > 1) {
2922		/* disable VLAN filter */
2923		sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2924		return 0;
2925	}
2926
2927	sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
2928			 vtag_reg_index);
2929
2930	return 0;
2931}
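/* Example: adding VID 5 programs TSU_VTAG_ENABLE | 5 into the port's VTAG
 * register; adding a second VID exceeds the single hardware filter, so the
 * register is cleared and VLAN filtering is switched off entirely.
 */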
2932
2933static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
2934				   __be16 proto, u16 vid)
2935{
2936	struct sh_eth_private *mdp = netdev_priv(ndev);
2937	int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2938
2939	if (unlikely(!mdp->cd->tsu))
2940		return -EPERM;
2941
2942	/* No filtering if vid = 0 */
2943	if (!vid)
2944		return 0;
2945
2946	mdp->vlan_num_ids--;
2947	sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2948
2949	return 0;
2950}
2951
2952/* SuperH's TSU register init function */
2953static void sh_eth_tsu_init(struct sh_eth_private *mdp)
2954{
2955	if (!mdp->cd->dual_port) {
2956		sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entries */
2957		sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL,
2958				 TSU_FWSLC);	/* Enable POST registers */
2959		return;
2960	}
2961
2962	sh_eth_tsu_write(mdp, 0, TSU_FWEN0);	/* Disable forward(0->1) */
2963	sh_eth_tsu_write(mdp, 0, TSU_FWEN1);	/* Disable forward(1->0) */
2964	sh_eth_tsu_write(mdp, 0, TSU_FCM);	/* forward fifo 3k-3k */
2965	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
2966	sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
2967	sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
2968	sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
2969	sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
2970	sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
2971	sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
2972	sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);	/* Disable QTAG(0->1) */
2973	sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);	/* Disable QTAG(1->0) */
2974	sh_eth_tsu_write(mdp, 0, TSU_FWSR);	/* all interrupt status clear */
2975	sh_eth_tsu_write(mdp, 0, TSU_FWINMK);	/* Disable all interrupt */
2976	sh_eth_tsu_write(mdp, 0, TSU_TEN);	/* Disable all CAM entries */
2977	sh_eth_tsu_write(mdp, 0, TSU_POST1);	/* Disable CAM entry [ 0- 7] */
2978	sh_eth_tsu_write(mdp, 0, TSU_POST2);	/* Disable CAM entry [ 8-15] */
2979	sh_eth_tsu_write(mdp, 0, TSU_POST3);	/* Disable CAM entry [16-23] */
2980	sh_eth_tsu_write(mdp, 0, TSU_POST4);	/* Disable CAM entry [24-31] */
2981}
2982
2983/* MDIO bus release function */
2984static int sh_mdio_release(struct sh_eth_private *mdp)
2985{
2986	/* unregister mdio bus */
2987	mdiobus_unregister(mdp->mii_bus);
2988
2989	/* free bitbang info */
2990	free_mdio_bitbang(mdp->mii_bus);
2991
2992	return 0;
2993}
2994
2995/* MDIO bus init function */
2996static int sh_mdio_init(struct sh_eth_private *mdp,
2997			struct sh_eth_plat_data *pd)
2998{
2999	int ret;
3000	struct bb_info *bitbang;
3001	struct platform_device *pdev = mdp->pdev;
3002	struct device *dev = &mdp->pdev->dev;
3003
3004	/* create bit control struct for PHY */
3005	bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL);
3006	if (!bitbang)
3007		return -ENOMEM;
3008
3009	/* bitbang init */
3010	bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
3011	bitbang->set_gate = pd->set_mdio_gate;
3012	bitbang->ctrl.ops = &bb_ops;
3013
3014	/* MII controller setting */
3015	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
3016	if (!mdp->mii_bus)
3017		return -ENOMEM;
3018
3019	/* Hook up MII support for ethtool */
3020	mdp->mii_bus->name = "sh_mii";
3021	mdp->mii_bus->parent = dev;
3022	snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
3023		 pdev->name, pdev->id);
3024
3025	/* register MDIO bus */
3026	if (dev->of_node) {
3027		ret = of_mdiobus_register(mdp->mii_bus, dev->of_node);
3028	} else {
3029		if (pd->phy_irq > 0)
3030			mdp->mii_bus->irq[pd->phy] = pd->phy_irq;
3031
3032		ret = mdiobus_register(mdp->mii_bus);
3033	}
3034
3035	if (ret)
3036		goto out_free_bus;
3037
3038	return 0;
3039
3040out_free_bus:
3041	free_mdio_bitbang(mdp->mii_bus);
3042	return ret;
3043}
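/* Sketch of the path set up above: the MDIO bus is bit-banged through the
 * PIR register; bb_ops (defined earlier in this file) toggles the MDC/MDIO
 * pins and the generic mdio-bitbang layer composes those transitions into
 * MII read/write frames.
 */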
3044
3045static const u16 *sh_eth_get_register_offset(int register_type)
3046{
3047	const u16 *reg_offset = NULL;
3048
3049	switch (register_type) {
3050	case SH_ETH_REG_GIGABIT:
3051		reg_offset = sh_eth_offset_gigabit;
3052		break;
3053	case SH_ETH_REG_FAST_RZ:
3054		reg_offset = sh_eth_offset_fast_rz;
3055		break;
3056	case SH_ETH_REG_FAST_RCAR:
3057		reg_offset = sh_eth_offset_fast_rcar;
3058		break;
3059	case SH_ETH_REG_FAST_SH4:
3060		reg_offset = sh_eth_offset_fast_sh4;
3061		break;
3062	case SH_ETH_REG_FAST_SH3_SH2:
3063		reg_offset = sh_eth_offset_fast_sh3_sh2;
3064		break;
3065	}
3066
3067	return reg_offset;
3068}
3069
3070static const struct net_device_ops sh_eth_netdev_ops = {
3071	.ndo_open		= sh_eth_open,
3072	.ndo_stop		= sh_eth_close,
3073	.ndo_start_xmit		= sh_eth_start_xmit,
3074	.ndo_get_stats		= sh_eth_get_stats,
3075	.ndo_set_rx_mode	= sh_eth_set_rx_mode,
3076	.ndo_tx_timeout		= sh_eth_tx_timeout,
3077	.ndo_do_ioctl		= sh_eth_do_ioctl,
3078	.ndo_change_mtu		= sh_eth_change_mtu,
3079	.ndo_validate_addr	= eth_validate_addr,
3080	.ndo_set_mac_address	= eth_mac_addr,
3081};
3082
3083static const struct net_device_ops sh_eth_netdev_ops_tsu = {
3084	.ndo_open		= sh_eth_open,
3085	.ndo_stop		= sh_eth_close,
3086	.ndo_start_xmit		= sh_eth_start_xmit,
3087	.ndo_get_stats		= sh_eth_get_stats,
3088	.ndo_set_rx_mode	= sh_eth_set_rx_mode,
3089	.ndo_vlan_rx_add_vid	= sh_eth_vlan_rx_add_vid,
3090	.ndo_vlan_rx_kill_vid	= sh_eth_vlan_rx_kill_vid,
3091	.ndo_tx_timeout		= sh_eth_tx_timeout,
3092	.ndo_do_ioctl		= sh_eth_do_ioctl,
3093	.ndo_change_mtu		= sh_eth_change_mtu,
3094	.ndo_validate_addr	= eth_validate_addr,
3095	.ndo_set_mac_address	= eth_mac_addr,
3096};
3097
3098#ifdef CONFIG_OF
3099static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
3100{
3101	struct device_node *np = dev->of_node;
3102	struct sh_eth_plat_data *pdata;
3103	const char *mac_addr;
3104
3105	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3106	if (!pdata)
3107		return NULL;
3108
3109	pdata->phy_interface = of_get_phy_mode(np);
3110
3111	mac_addr = of_get_mac_address(np);
3112	if (mac_addr)
3113		memcpy(pdata->mac_addr, mac_addr, ETH_ALEN);
3114
3115	pdata->no_ether_link =
3116		of_property_read_bool(np, "renesas,no-ether-link");
3117	pdata->ether_link_active_low =
3118		of_property_read_bool(np, "renesas,ether-link-active-low");
3119
3120	return pdata;
3121}
3122
3123static const struct of_device_id sh_eth_match_table[] = {
3124	{ .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data },
3125	{ .compatible = "renesas,ether-r8a7743", .data = &rcar_gen2_data },
3126	{ .compatible = "renesas,ether-r8a7745", .data = &rcar_gen2_data },
3127	{ .compatible = "renesas,ether-r8a7778", .data = &rcar_gen1_data },
3128	{ .compatible = "renesas,ether-r8a7779", .data = &rcar_gen1_data },
3129	{ .compatible = "renesas,ether-r8a7790", .data = &rcar_gen2_data },
3130	{ .compatible = "renesas,ether-r8a7791", .data = &rcar_gen2_data },
3131	{ .compatible = "renesas,ether-r8a7793", .data = &rcar_gen2_data },
3132	{ .compatible = "renesas,ether-r8a7794", .data = &rcar_gen2_data },
3133	{ .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
3134	{ .compatible = "renesas,rcar-gen1-ether", .data = &rcar_gen1_data },
3135	{ .compatible = "renesas,rcar-gen2-ether", .data = &rcar_gen2_data },
3136	{ }
3137};
3138MODULE_DEVICE_TABLE(of, sh_eth_match_table);
3139#else
3140static inline struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
3141{
3142	return NULL;
3143}
3144#endif
3145
3146static int sh_eth_drv_probe(struct platform_device *pdev)
3147{
3148	struct resource *res;
3149	struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev);
3150	const struct platform_device_id *id = platform_get_device_id(pdev);
3151	struct sh_eth_private *mdp;
3152	struct net_device *ndev;
3153	int ret;
3154
3155	/* get base addr */
3156	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3157
3158	ndev = alloc_etherdev(sizeof(struct sh_eth_private));
3159	if (!ndev)
3160		return -ENOMEM;
3161
3162	pm_runtime_enable(&pdev->dev);
3163	pm_runtime_get_sync(&pdev->dev);
3164
3165	ret = platform_get_irq(pdev, 0);
3166	if (ret < 0)
3167		goto out_release;
3168	ndev->irq = ret;
3169
3170	SET_NETDEV_DEV(ndev, &pdev->dev);
3171
3172	mdp = netdev_priv(ndev);
3173	mdp->num_tx_ring = TX_RING_SIZE;
3174	mdp->num_rx_ring = RX_RING_SIZE;
3175	mdp->addr = devm_ioremap_resource(&pdev->dev, res);
3176	if (IS_ERR(mdp->addr)) {
3177		ret = PTR_ERR(mdp->addr);
3178		goto out_release;
3179	}
3180
3181	ndev->base_addr = res->start;
3182
3183	spin_lock_init(&mdp->lock);
3184	mdp->pdev = pdev;
3185
3186	if (pdev->dev.of_node)
3187		pd = sh_eth_parse_dt(&pdev->dev);
3188	if (!pd) {
3189		dev_err(&pdev->dev, "no platform data\n");
3190		ret = -EINVAL;
3191		goto out_release;
3192	}
3193
3194	/* get PHY ID */
3195	mdp->phy_id = pd->phy;
3196	mdp->phy_interface = pd->phy_interface;
3197	mdp->no_ether_link = pd->no_ether_link;
3198	mdp->ether_link_active_low = pd->ether_link_active_low;
3199
3200	/* set cpu data */
3201	if (id)
3202		mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
3203	else
3204		mdp->cd = (struct sh_eth_cpu_data *)of_device_get_match_data(&pdev->dev);
3205
3206	mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
3207	if (!mdp->reg_offset) {
3208		dev_err(&pdev->dev, "Unknown register type (%d)\n",
3209			mdp->cd->register_type);
3210		ret = -EINVAL;
3211		goto out_release;
3212	}
3213	sh_eth_set_default_cpu_data(mdp->cd);
3214
3215	/* User's manual states max MTU should be 2048 but due to the
3216	 * alignment calculations in sh_eth_ring_init() the practical
3217	 * MTU is a bit less. Maybe this can be optimized some more.
3218	 */
3219	ndev->max_mtu = 2000 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
3220	ndev->min_mtu = ETH_MIN_MTU;
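	/* With ETH_HLEN (14) + VLAN_HLEN (4) + ETH_FCS_LEN (4), max_mtu
	 * works out to 2000 - 22 = 1978 bytes.
	 */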
3221
3222	/* set function */
3223	if (mdp->cd->tsu)
3224		ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
3225	else
3226		ndev->netdev_ops = &sh_eth_netdev_ops;
3227	ndev->ethtool_ops = &sh_eth_ethtool_ops;
3228	ndev->watchdog_timeo = TX_TIMEOUT;
3229
3230	/* debug message level */
3231	mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
3232
3233	/* read and set MAC address */
3234	read_mac_address(ndev, pd->mac_addr);
3235	if (!is_valid_ether_addr(ndev->dev_addr)) {
3236		dev_warn(&pdev->dev,
3237			 "no valid MAC address supplied, using a random one.\n");
3238		eth_hw_addr_random(ndev);
3239	}
3240
3241	if (mdp->cd->tsu) {
3242		int port = pdev->id < 0 ? 0 : pdev->id % 2;
3243		struct resource *rtsu;
3244
3245		rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
3246		if (!rtsu) {
3247			dev_err(&pdev->dev, "no TSU resource\n");
3248			ret = -ENODEV;
3249			goto out_release;
3250		}
3251		/* We can only request the TSU region for the first port
3252		 * of the two sharing this TSU for the probe to succeed...
3253		 */
3254		if (port == 0 &&
3255		    !devm_request_mem_region(&pdev->dev, rtsu->start,
3256					     resource_size(rtsu),
3257					     dev_name(&pdev->dev))) {
3258			dev_err(&pdev->dev, "can't request TSU resource.\n");
3259			ret = -EBUSY;
3260			goto out_release;
3261		}
3262		/* ioremap the TSU registers */
3263		mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start,
3264					     resource_size(rtsu));
3265		if (!mdp->tsu_addr) {
3266			dev_err(&pdev->dev, "TSU region ioremap() failed.\n");
3267			ret = -ENOMEM;
3268			goto out_release;
3269		}
3270		mdp->port = port;
3271		ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
3272
3273		/* Need to init only the first port of the two sharing a TSU */
3274		if (port == 0) {
3275			if (mdp->cd->chip_reset)
3276				mdp->cd->chip_reset(ndev);
3277
3278			/* TSU init (Init only) */
3279			sh_eth_tsu_init(mdp);
3280		}
3281	}
3282
3283	if (mdp->cd->rmiimode)
3284		sh_eth_write(ndev, 0x1, RMIIMODE);
3285
3286	/* MDIO bus init */
3287	ret = sh_mdio_init(mdp, pd);
3288	if (ret) {
3289		if (ret != -EPROBE_DEFER)
3290			dev_err(&pdev->dev, "MDIO init failed: %d\n", ret);
3291		goto out_release;
3292	}
3293
3294	netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);
3295
3296	/* network device register */
3297	ret = register_netdev(ndev);
3298	if (ret)
3299		goto out_napi_del;
3300
3301	if (mdp->cd->magic)
3302		device_set_wakeup_capable(&pdev->dev, 1);
3303
3304	/* print device information */
3305	netdev_info(ndev, "Base address at 0x%x, %pM, IRQ %d.\n",
3306		    (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
3307
3308	pm_runtime_put(&pdev->dev);
3309	platform_set_drvdata(pdev, ndev);
3310
3311	return ret;
3312
3313out_napi_del:
3314	netif_napi_del(&mdp->napi);
3315	sh_mdio_release(mdp);
3316
3317out_release:
3318	/* net_dev free */
3319	free_netdev(ndev);
3320
3321	pm_runtime_put(&pdev->dev);
3322	pm_runtime_disable(&pdev->dev);
3323	return ret;
3324}
3325
3326static int sh_eth_drv_remove(struct platform_device *pdev)
3327{
3328	struct net_device *ndev = platform_get_drvdata(pdev);
3329	struct sh_eth_private *mdp = netdev_priv(ndev);
3330
3331	unregister_netdev(ndev);
3332	netif_napi_del(&mdp->napi);
3333	sh_mdio_release(mdp);
3334	pm_runtime_disable(&pdev->dev);
3335	free_netdev(ndev);
3336
3337	return 0;
3338}
3339
3340#ifdef CONFIG_PM
3341#ifdef CONFIG_PM_SLEEP
3342static int sh_eth_wol_setup(struct net_device *ndev)
3343{
3344	struct sh_eth_private *mdp = netdev_priv(ndev);
3345
3346	/* Only allow ECI interrupts */
3347	synchronize_irq(ndev->irq);
3348	napi_disable(&mdp->napi);
3349	sh_eth_write(ndev, EESIPR_ECIIP, EESIPR);
3350
3351	/* Enable MagicPacket */
3352	sh_eth_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);
3353
3354	return enable_irq_wake(ndev->irq);
3355}
3356
3357static int sh_eth_wol_restore(struct net_device *ndev)
3358{
3359	struct sh_eth_private *mdp = netdev_priv(ndev);
3360	int ret;
3361
3362	napi_enable(&mdp->napi);
3363
3364	/* Disable MagicPacket */
3365	sh_eth_modify(ndev, ECMR, ECMR_MPDE, 0);
3366
3367	/* The device needs to be reset to restore MagicPacket logic
3368	 * for next wakeup. If we close and open the device it will
3369	 * both be reset and all registers restored. This is what
3370	 * happens during suspend and resume without WoL enabled.
3371	 */
3372	ret = sh_eth_close(ndev);
3373	if (ret < 0)
3374		return ret;
3375	ret = sh_eth_open(ndev);
3376	if (ret < 0)
3377		return ret;
3378
3379	return disable_irq_wake(ndev->irq);
3380}
3381
3382static int sh_eth_suspend(struct device *dev)
3383{
3384	struct net_device *ndev = dev_get_drvdata(dev);
3385	struct sh_eth_private *mdp = netdev_priv(ndev);
3386	int ret = 0;
3387
3388	if (!netif_running(ndev))
3389		return 0;
3390
3391	netif_device_detach(ndev);
3392
3393	if (mdp->wol_enabled)
3394		ret = sh_eth_wol_setup(ndev);
3395	else
3396		ret = sh_eth_close(ndev);
3397
3398	return ret;
3399}
3400
3401static int sh_eth_resume(struct device *dev)
3402{
3403	struct net_device *ndev = dev_get_drvdata(dev);
3404	struct sh_eth_private *mdp = netdev_priv(ndev);
3405	int ret = 0;
3406
3407	if (!netif_running(ndev))
3408		return 0;
3409
3410	if (mdp->wol_enabled)
3411		ret = sh_eth_wol_restore(ndev);
3412	else
3413		ret = sh_eth_open(ndev);
3414
3415	if (ret < 0)
3416		return ret;
3417
3418	netif_device_attach(ndev);
3419
3420	return ret;
3421}
3422#endif
3423
3424static int sh_eth_runtime_nop(struct device *dev)
3425{
3426	/* Runtime PM callback shared between ->runtime_suspend()
3427	 * and ->runtime_resume(). Simply returns success.
3428	 *
3429	 * This driver re-initializes all registers after
3430	 * pm_runtime_get_sync() anyway so there is no need
3431	 * to save and restore registers here.
3432	 */
3433	return 0;
3434}
3435
3436static const struct dev_pm_ops sh_eth_dev_pm_ops = {
3437	SET_SYSTEM_SLEEP_PM_OPS(sh_eth_suspend, sh_eth_resume)
3438	SET_RUNTIME_PM_OPS(sh_eth_runtime_nop, sh_eth_runtime_nop, NULL)
3439};
3440#define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
3441#else
3442#define SH_ETH_PM_OPS NULL
3443#endif
3444
3445static const struct platform_device_id sh_eth_id_table[] = {
3446	{ "sh7619-ether", (kernel_ulong_t)&sh7619_data },
3447	{ "sh771x-ether", (kernel_ulong_t)&sh771x_data },
3448	{ "sh7724-ether", (kernel_ulong_t)&sh7724_data },
3449	{ "sh7734-gether", (kernel_ulong_t)&sh7734_data },
3450	{ "sh7757-ether", (kernel_ulong_t)&sh7757_data },
3451	{ "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga },
3452	{ "sh7763-gether", (kernel_ulong_t)&sh7763_data },
3453	{ }
3454};
3455MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
3456
3457static struct platform_driver sh_eth_driver = {
3458	.probe = sh_eth_drv_probe,
3459	.remove = sh_eth_drv_remove,
3460	.id_table = sh_eth_id_table,
3461	.driver = {
3462		   .name = CARDNAME,
3463		   .pm = SH_ETH_PM_OPS,
3464		   .of_match_table = of_match_ptr(sh_eth_match_table),
3465	},
3466};
3467
3468module_platform_driver(sh_eth_driver);
3469
3470MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
3471MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
3472MODULE_LICENSE("GPL v2");