Linux Audio

Check our new training course

Linux debugging, profiling, tracing and performance analysis training

Apr 14-17, 2025
Register
Loading...
v3.15
   1/*
   2 * r8169.c: RealTek 8169/8168/8101 ethernet driver.
   3 *
   4 * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
   5 * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
   6 * Copyright (c) a lot of people too. Please respect their work.
   7 *
   8 * See MAINTAINERS file for support contact information.
   9 */
  10
  11#include <linux/module.h>
  12#include <linux/moduleparam.h>
  13#include <linux/pci.h>
  14#include <linux/netdevice.h>
  15#include <linux/etherdevice.h>
  16#include <linux/delay.h>
  17#include <linux/ethtool.h>
  18#include <linux/mii.h>
  19#include <linux/if_vlan.h>
  20#include <linux/crc32.h>
  21#include <linux/in.h>
  22#include <linux/ip.h>
  23#include <linux/tcp.h>
  24#include <linux/interrupt.h>
  25#include <linux/dma-mapping.h>
  26#include <linux/pm_runtime.h>
  27#include <linux/firmware.h>
  28#include <linux/pci-aspm.h>
  29#include <linux/prefetch.h>
 
 
  30
  31#include <asm/io.h>
  32#include <asm/irq.h>
  33
  34#define RTL8169_VERSION "2.3LK-NAPI"
  35#define MODULENAME "r8169"
  36#define PFX MODULENAME ": "
  37
  38#define FIRMWARE_8168D_1	"rtl_nic/rtl8168d-1.fw"
  39#define FIRMWARE_8168D_2	"rtl_nic/rtl8168d-2.fw"
  40#define FIRMWARE_8168E_1	"rtl_nic/rtl8168e-1.fw"
  41#define FIRMWARE_8168E_2	"rtl_nic/rtl8168e-2.fw"
  42#define FIRMWARE_8168E_3	"rtl_nic/rtl8168e-3.fw"
  43#define FIRMWARE_8168F_1	"rtl_nic/rtl8168f-1.fw"
  44#define FIRMWARE_8168F_2	"rtl_nic/rtl8168f-2.fw"
  45#define FIRMWARE_8105E_1	"rtl_nic/rtl8105e-1.fw"
  46#define FIRMWARE_8402_1		"rtl_nic/rtl8402-1.fw"
  47#define FIRMWARE_8411_1		"rtl_nic/rtl8411-1.fw"
  48#define FIRMWARE_8411_2		"rtl_nic/rtl8411-2.fw"
  49#define FIRMWARE_8106E_1	"rtl_nic/rtl8106e-1.fw"
  50#define FIRMWARE_8106E_2	"rtl_nic/rtl8106e-2.fw"
  51#define FIRMWARE_8168G_2	"rtl_nic/rtl8168g-2.fw"
  52#define FIRMWARE_8168G_3	"rtl_nic/rtl8168g-3.fw"
 
 
 
 
  53
  54#ifdef RTL8169_DEBUG
  55#define assert(expr) \
  56	if (!(expr)) {					\
  57		printk( "Assertion failed! %s,%s,%s,line=%d\n",	\
  58		#expr,__FILE__,__func__,__LINE__);		\
  59	}
  60#define dprintk(fmt, args...) \
  61	do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
  62#else
  63#define assert(expr) do {} while (0)
  64#define dprintk(fmt, args...)	do {} while (0)
  65#endif /* RTL8169_DEBUG */
  66
  67#define R8169_MSG_DEFAULT \
  68	(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
  69
  70#define TX_SLOTS_AVAIL(tp) \
  71	(tp->dirty_tx + NUM_TX_DESC - tp->cur_tx)
  72
  73/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
  74#define TX_FRAGS_READY_FOR(tp,nr_frags) \
  75	(TX_SLOTS_AVAIL(tp) >= (nr_frags + 1))
  76
  77/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
  78   The RTL chips use a 64 element hash table based on the Ethernet CRC. */
  79static const int multicast_filter_limit = 32;
  80
  81#define MAX_READ_REQUEST_SHIFT	12
  82#define TX_DMA_BURST	7	/* Maximum PCI burst, '7' is unlimited */
  83#define InterFrameGap	0x03	/* 3 means InterFrameGap = the shortest one */
  84
  85#define R8169_REGS_SIZE		256
  86#define R8169_NAPI_WEIGHT	64
  87#define NUM_TX_DESC	64	/* Number of Tx descriptor registers */
  88#define NUM_RX_DESC	256U	/* Number of Rx descriptor registers */
  89#define R8169_TX_RING_BYTES	(NUM_TX_DESC * sizeof(struct TxDesc))
  90#define R8169_RX_RING_BYTES	(NUM_RX_DESC * sizeof(struct RxDesc))
  91
  92#define RTL8169_TX_TIMEOUT	(6*HZ)
  93#define RTL8169_PHY_TIMEOUT	(10*HZ)
  94
  95/* write/read MMIO register */
  96#define RTL_W8(reg, val8)	writeb ((val8), ioaddr + (reg))
  97#define RTL_W16(reg, val16)	writew ((val16), ioaddr + (reg))
  98#define RTL_W32(reg, val32)	writel ((val32), ioaddr + (reg))
  99#define RTL_R8(reg)		readb (ioaddr + (reg))
 100#define RTL_R16(reg)		readw (ioaddr + (reg))
 101#define RTL_R32(reg)		readl (ioaddr + (reg))
 102
 103enum mac_version {
 104	RTL_GIGA_MAC_VER_01 = 0,
 105	RTL_GIGA_MAC_VER_02,
 106	RTL_GIGA_MAC_VER_03,
 107	RTL_GIGA_MAC_VER_04,
 108	RTL_GIGA_MAC_VER_05,
 109	RTL_GIGA_MAC_VER_06,
 110	RTL_GIGA_MAC_VER_07,
 111	RTL_GIGA_MAC_VER_08,
 112	RTL_GIGA_MAC_VER_09,
 113	RTL_GIGA_MAC_VER_10,
 114	RTL_GIGA_MAC_VER_11,
 115	RTL_GIGA_MAC_VER_12,
 116	RTL_GIGA_MAC_VER_13,
 117	RTL_GIGA_MAC_VER_14,
 118	RTL_GIGA_MAC_VER_15,
 119	RTL_GIGA_MAC_VER_16,
 120	RTL_GIGA_MAC_VER_17,
 121	RTL_GIGA_MAC_VER_18,
 122	RTL_GIGA_MAC_VER_19,
 123	RTL_GIGA_MAC_VER_20,
 124	RTL_GIGA_MAC_VER_21,
 125	RTL_GIGA_MAC_VER_22,
 126	RTL_GIGA_MAC_VER_23,
 127	RTL_GIGA_MAC_VER_24,
 128	RTL_GIGA_MAC_VER_25,
 129	RTL_GIGA_MAC_VER_26,
 130	RTL_GIGA_MAC_VER_27,
 131	RTL_GIGA_MAC_VER_28,
 132	RTL_GIGA_MAC_VER_29,
 133	RTL_GIGA_MAC_VER_30,
 134	RTL_GIGA_MAC_VER_31,
 135	RTL_GIGA_MAC_VER_32,
 136	RTL_GIGA_MAC_VER_33,
 137	RTL_GIGA_MAC_VER_34,
 138	RTL_GIGA_MAC_VER_35,
 139	RTL_GIGA_MAC_VER_36,
 140	RTL_GIGA_MAC_VER_37,
 141	RTL_GIGA_MAC_VER_38,
 142	RTL_GIGA_MAC_VER_39,
 143	RTL_GIGA_MAC_VER_40,
 144	RTL_GIGA_MAC_VER_41,
 145	RTL_GIGA_MAC_VER_42,
 146	RTL_GIGA_MAC_VER_43,
 147	RTL_GIGA_MAC_VER_44,
 
 
 
 
 
 
 
 148	RTL_GIGA_MAC_NONE   = 0xff,
 149};
 150
 151enum rtl_tx_desc_version {
 152	RTL_TD_0	= 0,
 153	RTL_TD_1	= 1,
 154};
 155
 156#define JUMBO_1K	ETH_DATA_LEN
 157#define JUMBO_4K	(4*1024 - ETH_HLEN - 2)
 158#define JUMBO_6K	(6*1024 - ETH_HLEN - 2)
 159#define JUMBO_7K	(7*1024 - ETH_HLEN - 2)
 160#define JUMBO_9K	(9*1024 - ETH_HLEN - 2)
 161
 162#define _R(NAME,TD,FW,SZ,B) {	\
 163	.name = NAME,		\
 164	.txd_version = TD,	\
 165	.fw_name = FW,		\
 166	.jumbo_max = SZ,	\
 167	.jumbo_tx_csum = B	\
 168}
 169
 170static const struct {
 171	const char *name;
 172	enum rtl_tx_desc_version txd_version;
 173	const char *fw_name;
 174	u16 jumbo_max;
 175	bool jumbo_tx_csum;
 176} rtl_chip_infos[] = {
 177	/* PCI devices. */
 178	[RTL_GIGA_MAC_VER_01] =
 179		_R("RTL8169",		RTL_TD_0, NULL, JUMBO_7K, true),
 180	[RTL_GIGA_MAC_VER_02] =
 181		_R("RTL8169s",		RTL_TD_0, NULL, JUMBO_7K, true),
 182	[RTL_GIGA_MAC_VER_03] =
 183		_R("RTL8110s",		RTL_TD_0, NULL, JUMBO_7K, true),
 184	[RTL_GIGA_MAC_VER_04] =
 185		_R("RTL8169sb/8110sb",	RTL_TD_0, NULL, JUMBO_7K, true),
 186	[RTL_GIGA_MAC_VER_05] =
 187		_R("RTL8169sc/8110sc",	RTL_TD_0, NULL, JUMBO_7K, true),
 188	[RTL_GIGA_MAC_VER_06] =
 189		_R("RTL8169sc/8110sc",	RTL_TD_0, NULL, JUMBO_7K, true),
 190	/* PCI-E devices. */
 191	[RTL_GIGA_MAC_VER_07] =
 192		_R("RTL8102e",		RTL_TD_1, NULL, JUMBO_1K, true),
 193	[RTL_GIGA_MAC_VER_08] =
 194		_R("RTL8102e",		RTL_TD_1, NULL, JUMBO_1K, true),
 195	[RTL_GIGA_MAC_VER_09] =
 196		_R("RTL8102e",		RTL_TD_1, NULL, JUMBO_1K, true),
 197	[RTL_GIGA_MAC_VER_10] =
 198		_R("RTL8101e",		RTL_TD_0, NULL, JUMBO_1K, true),
 199	[RTL_GIGA_MAC_VER_11] =
 200		_R("RTL8168b/8111b",	RTL_TD_0, NULL, JUMBO_4K, false),
 201	[RTL_GIGA_MAC_VER_12] =
 202		_R("RTL8168b/8111b",	RTL_TD_0, NULL, JUMBO_4K, false),
 203	[RTL_GIGA_MAC_VER_13] =
 204		_R("RTL8101e",		RTL_TD_0, NULL, JUMBO_1K, true),
 205	[RTL_GIGA_MAC_VER_14] =
 206		_R("RTL8100e",		RTL_TD_0, NULL, JUMBO_1K, true),
 207	[RTL_GIGA_MAC_VER_15] =
 208		_R("RTL8100e",		RTL_TD_0, NULL, JUMBO_1K, true),
 209	[RTL_GIGA_MAC_VER_16] =
 210		_R("RTL8101e",		RTL_TD_0, NULL, JUMBO_1K, true),
 211	[RTL_GIGA_MAC_VER_17] =
 212		_R("RTL8168b/8111b",	RTL_TD_0, NULL, JUMBO_4K, false),
 213	[RTL_GIGA_MAC_VER_18] =
 214		_R("RTL8168cp/8111cp",	RTL_TD_1, NULL, JUMBO_6K, false),
 215	[RTL_GIGA_MAC_VER_19] =
 216		_R("RTL8168c/8111c",	RTL_TD_1, NULL, JUMBO_6K, false),
 217	[RTL_GIGA_MAC_VER_20] =
 218		_R("RTL8168c/8111c",	RTL_TD_1, NULL, JUMBO_6K, false),
 219	[RTL_GIGA_MAC_VER_21] =
 220		_R("RTL8168c/8111c",	RTL_TD_1, NULL, JUMBO_6K, false),
 221	[RTL_GIGA_MAC_VER_22] =
 222		_R("RTL8168c/8111c",	RTL_TD_1, NULL, JUMBO_6K, false),
 223	[RTL_GIGA_MAC_VER_23] =
 224		_R("RTL8168cp/8111cp",	RTL_TD_1, NULL, JUMBO_6K, false),
 225	[RTL_GIGA_MAC_VER_24] =
 226		_R("RTL8168cp/8111cp",	RTL_TD_1, NULL, JUMBO_6K, false),
 227	[RTL_GIGA_MAC_VER_25] =
 228		_R("RTL8168d/8111d",	RTL_TD_1, FIRMWARE_8168D_1,
 229							JUMBO_9K, false),
 230	[RTL_GIGA_MAC_VER_26] =
 231		_R("RTL8168d/8111d",	RTL_TD_1, FIRMWARE_8168D_2,
 232							JUMBO_9K, false),
 233	[RTL_GIGA_MAC_VER_27] =
 234		_R("RTL8168dp/8111dp",	RTL_TD_1, NULL, JUMBO_9K, false),
 235	[RTL_GIGA_MAC_VER_28] =
 236		_R("RTL8168dp/8111dp",	RTL_TD_1, NULL, JUMBO_9K, false),
 237	[RTL_GIGA_MAC_VER_29] =
 238		_R("RTL8105e",		RTL_TD_1, FIRMWARE_8105E_1,
 239							JUMBO_1K, true),
 240	[RTL_GIGA_MAC_VER_30] =
 241		_R("RTL8105e",		RTL_TD_1, FIRMWARE_8105E_1,
 242							JUMBO_1K, true),
 243	[RTL_GIGA_MAC_VER_31] =
 244		_R("RTL8168dp/8111dp",	RTL_TD_1, NULL, JUMBO_9K, false),
 245	[RTL_GIGA_MAC_VER_32] =
 246		_R("RTL8168e/8111e",	RTL_TD_1, FIRMWARE_8168E_1,
 247							JUMBO_9K, false),
 248	[RTL_GIGA_MAC_VER_33] =
 249		_R("RTL8168e/8111e",	RTL_TD_1, FIRMWARE_8168E_2,
 250							JUMBO_9K, false),
 251	[RTL_GIGA_MAC_VER_34] =
 252		_R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3,
 253							JUMBO_9K, false),
 254	[RTL_GIGA_MAC_VER_35] =
 255		_R("RTL8168f/8111f",	RTL_TD_1, FIRMWARE_8168F_1,
 256							JUMBO_9K, false),
 257	[RTL_GIGA_MAC_VER_36] =
 258		_R("RTL8168f/8111f",	RTL_TD_1, FIRMWARE_8168F_2,
 259							JUMBO_9K, false),
 260	[RTL_GIGA_MAC_VER_37] =
 261		_R("RTL8402",		RTL_TD_1, FIRMWARE_8402_1,
 262							JUMBO_1K, true),
 263	[RTL_GIGA_MAC_VER_38] =
 264		_R("RTL8411",		RTL_TD_1, FIRMWARE_8411_1,
 265							JUMBO_9K, false),
 266	[RTL_GIGA_MAC_VER_39] =
 267		_R("RTL8106e",		RTL_TD_1, FIRMWARE_8106E_1,
 268							JUMBO_1K, true),
 269	[RTL_GIGA_MAC_VER_40] =
 270		_R("RTL8168g/8111g",	RTL_TD_1, FIRMWARE_8168G_2,
 271							JUMBO_9K, false),
 272	[RTL_GIGA_MAC_VER_41] =
 273		_R("RTL8168g/8111g",	RTL_TD_1, NULL, JUMBO_9K, false),
 274	[RTL_GIGA_MAC_VER_42] =
 275		_R("RTL8168g/8111g",	RTL_TD_1, FIRMWARE_8168G_3,
 276							JUMBO_9K, false),
 277	[RTL_GIGA_MAC_VER_43] =
 278		_R("RTL8106e",		RTL_TD_1, FIRMWARE_8106E_2,
 279							JUMBO_1K, true),
 280	[RTL_GIGA_MAC_VER_44] =
 281		_R("RTL8411",		RTL_TD_1, FIRMWARE_8411_2,
 282							JUMBO_9K, false),
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 283};
 284#undef _R
 285
 286enum cfg_version {
 287	RTL_CFG_0 = 0x00,
 288	RTL_CFG_1,
 289	RTL_CFG_2
 290};
 291
 292static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
 293	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8129), 0, 0, RTL_CFG_0 },
 294	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8136), 0, 0, RTL_CFG_2 },
 295	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8167), 0, 0, RTL_CFG_0 },
 296	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8168), 0, 0, RTL_CFG_1 },
 297	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8169), 0, 0, RTL_CFG_0 },
 298	{ PCI_VENDOR_ID_DLINK,			0x4300,
 299		PCI_VENDOR_ID_DLINK, 0x4b10,		 0, 0, RTL_CFG_1 },
 300	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK,	0x4300), 0, 0, RTL_CFG_0 },
 301	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK,	0x4302), 0, 0, RTL_CFG_0 },
 302	{ PCI_DEVICE(PCI_VENDOR_ID_AT,		0xc107), 0, 0, RTL_CFG_0 },
 303	{ PCI_DEVICE(0x16ec,			0x0116), 0, 0, RTL_CFG_0 },
 304	{ PCI_VENDOR_ID_LINKSYS,		0x1032,
 305		PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
 306	{ 0x0001,				0x8168,
 307		PCI_ANY_ID, 0x2410, 0, 0, RTL_CFG_2 },
 308	{0,},
 309};
 310
 311MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
 312
 313static int rx_buf_sz = 16383;
 314static int use_dac;
 315static struct {
 316	u32 msg_enable;
 317} debug = { -1 };
 318
 319enum rtl_registers {
 320	MAC0		= 0,	/* Ethernet hardware address. */
 321	MAC4		= 4,
 322	MAR0		= 8,	/* Multicast filter. */
 323	CounterAddrLow		= 0x10,
 324	CounterAddrHigh		= 0x14,
 325	TxDescStartAddrLow	= 0x20,
 326	TxDescStartAddrHigh	= 0x24,
 327	TxHDescStartAddrLow	= 0x28,
 328	TxHDescStartAddrHigh	= 0x2c,
 329	FLASH		= 0x30,
 330	ERSR		= 0x36,
 331	ChipCmd		= 0x37,
 332	TxPoll		= 0x38,
 333	IntrMask	= 0x3c,
 334	IntrStatus	= 0x3e,
 335
 336	TxConfig	= 0x40,
 337#define	TXCFG_AUTO_FIFO			(1 << 7)	/* 8111e-vl */
 338#define	TXCFG_EMPTY			(1 << 11)	/* 8111e-vl */
 339
 340	RxConfig	= 0x44,
 341#define	RX128_INT_EN			(1 << 15)	/* 8111c and later */
 342#define	RX_MULTI_EN			(1 << 14)	/* 8111c only */
 343#define	RXCFG_FIFO_SHIFT		13
 344					/* No threshold before first PCI xfer */
 345#define	RX_FIFO_THRESH			(7 << RXCFG_FIFO_SHIFT)
 346#define	RX_EARLY_OFF			(1 << 11)
 347#define	RXCFG_DMA_SHIFT			8
 348					/* Unlimited maximum PCI burst. */
 349#define	RX_DMA_BURST			(7 << RXCFG_DMA_SHIFT)
 350
 351	RxMissed	= 0x4c,
 352	Cfg9346		= 0x50,
 353	Config0		= 0x51,
 354	Config1		= 0x52,
 355	Config2		= 0x53,
 356#define PME_SIGNAL			(1 << 5)	/* 8168c and later */
 357
 358	Config3		= 0x54,
 359	Config4		= 0x55,
 360	Config5		= 0x56,
 361	MultiIntr	= 0x5c,
 362	PHYAR		= 0x60,
 363	PHYstatus	= 0x6c,
 364	RxMaxSize	= 0xda,
 365	CPlusCmd	= 0xe0,
 366	IntrMitigate	= 0xe2,
 367	RxDescAddrLow	= 0xe4,
 368	RxDescAddrHigh	= 0xe8,
 369	EarlyTxThres	= 0xec,	/* 8169. Unit of 32 bytes. */
 370
 371#define NoEarlyTx	0x3f	/* Max value : no early transmit. */
 372
 373	MaxTxPacketSize	= 0xec,	/* 8101/8168. Unit of 128 bytes. */
 374
 375#define TxPacketMax	(8064 >> 7)
 376#define EarlySize	0x27
 377
 378	FuncEvent	= 0xf0,
 379	FuncEventMask	= 0xf4,
 380	FuncPresetState	= 0xf8,
 
 
 
 
 381	FuncForceEvent	= 0xfc,
 382};
 383
 384enum rtl8110_registers {
 385	TBICSR			= 0x64,
 386	TBI_ANAR		= 0x68,
 387	TBI_LPAR		= 0x6a,
 388};
 389
 390enum rtl8168_8101_registers {
 391	CSIDR			= 0x64,
 392	CSIAR			= 0x68,
 393#define	CSIAR_FLAG			0x80000000
 394#define	CSIAR_WRITE_CMD			0x80000000
 395#define	CSIAR_BYTE_ENABLE		0x0f
 396#define	CSIAR_BYTE_ENABLE_SHIFT		12
 397#define	CSIAR_ADDR_MASK			0x0fff
 398#define CSIAR_FUNC_CARD			0x00000000
 399#define CSIAR_FUNC_SDIO			0x00010000
 400#define CSIAR_FUNC_NIC			0x00020000
 401#define CSIAR_FUNC_NIC2			0x00010000
 402	PMCH			= 0x6f,
 403	EPHYAR			= 0x80,
 404#define	EPHYAR_FLAG			0x80000000
 405#define	EPHYAR_WRITE_CMD		0x80000000
 406#define	EPHYAR_REG_MASK			0x1f
 407#define	EPHYAR_REG_SHIFT		16
 408#define	EPHYAR_DATA_MASK		0xffff
 409	DLLPR			= 0xd0,
 410#define	PFM_EN				(1 << 6)
 
 411	DBG_REG			= 0xd1,
 412#define	FIX_NAK_1			(1 << 4)
 413#define	FIX_NAK_2			(1 << 3)
 414	TWSI			= 0xd2,
 415	MCU			= 0xd3,
 416#define	NOW_IS_OOB			(1 << 7)
 417#define	TX_EMPTY			(1 << 5)
 418#define	RX_EMPTY			(1 << 4)
 419#define	RXTX_EMPTY			(TX_EMPTY | RX_EMPTY)
 420#define	EN_NDP				(1 << 3)
 421#define	EN_OOB_RESET			(1 << 2)
 422#define	LINK_LIST_RDY			(1 << 1)
 423	EFUSEAR			= 0xdc,
 424#define	EFUSEAR_FLAG			0x80000000
 425#define	EFUSEAR_WRITE_CMD		0x80000000
 426#define	EFUSEAR_READ_CMD		0x00000000
 427#define	EFUSEAR_REG_MASK		0x03ff
 428#define	EFUSEAR_REG_SHIFT		8
 429#define	EFUSEAR_DATA_MASK		0xff
 
 
 430};
 431
 432enum rtl8168_registers {
 433	LED_FREQ		= 0x1a,
 434	EEE_LED			= 0x1b,
 435	ERIDR			= 0x70,
 436	ERIAR			= 0x74,
 437#define ERIAR_FLAG			0x80000000
 438#define ERIAR_WRITE_CMD			0x80000000
 439#define ERIAR_READ_CMD			0x00000000
 440#define ERIAR_ADDR_BYTE_ALIGN		4
 441#define ERIAR_TYPE_SHIFT		16
 442#define ERIAR_EXGMAC			(0x00 << ERIAR_TYPE_SHIFT)
 443#define ERIAR_MSIX			(0x01 << ERIAR_TYPE_SHIFT)
 444#define ERIAR_ASF			(0x02 << ERIAR_TYPE_SHIFT)
 
 445#define ERIAR_MASK_SHIFT		12
 446#define ERIAR_MASK_0001			(0x1 << ERIAR_MASK_SHIFT)
 447#define ERIAR_MASK_0011			(0x3 << ERIAR_MASK_SHIFT)
 
 448#define ERIAR_MASK_0101			(0x5 << ERIAR_MASK_SHIFT)
 449#define ERIAR_MASK_1111			(0xf << ERIAR_MASK_SHIFT)
 450	EPHY_RXER_NUM		= 0x7c,
 451	OCPDR			= 0xb0,	/* OCP GPHY access */
 452#define OCPDR_WRITE_CMD			0x80000000
 453#define OCPDR_READ_CMD			0x00000000
 454#define OCPDR_REG_MASK			0x7f
 455#define OCPDR_GPHY_REG_SHIFT		16
 456#define OCPDR_DATA_MASK			0xffff
 457	OCPAR			= 0xb4,
 458#define OCPAR_FLAG			0x80000000
 459#define OCPAR_GPHY_WRITE_CMD		0x8000f060
 460#define OCPAR_GPHY_READ_CMD		0x0000f060
 461	GPHY_OCP		= 0xb8,
 462	RDSAR1			= 0xd0,	/* 8168c only. Undocumented on 8168dp */
 463	MISC			= 0xf0,	/* 8168e only. */
 464#define TXPLA_RST			(1 << 29)
 465#define DISABLE_LAN_EN			(1 << 23) /* Enable GPIO pin */
 466#define PWM_EN				(1 << 22)
 467#define RXDV_GATED_EN			(1 << 19)
 468#define EARLY_TALLY_EN			(1 << 16)
 469};
 470
 471enum rtl_register_content {
 472	/* InterruptStatusBits */
 473	SYSErr		= 0x8000,
 474	PCSTimeout	= 0x4000,
 475	SWInt		= 0x0100,
 476	TxDescUnavail	= 0x0080,
 477	RxFIFOOver	= 0x0040,
 478	LinkChg		= 0x0020,
 479	RxOverflow	= 0x0010,
 480	TxErr		= 0x0008,
 481	TxOK		= 0x0004,
 482	RxErr		= 0x0002,
 483	RxOK		= 0x0001,
 484
 485	/* RxStatusDesc */
 486	RxBOVF	= (1 << 24),
 487	RxFOVF	= (1 << 23),
 488	RxRWT	= (1 << 22),
 489	RxRES	= (1 << 21),
 490	RxRUNT	= (1 << 20),
 491	RxCRC	= (1 << 19),
 492
 493	/* ChipCmdBits */
 494	StopReq		= 0x80,
 495	CmdReset	= 0x10,
 496	CmdRxEnb	= 0x08,
 497	CmdTxEnb	= 0x04,
 498	RxBufEmpty	= 0x01,
 499
 500	/* TXPoll register p.5 */
 501	HPQ		= 0x80,		/* Poll cmd on the high prio queue */
 502	NPQ		= 0x40,		/* Poll cmd on the low prio queue */
 503	FSWInt		= 0x01,		/* Forced software interrupt */
 504
 505	/* Cfg9346Bits */
 506	Cfg9346_Lock	= 0x00,
 507	Cfg9346_Unlock	= 0xc0,
 508
 509	/* rx_mode_bits */
 510	AcceptErr	= 0x20,
 511	AcceptRunt	= 0x10,
 512	AcceptBroadcast	= 0x08,
 513	AcceptMulticast	= 0x04,
 514	AcceptMyPhys	= 0x02,
 515	AcceptAllPhys	= 0x01,
 516#define RX_CONFIG_ACCEPT_MASK		0x3f
 517
 518	/* TxConfigBits */
 519	TxInterFrameGapShift = 24,
 520	TxDMAShift = 8,	/* DMA burst value (0-7) is shift this many bits */
 521
 522	/* Config1 register p.24 */
 523	LEDS1		= (1 << 7),
 524	LEDS0		= (1 << 6),
 525	Speed_down	= (1 << 4),
 526	MEMMAP		= (1 << 3),
 527	IOMAP		= (1 << 2),
 528	VPD		= (1 << 1),
 529	PMEnable	= (1 << 0),	/* Power Management Enable */
 530
 531	/* Config2 register p. 25 */
 532	ClkReqEn	= (1 << 7),	/* Clock Request Enable */
 533	MSIEnable	= (1 << 5),	/* 8169 only. Reserved in the 8168. */
 534	PCI_Clock_66MHz = 0x01,
 535	PCI_Clock_33MHz = 0x00,
 536
 537	/* Config3 register p.25 */
 538	MagicPacket	= (1 << 5),	/* Wake up when receives a Magic Packet */
 539	LinkUp		= (1 << 4),	/* Wake up when the cable connection is re-established */
 540	Jumbo_En0	= (1 << 2),	/* 8168 only. Reserved in the 8168b */
 
 541	Beacon_en	= (1 << 0),	/* 8168 only. Reserved in the 8168b */
 542
 543	/* Config4 register */
 544	Jumbo_En1	= (1 << 1),	/* 8168 only. Reserved in the 8168b */
 545
 546	/* Config5 register p.27 */
 547	BWF		= (1 << 6),	/* Accept Broadcast wakeup frame */
 548	MWF		= (1 << 5),	/* Accept Multicast wakeup frame */
 549	UWF		= (1 << 4),	/* Accept Unicast wakeup frame */
 550	Spi_en		= (1 << 3),
 551	LanWake		= (1 << 1),	/* LanWake enable/disable */
 552	PMEStatus	= (1 << 0),	/* PME status can be reset by PCI RST# */
 553	ASPM_en		= (1 << 0),	/* ASPM enable */
 554
 555	/* TBICSR p.28 */
 556	TBIReset	= 0x80000000,
 557	TBILoopback	= 0x40000000,
 558	TBINwEnable	= 0x20000000,
 559	TBINwRestart	= 0x10000000,
 560	TBILinkOk	= 0x02000000,
 561	TBINwComplete	= 0x01000000,
 562
 563	/* CPlusCmd p.31 */
 564	EnableBist	= (1 << 15),	// 8168 8101
 565	Mac_dbgo_oe	= (1 << 14),	// 8168 8101
 566	Normal_mode	= (1 << 13),	// unused
 567	Force_half_dup	= (1 << 12),	// 8168 8101
 568	Force_rxflow_en	= (1 << 11),	// 8168 8101
 569	Force_txflow_en	= (1 << 10),	// 8168 8101
 570	Cxpl_dbg_sel	= (1 << 9),	// 8168 8101
 571	ASF		= (1 << 8),	// 8168 8101
 572	PktCntrDisable	= (1 << 7),	// 8168 8101
 573	Mac_dbgo_sel	= 0x001c,	// 8168
 574	RxVlan		= (1 << 6),
 575	RxChkSum	= (1 << 5),
 576	PCIDAC		= (1 << 4),
 577	PCIMulRW	= (1 << 3),
 578	INTT_0		= 0x0000,	// 8168
 579	INTT_1		= 0x0001,	// 8168
 580	INTT_2		= 0x0002,	// 8168
 581	INTT_3		= 0x0003,	// 8168
 582
 583	/* rtl8169_PHYstatus */
 584	TBI_Enable	= 0x80,
 585	TxFlowCtrl	= 0x40,
 586	RxFlowCtrl	= 0x20,
 587	_1000bpsF	= 0x10,
 588	_100bps		= 0x08,
 589	_10bps		= 0x04,
 590	LinkStatus	= 0x02,
 591	FullDup		= 0x01,
 592
 593	/* _TBICSRBit */
 594	TBILinkOK	= 0x02000000,
 595
 
 
 
 596	/* DumpCounterCommand */
 597	CounterDump	= 0x8,
 
 
 
 598};
 599
 600enum rtl_desc_bit {
 601	/* First doubleword. */
 602	DescOwn		= (1 << 31), /* Descriptor is owned by NIC */
 603	RingEnd		= (1 << 30), /* End of descriptor ring */
 604	FirstFrag	= (1 << 29), /* First segment of a packet */
 605	LastFrag	= (1 << 28), /* Final segment of a packet */
 606};
 607
 608/* Generic case. */
 609enum rtl_tx_desc_bit {
 610	/* First doubleword. */
 611	TD_LSO		= (1 << 27),		/* Large Send Offload */
 612#define TD_MSS_MAX			0x07ffu	/* MSS value */
 613
 614	/* Second doubleword. */
 615	TxVlanTag	= (1 << 17),		/* Add VLAN tag */
 616};
 617
 618/* 8169, 8168b and 810x except 8102e. */
 619enum rtl_tx_desc_bit_0 {
 620	/* First doubleword. */
 621#define TD0_MSS_SHIFT			16	/* MSS position (11 bits) */
 622	TD0_TCP_CS	= (1 << 16),		/* Calculate TCP/IP checksum */
 623	TD0_UDP_CS	= (1 << 17),		/* Calculate UDP/IP checksum */
 624	TD0_IP_CS	= (1 << 18),		/* Calculate IP checksum */
 625};
 626
 627/* 8102e, 8168c and beyond. */
 628enum rtl_tx_desc_bit_1 {
 
 
 
 
 
 
 629	/* Second doubleword. */
 
 
 630#define TD1_MSS_SHIFT			18	/* MSS position (11 bits) */
 631	TD1_IP_CS	= (1 << 29),		/* Calculate IP checksum */
 
 632	TD1_TCP_CS	= (1 << 30),		/* Calculate TCP/IP checksum */
 633	TD1_UDP_CS	= (1 << 31),		/* Calculate UDP/IP checksum */
 634};
 635
 636static const struct rtl_tx_desc_info {
 637	struct {
 638		u32 udp;
 639		u32 tcp;
 640	} checksum;
 641	u16 mss_shift;
 642	u16 opts_offset;
 643} tx_desc_info [] = {
 644	[RTL_TD_0] = {
 645		.checksum = {
 646			.udp	= TD0_IP_CS | TD0_UDP_CS,
 647			.tcp	= TD0_IP_CS | TD0_TCP_CS
 648		},
 649		.mss_shift	= TD0_MSS_SHIFT,
 650		.opts_offset	= 0
 651	},
 652	[RTL_TD_1] = {
 653		.checksum = {
 654			.udp	= TD1_IP_CS | TD1_UDP_CS,
 655			.tcp	= TD1_IP_CS | TD1_TCP_CS
 656		},
 657		.mss_shift	= TD1_MSS_SHIFT,
 658		.opts_offset	= 1
 659	}
 660};
 661
 662enum rtl_rx_desc_bit {
 663	/* Rx private */
 664	PID1		= (1 << 18), /* Protocol ID bit 1/2 */
 665	PID0		= (1 << 17), /* Protocol ID bit 2/2 */
 666
 667#define RxProtoUDP	(PID1)
 668#define RxProtoTCP	(PID0)
 669#define RxProtoIP	(PID1 | PID0)
 670#define RxProtoMask	RxProtoIP
 671
 672	IPFail		= (1 << 16), /* IP checksum failed */
 673	UDPFail		= (1 << 15), /* UDP/IP checksum failed */
 674	TCPFail		= (1 << 14), /* TCP/IP checksum failed */
 675	RxVlanTag	= (1 << 16), /* VLAN tag available */
 676};
 677
 678#define RsvdMask	0x3fffc000
 679
 680struct TxDesc {
 681	__le32 opts1;
 682	__le32 opts2;
 683	__le64 addr;
 684};
 685
 686struct RxDesc {
 687	__le32 opts1;
 688	__le32 opts2;
 689	__le64 addr;
 690};
 691
 692struct ring_info {
 693	struct sk_buff	*skb;
 694	u32		len;
 695	u8		__pad[sizeof(void *) - sizeof(u32)];
 696};
 697
 698enum features {
 699	RTL_FEATURE_WOL		= (1 << 0),
 700	RTL_FEATURE_MSI		= (1 << 1),
 701	RTL_FEATURE_GMII	= (1 << 2),
 702};
 703
/*
 * Hardware statistics dump area, filled by the chip via the
 * CounterAddrLow/High DMA mechanism.  Layout and field order mirror the
 * device's counter block, hence the little-endian fixed-width types.
 */
struct rtl8169_counters {
	__le64	tx_packets;
	__le64	rx_packets;
	__le64	tx_errors;
	__le32	rx_errors;
	__le16	rx_missed;
	__le16	align_errors;
	__le32	tx_one_collision;
	__le32	tx_multi_collision;
	__le64	rx_unicast;
	__le64	rx_broadcast;
	__le32	rx_multicast;
	__le16	tx_aborted;
	__le16	tx_underun;	/* sic — identifier spelling kept; renaming would break users */
};
 719
 
 
 
 
 
 
 
/* Bit indices for tp->wk.flags: pending deferred work and the task-enabled gate. */
enum rtl_flag {
	RTL_FLAG_TASK_ENABLED,
	RTL_FLAG_TASK_SLOW_PENDING,
	RTL_FLAG_TASK_RESET_PENDING,
	RTL_FLAG_TASK_PHY_PENDING,
	RTL_FLAG_MAX
};
 727
/* Per-direction packet/byte counters, protected by a u64_stats sync point. */
struct rtl8169_stats {
	u64			packets;
	u64			bytes;
	struct u64_stats_sync	syncp;
};
 733
/* Per-adapter driver state, hung off the net_device's private area. */
struct rtl8169_private {
	void __iomem *mmio_addr;	/* memory map physical address */
	struct pci_dev *pci_dev;
	struct net_device *dev;
	struct napi_struct napi;
	u32 msg_enable;
	u16 txd_version;	/* RTL_TD_0 / RTL_TD_1 descriptor layout */
	u16 mac_version;	/* RTL_GIGA_MAC_VER_* */
	u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
	u32 cur_tx; /* Index into the Tx descriptor buffer of next Tx pkt. */
	u32 dirty_tx;
	struct rtl8169_stats rx_stats;
	struct rtl8169_stats tx_stats;
	struct TxDesc *TxDescArray;	/* 256-aligned Tx descriptor ring */
	struct RxDesc *RxDescArray;	/* 256-aligned Rx descriptor ring */
	dma_addr_t TxPhyAddr;
	dma_addr_t RxPhyAddr;
	void *Rx_databuff[NUM_RX_DESC];	/* Rx data buffers */
	struct ring_info tx_skb[NUM_TX_DESC];	/* Tx data buffers */
	struct timer_list timer;
	u16 cp_cmd;	/* cached CPlusCmd register value — TODO confirm against writers */

	u16 event_slow;	/* presumably IRQ events routed to the slow path; verify at users */

	/* Chip-family-specific operation hooks, selected at probe time. */
	struct mdio_ops {
		void (*write)(struct rtl8169_private *, int, int);
		int (*read)(struct rtl8169_private *, int);
	} mdio_ops;

	struct pll_power_ops {
		void (*down)(struct rtl8169_private *);
		void (*up)(struct rtl8169_private *);
	} pll_power_ops;

	struct jumbo_ops {
		void (*enable)(struct rtl8169_private *);
		void (*disable)(struct rtl8169_private *);
	} jumbo_ops;

	struct csi_ops {
		void (*write)(struct rtl8169_private *, int, int);
		u32 (*read)(struct rtl8169_private *, int);
	} csi_ops;

	int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
	int (*get_settings)(struct net_device *, struct ethtool_cmd *);
	void (*phy_reset_enable)(struct rtl8169_private *tp);
	void (*hw_start)(struct net_device *);
	unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
	unsigned int (*link_ok)(void __iomem *);
	int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);

	/* Deferred-work state: flags gate which tasks the work item runs. */
	struct {
		DECLARE_BITMAP(flags, RTL_FLAG_MAX);
		struct mutex mutex;
		struct work_struct work;
	} wk;

	unsigned features;	/* RTL_FEATURE_* bits */

	struct mii_if_info mii;
	struct rtl8169_counters counters;
	u32 saved_wolopts;	/* WoL options preserved across suspend — TODO confirm */
	u32 opts1_mask;

	/* Parsed PHY firmware image, or RTL_FIRMWARE_UNKNOWN before load. */
	struct rtl_fw {
		const struct firmware *fw;

#define RTL_VER_SIZE		32

		char version[RTL_VER_SIZE];

		struct rtl_fw_phy_action {
			__le32 *code;
			size_t size;
		} phy_action;
	} *rtl_fw;
#define RTL_FIRMWARE_UNKNOWN	ERR_PTR(-EAGAIN)

	u32 ocp_base;	/* current OCP PHY register page base */
};
 815
 816MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
 817MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
 818module_param(use_dac, int, 0);
 819MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
 820module_param_named(debug, debug.msg_enable, int, 0);
 821MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
 822MODULE_LICENSE("GPL");
 823MODULE_VERSION(RTL8169_VERSION);
 824MODULE_FIRMWARE(FIRMWARE_8168D_1);
 825MODULE_FIRMWARE(FIRMWARE_8168D_2);
 826MODULE_FIRMWARE(FIRMWARE_8168E_1);
 827MODULE_FIRMWARE(FIRMWARE_8168E_2);
 828MODULE_FIRMWARE(FIRMWARE_8168E_3);
 829MODULE_FIRMWARE(FIRMWARE_8105E_1);
 830MODULE_FIRMWARE(FIRMWARE_8168F_1);
 831MODULE_FIRMWARE(FIRMWARE_8168F_2);
 832MODULE_FIRMWARE(FIRMWARE_8402_1);
 833MODULE_FIRMWARE(FIRMWARE_8411_1);
 834MODULE_FIRMWARE(FIRMWARE_8411_2);
 835MODULE_FIRMWARE(FIRMWARE_8106E_1);
 836MODULE_FIRMWARE(FIRMWARE_8106E_2);
 837MODULE_FIRMWARE(FIRMWARE_8168G_2);
 838MODULE_FIRMWARE(FIRMWARE_8168G_3);
 
 
 
 
 839
/* Serialize against the deferred-work handler (tp->wk.work). */
static void rtl_lock_work(struct rtl8169_private *tp)
{
	mutex_lock(&tp->wk.mutex);
}
 844
/* Release the lock taken by rtl_lock_work(). */
static void rtl_unlock_work(struct rtl8169_private *tp)
{
	mutex_unlock(&tp->wk.mutex);
}
 849
/* Replace the PCIe max read request size field in DEVCTL with @force. */
static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
{
	pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
					   PCI_EXP_DEVCTL_READRQ, force);
}
 855
/* A pollable hardware condition: predicate plus a name for error logs. */
struct rtl_cond {
	bool (*check)(struct rtl8169_private *);
	const char *msg;
};
 860
/* udelay() adapter with the function-pointer shape rtl_loop_wait() expects. */
static void rtl_udelay(unsigned int d)
{
	udelay(d);
}
 865
 866static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c,
 867			  void (*delay)(unsigned int), unsigned int d, int n,
 868			  bool high)
 869{
 870	int i;
 871
 872	for (i = 0; i < n; i++) {
 873		delay(d);
 874		if (c->check(tp) == high)
 875			return true;
 876	}
 877	netif_err(tp, drv, tp->dev, "%s == %d (loop: %d, delay: %d).\n",
 878		  c->msg, !high, n, d);
 879	return false;
 880}
 881
/* Busy-wait (udelay granularity) until the condition reads true. */
static bool rtl_udelay_loop_wait_high(struct rtl8169_private *tp,
				      const struct rtl_cond *c,
				      unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, rtl_udelay, d, n, true);
}

/* Busy-wait (udelay granularity) until the condition reads false. */
static bool rtl_udelay_loop_wait_low(struct rtl8169_private *tp,
				     const struct rtl_cond *c,
				     unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, rtl_udelay, d, n, false);
}

/* Sleep-wait (msleep granularity) until the condition reads true. */
static bool rtl_msleep_loop_wait_high(struct rtl8169_private *tp,
				      const struct rtl_cond *c,
				      unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, msleep, d, n, true);
}

/* Sleep-wait (msleep granularity) until the condition reads false. */
static bool rtl_msleep_loop_wait_low(struct rtl8169_private *tp,
				     const struct rtl_cond *c,
				     unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, msleep, d, n, false);
}
 909
/*
 * Declare a named rtl_cond plus the prototype of its predicate; the
 * macro invocation is immediately followed by the predicate body.
 */
#define DECLARE_RTL_COND(name)				\
static bool name ## _check(struct rtl8169_private *);	\
							\
static const struct rtl_cond name = {			\
	.check	= name ## _check,			\
	.msg	= #name					\
};							\
							\
static bool name ## _check(struct rtl8169_private *tp)
/* True while the OCPAR busy flag is still set. */
DECLARE_RTL_COND(rtl_ocpar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(OCPAR) & OCPAR_FLAG;
}
 926
 927static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
 928{
 929	void __iomem *ioaddr = tp->mmio_addr;
 930
 931	RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
 932
 933	return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ?
 934		RTL_R32(OCPDR) : ~0;
 935}
 936
 937static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
 938{
 939	void __iomem *ioaddr = tp->mmio_addr;
 940
 941	RTL_W32(OCPDR, data);
 942	RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
 943
 944	rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20);
 945}
 946
/* True while the ERIAR busy flag is still set. */
DECLARE_RTL_COND(rtl_eriar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(ERIAR) & ERIAR_FLAG;
}
 953
/*
 * Send an OOB_CMD_* byte to the out-of-band management firmware.
 * The command goes through ERIDR/ERIAR (0x800010e8 — presumably the
 * write command plus the OOB mailbox address; verify against the
 * datasheet), then the firmware is poked via OCP register 0x30.
 */
static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(ERIDR, cmd);
	RTL_W32(ERIAR, 0x800010e8);
	msleep(2);

	/* Bail out if the ERI write never completed. */
	if (!rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 5))
		return;

	ocp_write(tp, 0x1, 0x30, 0x00000001);
}
 967
 968#define OOB_CMD_RESET		0x00
 969#define OOB_CMD_DRIVER_START	0x05
 970#define OOB_CMD_DRIVER_STOP	0x06
 971
 972static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
 973{
 974	return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
 975}
 976
 977DECLARE_RTL_COND(rtl_ocp_read_cond)
 978{
 979	u16 reg;
 980
 981	reg = rtl8168_get_ocp_reg(tp);
 982
 983	return ocp_read(tp, 0x0f, reg) & 0x00000800;
 984}
 985
/* Tell the OOB firmware the driver is taking over, then wait for its ack. */
static void rtl8168_driver_start(struct rtl8169_private *tp)
{
	rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);

	rtl_msleep_loop_wait_high(tp, &rtl_ocp_read_cond, 10, 10);
}
 992
/* Tell the OOB firmware the driver is releasing the NIC, wait for ack. */
static void rtl8168_driver_stop(struct rtl8169_private *tp)
{
	rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);

	rtl_msleep_loop_wait_low(tp, &rtl_ocp_read_cond, 10, 10);
}
 999
1000static int r8168dp_check_dash(struct rtl8169_private *tp)
1001{
1002	u16 reg = rtl8168_get_ocp_reg(tp);
1003
1004	return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0;
1005}
1006
1007static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg)
1008{
1009	if (reg & 0xffff0001) {
1010		netif_err(tp, drv, tp->dev, "Invalid ocp reg %x!\n", reg);
1011		return true;
1012	}
1013	return false;
1014}
1015
/* True while the GPHY_OCP busy flag is still set. */
DECLARE_RTL_COND(rtl_ocp_gphy_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(GPHY_OCP) & OCPAR_FLAG;
}
1022
/* Write @data to PHY OCP register @reg (8168G-style PHY access) and wait
 * for the access flag to clear. Silently drops invalid addresses.
 */
static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_ocp_reg_failure(tp, reg))
		return;

	/* reg is halfword-addressed, hence the 15-bit shift (reg/2 << 16). */
	RTL_W32(GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);

	rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
}
1034
/* Read a PHY OCP register (8168G-style). Returns the 16-bit value, or
 * ~0 (0xffff after truncation) if the access never completes.
 */
static u16 r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_ocp_reg_failure(tp, reg))
		return 0;

	RTL_W32(GPHY_OCP, reg << 15);

	return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
		(RTL_R32(GPHY_OCP) & 0xffff) : ~0;
}
1047
/* Write @data to a MAC OCP register. Unlike the PHY variant there is no
 * completion flag to poll — the write is fire-and-forget.
 */
static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_ocp_reg_failure(tp, reg))
		return;

	RTL_W32(OCPDR, OCPAR_FLAG | (reg << 15) | data);
}
1057
/* Read a MAC OCP register: write the address, then read the data back
 * from the same OCPDR window. Returns 0 on an invalid address.
 */
static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_ocp_reg_failure(tp, reg))
		return 0;

	RTL_W32(OCPDR, reg << 15);

	return RTL_R32(OCPDR);
}
1069
1070#define OCP_STD_PHY_BASE	0xa400
1071
/*
 * MDIO write for 8168G+: plain MII accesses are translated to PHY OCP
 * accesses. Writing reg 0x1f (the page select) only updates the cached
 * OCP base; page 0 maps to OCP_STD_PHY_BASE.
 */
static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
	if (reg == 0x1f) {
		tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE;
		return;
	}

	/* Non-standard pages address registers from 0x10 upward. */
	if (tp->ocp_base != OCP_STD_PHY_BASE)
		reg -= 0x10;

	r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value);
}
1084
/* MDIO read counterpart of r8168g_mdio_write: translate the MII register
 * number into a PHY OCP address using the cached page base.
 */
static int r8168g_mdio_read(struct rtl8169_private *tp, int reg)
{
	/* Non-standard pages address registers from 0x10 upward. */
	if (tp->ocp_base != OCP_STD_PHY_BASE)
		reg -= 0x10;

	return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2);
}
1092
/* MAC MCU write: reg 0x1f selects the OCP page (base = page << 4),
 * everything else is written relative to the cached base.
 */
static void mac_mcu_write(struct rtl8169_private *tp, int reg, int value)
{
	if (reg == 0x1f) {
		tp->ocp_base = value << 4;
		return;
	}

	r8168_mac_ocp_write(tp, tp->ocp_base + reg, value);
}
1102
/* MAC MCU read relative to the page base cached by mac_mcu_write(). */
static int mac_mcu_read(struct rtl8169_private *tp, int reg)
{
	return r8168_mac_ocp_read(tp, tp->ocp_base + reg);
}
1107
/* Poll condition: PHYAR busy flag (bit 31) for classic MDIO accesses. */
DECLARE_RTL_COND(rtl_phyar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(PHYAR) & 0x80000000;
}
1114
/* Classic (pre-8168G) MDIO write through the PHYAR register. */
static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* Bit 31 = write command, bits 20:16 = register, bits 15:0 = data. */
	RTL_W32(PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff));

	rtl_udelay_loop_wait_low(tp, &rtl_phyar_cond, 25, 20);
	/*
	 * According to hardware specs a 20us delay is required after write
	 * complete indication, but before sending next command.
	 */
	udelay(20);
}
1128
/* Classic (pre-8168G) MDIO read through the PHYAR register.
 * Returns the 16-bit value, or ~0 on timeout.
 */
static int r8169_mdio_read(struct rtl8169_private *tp, int reg)
{
	void __iomem *ioaddr = tp->mmio_addr;
	int value;

	/* Bit 31 clear = read command. */
	RTL_W32(PHYAR, 0x0 | (reg & 0x1f) << 16);

	value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ?
		RTL_R32(PHYAR) & 0xffff : ~0;

	/*
	 * According to hardware specs a 20us delay is required after read
	 * complete indication, but before sending next command.
	 */
	udelay(20);

	return value;
}
1147
 
 
 
 
 
 
 
/* 8168DP (first flavour) MDIO access: the PHY sits behind the OCP
 * interface; issue the command via OCPDR/OCPAR and wait for completion.
 */
static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
	RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD);
	RTL_W32(EPHY_RXER_NUM, 0);

	rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
}
1158
/* MDIO write on 8168DP (first flavour): OCP access with the write opcode. */
static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
	r8168dp_1_mdio_access(tp, reg,
			      OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK));
}
1164
/* MDIO read on 8168DP (first flavour): issue the read command, then a
 * second OCP transaction to fetch the result. Returns ~0 on timeout.
 */
static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD);

	mdelay(1);
	RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD);
	RTL_W32(EPHY_RXER_NUM, 0);

	return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
		RTL_R32(OCPDR) & OCPDR_DATA_MASK : ~0;
}
1178
1179#define R8168DP_1_MDIO_ACCESS_BIT	0x00020000
1180
/* Open the MDIO access window on 8168DP (second flavour) by clearing the
 * access-control bit in register 0xd0.
 */
static void r8168dp_2_mdio_start(void __iomem *ioaddr)
{
	RTL_W32(0xd0, RTL_R32(0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT);
}
1185
/* Close the MDIO access window opened by r8168dp_2_mdio_start(). */
static void r8168dp_2_mdio_stop(void __iomem *ioaddr)
{
	RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
}
1190
/* MDIO write on 8168DP (second flavour): bracket a classic PHYAR write
 * with the start/stop access-window toggles.
 */
static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r8168dp_2_mdio_start(ioaddr);

	r8169_mdio_write(tp, reg, value);

	r8168dp_2_mdio_stop(ioaddr);
}
1201
/* MDIO read on 8168DP (second flavour): bracket a classic PHYAR read
 * with the start/stop access-window toggles.
 */
static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
{
	void __iomem *ioaddr = tp->mmio_addr;
	int value;

	r8168dp_2_mdio_start(ioaddr);

	value = r8169_mdio_read(tp, reg);

	r8168dp_2_mdio_stop(ioaddr);

	return value;
}
1215
/* PHY register write through the per-chip mdio_ops dispatch table. */
static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val)
{
	tp->mdio_ops.write(tp, location, val);
}
1220
/* PHY register read through the per-chip mdio_ops dispatch table. */
static int rtl_readphy(struct rtl8169_private *tp, int location)
{
	return tp->mdio_ops.read(tp, location);
}
1225
/* Read-modify-write: OR @value into PHY register @reg_addr. */
static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
{
	int cur = rtl_readphy(tp, reg_addr);

	rtl_writephy(tp, reg_addr, cur | value);
}
1230
/* Read-modify-write a PHY register: set the bits in @p, then clear the
 * bits in @m (clear wins where the masks overlap).
 */
static void rtl_w1w0_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
{
	int val = rtl_readphy(tp, reg_addr);

	val |= p;
	val &= ~m;
	rtl_writephy(tp, reg_addr, val);
}
1238
/* mii_if_info write hook; @phy_id is ignored — the chip has one PHY. */
static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
			   int val)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_writephy(tp, location, val);
}
1246
/* mii_if_info read hook; @phy_id is ignored — the chip has one PHY. */
static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	return rtl_readphy(tp, location);
}
1253
/* Poll condition: EPHYAR access completion flag. */
DECLARE_RTL_COND(rtl_ephyar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(EPHYAR) & EPHYAR_FLAG;
}
1260
/* Write @value to PCIe PHY (EPHY) register @reg_addr and wait for the
 * access to complete, plus a trailing settle delay.
 */
static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
		(reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);

	rtl_udelay_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100);

	udelay(10);
}
1272
/* Read PCIe PHY (EPHY) register @reg_addr; returns ~0 on timeout. */
static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);

	return rtl_udelay_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ?
		RTL_R32(EPHYAR) & EPHYAR_DATA_MASK : ~0;
}
1282
 
 
 
 
 
 
 
/*
 * Write @val to extended register @addr through the ERI interface.
 * @mask selects the byte lanes (ERIAR_MASK_*); @addr must be 32-bit
 * aligned and @mask non-zero — both enforced by the BUG_ON.
 */
static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
			  u32 val, int type)
{
	void __iomem *ioaddr = tp->mmio_addr;

	BUG_ON((addr & 3) || (mask == 0));
	RTL_W32(ERIDR, val);
	RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr);

	rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
}
1294
/* Read extended register @addr (all four byte lanes) via ERI.
 * Returns ~0 on timeout.
 */
static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);

	return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
		RTL_R32(ERIDR) : ~0;
}
1304
/* Read-modify-write an ERI register: clear the bits in @m, set the bits
 * in @p, writing back only the lanes selected by @mask.
 */
static void rtl_w1w0_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p,
			 u32 m, int type)
{
	u32 val;

	val = rtl_eri_read(tp, addr, type);
	rtl_eri_write(tp, addr, mask, (val & ~m) | p, type);
}
1313
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* One entry of an EXGMAC (ERI) register initialisation table. */
struct exgmac_reg {
	u16 addr;	/* ERI register address */
	u16 mask;	/* byte-lane mask (ERIAR_MASK_*) */
	u32 val;	/* value to write */
};
1319
1320static void rtl_write_exgmac_batch(struct rtl8169_private *tp,
1321				   const struct exgmac_reg *r, int len)
1322{
1323	while (len-- > 0) {
1324		rtl_eri_write(tp, r->addr, r->mask, r->val, ERIAR_EXGMAC);
1325		r++;
1326	}
1327}
1328
/* Poll condition: eFuse read-data-valid flag. */
DECLARE_RTL_COND(rtl_efusear_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(EFUSEAR) & EFUSEAR_FLAG;
}
1335
/* Read one byte from the 8168D eFuse at @reg_addr; returns ~0 (0xff
 * after truncation) on timeout.
 */
static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);

	return rtl_udelay_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ?
		RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK : ~0;
}
1345
/* Return the currently pending interrupt events (IntrStatus). */
static u16 rtl_get_events(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R16(IntrStatus);
}
1352
/* Acknowledge (clear) the interrupt events in @bits by writing them back
 * to IntrStatus; mmiowb() orders the MMIO write before lock release.
 */
static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W16(IntrStatus, bits);
	mmiowb();
}
1360
/* Mask all device interrupts by clearing IntrMask. */
static void rtl_irq_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W16(IntrMask, 0);
	mmiowb();
}
1368
/* Unmask exactly the interrupt sources in @bits. */
static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W16(IntrMask, bits);
}
1375
1376#define RTL_EVENT_NAPI_RX	(RxOK | RxErr)
1377#define RTL_EVENT_NAPI_TX	(TxOK | TxErr)
1378#define RTL_EVENT_NAPI		(RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX)
1379
/* Unmask the NAPI rx/tx events plus the chip-specific slow events. */
static void rtl_irq_enable_all(struct rtl8169_private *tp)
{
	rtl_irq_enable(tp, RTL_EVENT_NAPI | tp->event_slow);
}
1384
/* Mask all interrupts and acknowledge anything pending. The trailing
 * ChipCmd read flushes the posted MMIO writes (PCI write posting).
 */
static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_irq_disable(tp);
	rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow);
	RTL_R8(ChipCmd);
}
1393
/* Non-zero while a TBI (fiber) reset is still in progress. */
static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(TBICSR) & TBIReset;
}
1400
/* Non-zero while the copper PHY's self-clearing BMCR reset bit is set. */
static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp)
{
	return rtl_readphy(tp, MII_BMCR) & BMCR_RESET;
}
1405
/* Non-zero when the TBI (fiber) link is up. */
static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
{
	return RTL_R32(TBICSR) & TBILinkOk;
}
1410
/* Non-zero when the copper PHY reports link-up in PHYstatus. */
static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
{
	return RTL_R8(PHYstatus) & LinkStatus;
}
1415
/* Kick off a TBI (fiber) reset by setting the TBIReset bit. */
static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
}
1422
1423static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp)
1424{
1425	unsigned int val;
1426
1427	val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET;
1428	rtl_writephy(tp, MII_BMCR, val & 0xffff);
1429}
1430
/*
 * Apply chip-specific ERI tweaks after a link state change. The magic
 * register values per speed come from Realtek; only MAC versions 34-38
 * need this. No-op when the interface is down.
 */
static void rtl_link_chg_patch(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct net_device *dev = tp->dev;

	if (!netif_running(dev))
		return;

	if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_38) {
		/* Distinct 0x1bc/0x1dc settings for 1000F / 100 / other. */
		if (RTL_R8(PHYstatus) & _1000bpsF) {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
				      ERIAR_EXGMAC);
		} else if (RTL_R8(PHYstatus) & _100bps) {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
				      ERIAR_EXGMAC);
		} else {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
				      ERIAR_EXGMAC);
		}
		/* Reset packet filter */
		rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01,
			     ERIAR_EXGMAC);
		rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
			     ERIAR_EXGMAC);
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_36) {
		if (RTL_R8(PHYstatus) & _1000bpsF) {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
				      ERIAR_EXGMAC);
		} else {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
				      ERIAR_EXGMAC);
		}
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
		if (RTL_R8(PHYstatus) & _10bps) {
			rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060,
				      ERIAR_EXGMAC);
		} else {
			rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000,
				      ERIAR_EXGMAC);
		}
	}
}
1487
/*
 * Propagate the hardware link state to the netdev carrier state.
 * When @pm is true, a link-up also cancels a pending runtime suspend and
 * a link-down schedules one (5 s grace period).
 */
static void __rtl8169_check_link_status(struct net_device *dev,
					struct rtl8169_private *tp,
					void __iomem *ioaddr, bool pm)
{
	if (tp->link_ok(ioaddr)) {
		rtl_link_chg_patch(tp);
		/* This is to cancel a scheduled suspend if there's one. */
		if (pm)
			pm_request_resume(&tp->pci_dev->dev);
		netif_carrier_on(dev);
		if (net_ratelimit())
			netif_info(tp, ifup, dev, "link up\n");
	} else {
		netif_carrier_off(dev);
		netif_info(tp, ifdown, dev, "link down\n");
		if (pm)
			pm_schedule_suspend(&tp->pci_dev->dev, 5000);
	}
}
1507
/* Link-state check without runtime-PM interaction. */
static void rtl8169_check_link_status(struct net_device *dev,
				      struct rtl8169_private *tp,
				      void __iomem *ioaddr)
{
	__rtl8169_check_link_status(dev, tp, ioaddr, false);
}
1514
1515#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
1516
/*
 * Translate the chip's Config1/3/5 wake-up bits into WAKE_* flags.
 * Returns 0 immediately when PME is disabled (Config1.PMEnable clear).
 */
static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	u8 options;
	u32 wolopts = 0;

	options = RTL_R8(Config1);
	if (!(options & PMEnable))
		return 0;

	options = RTL_R8(Config3);
	if (options & LinkUp)
		wolopts |= WAKE_PHY;
	if (options & MagicPacket)
		wolopts |= WAKE_MAGIC;

	options = RTL_R8(Config5);
	if (options & UWF)
		wolopts |= WAKE_UCAST;
	if (options & BWF)
		wolopts |= WAKE_BCAST;
	if (options & MWF)
		wolopts |= WAKE_MCAST;

	return wolopts;
}
1543
/* ethtool .get_wol: report supported and currently enabled WoL modes. */
static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_lock_work(tp);

	wol->supported = WAKE_ANY;
	wol->wolopts = __rtl8169_get_wol(tp);

	rtl_unlock_work(tp);
}
1555
/*
 * Program the chip's Config registers from @wolopts. Each WAKE_* flag
 * maps to one bit in Config3/Config5 (table below); the PME enable bit
 * lives in Config1 on old chips and Config2 (PME_SIGNAL) on newer ones.
 * The Cfg9346 unlock/lock pair is required to make Config* writable.
 */
static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
{
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int i;
	static const struct {
		u32 opt;
		u16 reg;
		u8  mask;
	} cfg[] = {
		{ WAKE_PHY,   Config3, LinkUp },
		{ WAKE_MAGIC, Config3, MagicPacket },
		{ WAKE_UCAST, Config5, UWF },
		{ WAKE_BCAST, Config5, BWF },
		{ WAKE_MCAST, Config5, MWF },
		{ WAKE_ANY,   Config5, LanWake }
	};
	u8 options;

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	for (i = 0; i < ARRAY_SIZE(cfg); i++) {
		options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
		if (wolopts & cfg[i].opt)
			options |= cfg[i].mask;
		RTL_W8(cfg[i].reg, options);
	}

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17:
		options = RTL_R8(Config1) & ~PMEnable;
		if (wolopts)
			options |= PMEnable;
		RTL_W8(Config1, options);
		break;
	default:
		options = RTL_R8(Config2) & ~PME_SIGNAL;
		if (wolopts)
			options |= PME_SIGNAL;
		RTL_W8(Config2, options);
		break;
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);
}
1600
/* ethtool .set_wol: program the chip, mirror the choice in tp->features
 * and tell the PM core whether the device may wake the system.
 */
static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_lock_work(tp);

	if (wol->wolopts)
		tp->features |= RTL_FEATURE_WOL;
	else
		tp->features &= ~RTL_FEATURE_WOL;
	__rtl8169_set_wol(tp, wol->wolopts);

	rtl_unlock_work(tp);

	device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);

	return 0;
}
1619
/* Firmware file name for this chip revision; may be NULL (no firmware). */
static const char *rtl_lookup_firmware_name(struct rtl8169_private *tp)
{
	return rtl_chip_infos[tp->mac_version].fw_name;
}
1624
/* ethtool .get_drvinfo: driver name/version, PCI slot, and firmware
 * version when a firmware image has been loaded.
 */
static void rtl8169_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct rtl_fw *rtl_fw = tp->rtl_fw;

	strlcpy(info->driver, MODULENAME, sizeof(info->driver));
	strlcpy(info->version, RTL8169_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
	BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
	/* rtl_fw may be NULL or an ERR_PTR while no firmware is loaded. */
	if (!IS_ERR_OR_NULL(rtl_fw))
		strlcpy(info->fw_version, rtl_fw->version,
			sizeof(info->fw_version));
}
1639
/* ethtool .get_regs_len: size of the register dump in bytes. */
static int rtl8169_get_regs_len(struct net_device *dev)
{
	return R8169_REGS_SIZE;
}
1644
/*
 * Speed setting for TBI (fiber) mode: only autoneg or forced
 * 1000/full are meaningful; anything else is refused with -EOPNOTSUPP.
 * @ignored (advertising mask) is unused in TBI mode.
 */
static int rtl8169_set_speed_tbi(struct net_device *dev,
				 u8 autoneg, u16 speed, u8 duplex, u32 ignored)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	int ret = 0;
	u32 reg;

	reg = RTL_R32(TBICSR);
	if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
	    (duplex == DUPLEX_FULL)) {
		RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart));
	} else if (autoneg == AUTONEG_ENABLE)
		RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
	else {
		netif_warn(tp, link, dev,
			   "incorrect speed setting refused in TBI mode\n");
		ret = -EOPNOTSUPP;
	}

	return ret;
}
1667
/*
 * Speed setting for copper (MII/GMII) mode. With autoneg, build the
 * ADVERTISE/CTRL1000 masks from @adv; without it, force speed/duplex in
 * BMCR (forced 1000 is not supported — falls through to -EINVAL).
 * Versions 02/03 additionally need undocumented reg 0x17/0x0e tweaks
 * for forced-100 operation.
 */
static int rtl8169_set_speed_xmii(struct net_device *dev,
				  u8 autoneg, u16 speed, u8 duplex, u32 adv)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int giga_ctrl, bmcr;
	int rc = -EINVAL;

	/* Select PHY page 0 before touching the standard registers. */
	rtl_writephy(tp, 0x1f, 0x0000);

	if (autoneg == AUTONEG_ENABLE) {
		int auto_nego;

		auto_nego = rtl_readphy(tp, MII_ADVERTISE);
		auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
				ADVERTISE_100HALF | ADVERTISE_100FULL);

		if (adv & ADVERTISED_10baseT_Half)
			auto_nego |= ADVERTISE_10HALF;
		if (adv & ADVERTISED_10baseT_Full)
			auto_nego |= ADVERTISE_10FULL;
		if (adv & ADVERTISED_100baseT_Half)
			auto_nego |= ADVERTISE_100HALF;
		if (adv & ADVERTISED_100baseT_Full)
			auto_nego |= ADVERTISE_100FULL;

		/* Always advertise symmetric + asymmetric pause. */
		auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

		giga_ctrl = rtl_readphy(tp, MII_CTRL1000);
		giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);

		/* The 8100e/8101e/8102e do Fast Ethernet only. */
		if (tp->mii.supports_gmii) {
			if (adv & ADVERTISED_1000baseT_Half)
				giga_ctrl |= ADVERTISE_1000HALF;
			if (adv & ADVERTISED_1000baseT_Full)
				giga_ctrl |= ADVERTISE_1000FULL;
		} else if (adv & (ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full)) {
			netif_info(tp, link, dev,
				   "PHY does not support 1000Mbps\n");
			goto out;
		}

		bmcr = BMCR_ANENABLE | BMCR_ANRESTART;

		rtl_writephy(tp, MII_ADVERTISE, auto_nego);
		rtl_writephy(tp, MII_CTRL1000, giga_ctrl);
	} else {
		giga_ctrl = 0;

		if (speed == SPEED_10)
			bmcr = 0;
		else if (speed == SPEED_100)
			bmcr = BMCR_SPEED100;
		else
			goto out;

		if (duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}

	rtl_writephy(tp, MII_BMCR, bmcr);

	if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03) {
		/* Vendor-specific tweak for forced 100 Mb/s on ver 02/03. */
		if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) {
			rtl_writephy(tp, 0x17, 0x2138);
			rtl_writephy(tp, 0x0e, 0x0260);
		} else {
			rtl_writephy(tp, 0x17, 0x2108);
			rtl_writephy(tp, 0x0e, 0x0000);
		}
	}

	rc = 0;
out:
	return rc;
}
1746
/*
 * Common speed-setting entry: dispatch to the TBI or XMII handler and,
 * when gigabit autoneg was requested on a running interface, arm the
 * PHY timer to re-check link establishment.
 */
static int rtl8169_set_speed(struct net_device *dev,
			     u8 autoneg, u16 speed, u8 duplex, u32 advertising)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int ret;

	ret = tp->set_speed(dev, autoneg, speed, duplex, advertising);
	if (ret < 0)
		goto out;

	if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) &&
	    (advertising & ADVERTISED_1000baseT_Full)) {

		mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
	}
out:
	return ret;
}
1764
/* ethtool .set_settings: stop the PHY timer first so it cannot race the
 * reconfiguration, then apply the new link parameters under the lock.
 */
static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int ret;

	del_timer_sync(&tp->timer);

	rtl_lock_work(tp);
	ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
				cmd->duplex, cmd->advertising);
	rtl_unlock_work(tp);

	return ret;
}
1779
/* ndo_fix_features: drop features the current MTU cannot support —
 * TSO above TD_MSS_MAX, and tx checksumming on jumbo frames for chips
 * without jumbo_tx_csum capability.
 */
static netdev_features_t rtl8169_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	if (dev->mtu > TD_MSS_MAX)
		features &= ~NETIF_F_ALL_TSO;

	if (dev->mtu > JUMBO_1K &&
	    !rtl_chip_infos[tp->mac_version].jumbo_tx_csum)
		features &= ~NETIF_F_IP_CSUM;

	return features;
}
1794
1795static void __rtl8169_set_features(struct net_device *dev,
1796				   netdev_features_t features)
1797{
1798	struct rtl8169_private *tp = netdev_priv(dev);
1799	netdev_features_t changed = features ^ dev->features;
1800	void __iomem *ioaddr = tp->mmio_addr;
 
1801
1802	if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM |
1803			 NETIF_F_HW_VLAN_CTAG_RX)))
1804		return;
 
 
1805
1806	if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX)) {
1807		if (features & NETIF_F_RXCSUM)
1808			tp->cp_cmd |= RxChkSum;
1809		else
1810			tp->cp_cmd &= ~RxChkSum;
1811
1812		if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
1813			tp->cp_cmd |= RxVlan;
1814		else
1815			tp->cp_cmd &= ~RxVlan;
1816
1817		RTL_W16(CPlusCmd, tp->cp_cmd);
1818		RTL_R16(CPlusCmd);
1819	}
1820	if (changed & NETIF_F_RXALL) {
1821		int tmp = (RTL_R32(RxConfig) & ~(AcceptErr | AcceptRunt));
1822		if (features & NETIF_F_RXALL)
1823			tmp |= (AcceptErr | AcceptRunt);
1824		RTL_W32(RxConfig, tmp);
1825	}
1826}
1827
/* ndo_set_features: apply the feature change under the work lock. */
static int rtl8169_set_features(struct net_device *dev,
				netdev_features_t features)
{
	struct rtl8169_private *tp = netdev_priv(dev);


	rtl_lock_work(tp);
	__rtl8169_set_features(dev, features);

	rtl_unlock_work(tp);

	return 0;
}
1839
1840
1841static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb)
1842{
1843	return (vlan_tx_tag_present(skb)) ?
1844		TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
1845}
1846
/* Extract a hardware-stripped VLAN tag from the rx descriptor (stored
 * byte-swapped in opts2) and attach it to the skb.
 */
static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
{
	u32 opts2 = le32_to_cpu(desc->opts2);

	if (opts2 & RxVlanTag)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));
}
1854
/* ethtool get_settings for TBI (fiber) mode: always 1000/full over
 * fibre; autoneg state mirrors the TBINwEnable bit.
 */
static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 status;

	cmd->supported =
		SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
	cmd->port = PORT_FIBRE;
	cmd->transceiver = XCVR_INTERNAL;

	status = RTL_R32(TBICSR);
	cmd->advertising = (status & TBINwEnable) ?  ADVERTISED_Autoneg : 0;
	cmd->autoneg = !!(status & TBINwEnable);

	ethtool_cmd_speed_set(cmd, SPEED_1000);
	cmd->duplex = DUPLEX_FULL; /* Always set */

	return 0;
}
1875
/* ethtool get_settings for copper mode: delegate to the generic MII
 * library using the driver's mii_if_info.
 */
static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	return mii_ethtool_gset(&tp->mii, cmd);
}
1882
/* ethtool .get_settings: dispatch to the TBI/XMII getter under the lock. */
static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int rc;

	rtl_lock_work(tp);
	rc = tp->get_settings(dev, cmd);
	rtl_unlock_work(tp);

	return rc;
}
1894
/* ethtool .get_regs: copy the whole MMIO register window into @p,
 * 32 bits at a time, under the work lock.
 */
static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			     void *p)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	u32 __iomem *data = tp->mmio_addr;
	u32 *dw = p;
	int i;

	rtl_lock_work(tp);
	for (i = 0; i < R8169_REGS_SIZE; i += 4)
		memcpy_fromio(dw++, data++, 4);
	rtl_unlock_work(tp);
}
1908
/* ethtool .get_msglevel: return the netif message-enable bitmap. */
static u32 rtl8169_get_msglevel(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	return tp->msg_enable;
}
1915
/* ethtool .set_msglevel: store the netif message-enable bitmap. */
static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	tp->msg_enable = value;
}
1922
/* ethtool statistics names — order must match the data[] indices filled
 * in rtl8169_get_ethtool_stats().
 */
static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
	"tx_packets",
	"rx_packets",
	"tx_errors",
	"rx_errors",
	"rx_missed",
	"align_errors",
	"tx_single_collisions",
	"tx_multi_collisions",
	"unicast",
	"broadcast",
	"multicast",
	"tx_aborted",
	"tx_underrun",
};
1938
1939static int rtl8169_get_sset_count(struct net_device *dev, int sset)
1940{
1941	switch (sset) {
1942	case ETH_SS_STATS:
1943		return ARRAY_SIZE(rtl8169_gstrings);
1944	default:
1945		return -EOPNOTSUPP;
1946	}
1947}
1948
/* Poll condition: tally-counter dump still in progress. */
DECLARE_RTL_COND(rtl_counters_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(CounterAddrLow) & CounterDump;
}
1955
/*
 * Ask the chip to DMA its hardware tally counters into a temporary
 * coherent buffer and cache them in tp->counters. Best-effort: silently
 * skipped when the receiver is off or the allocation fails, and the
 * cached values are left untouched if the dump times out.
 */
static void rtl8169_update_counters(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct device *d = &tp->pci_dev->dev;
	struct rtl8169_counters *counters;
	dma_addr_t paddr;
	u32 cmd;

	/*
	 * Some chips are unable to dump tally counters when the receiver
	 * is disabled.
	 */
	if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
		return;

	counters = dma_alloc_coherent(d, sizeof(*counters), &paddr, GFP_KERNEL);
	if (!counters)
		return;

	/* Program the DMA target, then set CounterDump to start the dump. */
	RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
	cmd = (u64)paddr & DMA_BIT_MASK(32);
	RTL_W32(CounterAddrLow, cmd);
	RTL_W32(CounterAddrLow, cmd | CounterDump);

	if (rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000))
		memcpy(&tp->counters, counters, sizeof(*counters));

	RTL_W32(CounterAddrLow, 0);
	RTL_W32(CounterAddrHigh, 0);

	dma_free_coherent(d, sizeof(*counters), counters, paddr);
}
1989
/* ethtool .get_ethtool_stats: refresh the hardware tally counters and
 * export them in the order declared by rtl8169_gstrings.
 */
static void rtl8169_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	ASSERT_RTNL();

	rtl8169_update_counters(dev);

	data[0] = le64_to_cpu(tp->counters.tx_packets);
	data[1] = le64_to_cpu(tp->counters.rx_packets);
	data[2] = le64_to_cpu(tp->counters.tx_errors);
	data[3] = le32_to_cpu(tp->counters.rx_errors);
	data[4] = le16_to_cpu(tp->counters.rx_missed);
	data[5] = le16_to_cpu(tp->counters.align_errors);
	data[6] = le32_to_cpu(tp->counters.tx_one_collision);
	data[7] = le32_to_cpu(tp->counters.tx_multi_collision);
	data[8] = le64_to_cpu(tp->counters.rx_unicast);
	data[9] = le64_to_cpu(tp->counters.rx_broadcast);
	data[10] = le32_to_cpu(tp->counters.rx_multicast);
	data[11] = le16_to_cpu(tp->counters.tx_aborted);
	data[12] = le16_to_cpu(tp->counters.tx_underun);
}
2013
2014static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2015{
2016	switch(stringset) {
2017	case ETH_SS_STATS:
2018		memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
2019		break;
2020	}
2021}
2022
/* ethtool operations table for the r8169 driver. */
static const struct ethtool_ops rtl8169_ethtool_ops = {
	.get_drvinfo		= rtl8169_get_drvinfo,
	.get_regs_len		= rtl8169_get_regs_len,
	.get_link		= ethtool_op_get_link,
	.get_settings		= rtl8169_get_settings,
	.set_settings		= rtl8169_set_settings,
	.get_msglevel		= rtl8169_get_msglevel,
	.set_msglevel		= rtl8169_set_msglevel,
	.get_regs		= rtl8169_get_regs,
	.get_wol		= rtl8169_get_wol,
	.set_wol		= rtl8169_set_wol,
	.get_strings		= rtl8169_get_strings,
	.get_sset_count		= rtl8169_get_sset_count,
	.get_ethtool_stats	= rtl8169_get_ethtool_stats,
	.get_ts_info		= ethtool_op_get_ts_info,
};
2039
2040static void rtl8169_get_mac_version(struct rtl8169_private *tp,
2041				    struct net_device *dev, u8 default_version)
2042{
2043	void __iomem *ioaddr = tp->mmio_addr;
2044	/*
2045	 * The driver currently handles the 8168Bf and the 8168Be identically
2046	 * but they can be identified more specifically through the test below
2047	 * if needed:
2048	 *
2049	 * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
2050	 *
2051	 * Same thing for the 8101Eb and the 8101Ec:
2052	 *
2053	 * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
2054	 */
2055	static const struct rtl_mac_info {
2056		u32 mask;
2057		u32 val;
2058		int mac_version;
2059	} mac_info[] = {
 
 
 
 
 
 
 
 
 
2060		/* 8168G family. */
2061		{ 0x7cf00000, 0x5c800000,	RTL_GIGA_MAC_VER_44 },
2062		{ 0x7cf00000, 0x50900000,	RTL_GIGA_MAC_VER_42 },
2063		{ 0x7cf00000, 0x4c100000,	RTL_GIGA_MAC_VER_41 },
2064		{ 0x7cf00000, 0x4c000000,	RTL_GIGA_MAC_VER_40 },
2065
2066		/* 8168F family. */
2067		{ 0x7c800000, 0x48800000,	RTL_GIGA_MAC_VER_38 },
2068		{ 0x7cf00000, 0x48100000,	RTL_GIGA_MAC_VER_36 },
2069		{ 0x7cf00000, 0x48000000,	RTL_GIGA_MAC_VER_35 },
2070
2071		/* 8168E family. */
2072		{ 0x7c800000, 0x2c800000,	RTL_GIGA_MAC_VER_34 },
2073		{ 0x7cf00000, 0x2c200000,	RTL_GIGA_MAC_VER_33 },
2074		{ 0x7cf00000, 0x2c100000,	RTL_GIGA_MAC_VER_32 },
2075		{ 0x7c800000, 0x2c000000,	RTL_GIGA_MAC_VER_33 },
2076
2077		/* 8168D family. */
2078		{ 0x7cf00000, 0x28300000,	RTL_GIGA_MAC_VER_26 },
2079		{ 0x7cf00000, 0x28100000,	RTL_GIGA_MAC_VER_25 },
2080		{ 0x7c800000, 0x28000000,	RTL_GIGA_MAC_VER_26 },
2081
2082		/* 8168DP family. */
2083		{ 0x7cf00000, 0x28800000,	RTL_GIGA_MAC_VER_27 },
2084		{ 0x7cf00000, 0x28a00000,	RTL_GIGA_MAC_VER_28 },
2085		{ 0x7cf00000, 0x28b00000,	RTL_GIGA_MAC_VER_31 },
2086
2087		/* 8168C family. */
2088		{ 0x7cf00000, 0x3cb00000,	RTL_GIGA_MAC_VER_24 },
2089		{ 0x7cf00000, 0x3c900000,	RTL_GIGA_MAC_VER_23 },
2090		{ 0x7cf00000, 0x3c800000,	RTL_GIGA_MAC_VER_18 },
2091		{ 0x7c800000, 0x3c800000,	RTL_GIGA_MAC_VER_24 },
2092		{ 0x7cf00000, 0x3c000000,	RTL_GIGA_MAC_VER_19 },
2093		{ 0x7cf00000, 0x3c200000,	RTL_GIGA_MAC_VER_20 },
2094		{ 0x7cf00000, 0x3c300000,	RTL_GIGA_MAC_VER_21 },
2095		{ 0x7cf00000, 0x3c400000,	RTL_GIGA_MAC_VER_22 },
2096		{ 0x7c800000, 0x3c000000,	RTL_GIGA_MAC_VER_22 },
2097
2098		/* 8168B family. */
2099		{ 0x7cf00000, 0x38000000,	RTL_GIGA_MAC_VER_12 },
2100		{ 0x7cf00000, 0x38500000,	RTL_GIGA_MAC_VER_17 },
2101		{ 0x7c800000, 0x38000000,	RTL_GIGA_MAC_VER_17 },
2102		{ 0x7c800000, 0x30000000,	RTL_GIGA_MAC_VER_11 },
2103
2104		/* 8101 family. */
2105		{ 0x7cf00000, 0x44900000,	RTL_GIGA_MAC_VER_39 },
2106		{ 0x7c800000, 0x44800000,	RTL_GIGA_MAC_VER_39 },
2107		{ 0x7c800000, 0x44000000,	RTL_GIGA_MAC_VER_37 },
2108		{ 0x7cf00000, 0x40b00000,	RTL_GIGA_MAC_VER_30 },
2109		{ 0x7cf00000, 0x40a00000,	RTL_GIGA_MAC_VER_30 },
2110		{ 0x7cf00000, 0x40900000,	RTL_GIGA_MAC_VER_29 },
2111		{ 0x7c800000, 0x40800000,	RTL_GIGA_MAC_VER_30 },
2112		{ 0x7cf00000, 0x34a00000,	RTL_GIGA_MAC_VER_09 },
2113		{ 0x7cf00000, 0x24a00000,	RTL_GIGA_MAC_VER_09 },
2114		{ 0x7cf00000, 0x34900000,	RTL_GIGA_MAC_VER_08 },
2115		{ 0x7cf00000, 0x24900000,	RTL_GIGA_MAC_VER_08 },
2116		{ 0x7cf00000, 0x34800000,	RTL_GIGA_MAC_VER_07 },
2117		{ 0x7cf00000, 0x24800000,	RTL_GIGA_MAC_VER_07 },
2118		{ 0x7cf00000, 0x34000000,	RTL_GIGA_MAC_VER_13 },
2119		{ 0x7cf00000, 0x34300000,	RTL_GIGA_MAC_VER_10 },
2120		{ 0x7cf00000, 0x34200000,	RTL_GIGA_MAC_VER_16 },
2121		{ 0x7c800000, 0x34800000,	RTL_GIGA_MAC_VER_09 },
2122		{ 0x7c800000, 0x24800000,	RTL_GIGA_MAC_VER_09 },
2123		{ 0x7c800000, 0x34000000,	RTL_GIGA_MAC_VER_16 },
2124		/* FIXME: where did these entries come from ? -- FR */
2125		{ 0xfc800000, 0x38800000,	RTL_GIGA_MAC_VER_15 },
2126		{ 0xfc800000, 0x30800000,	RTL_GIGA_MAC_VER_14 },
2127
2128		/* 8110 family. */
2129		{ 0xfc800000, 0x98000000,	RTL_GIGA_MAC_VER_06 },
2130		{ 0xfc800000, 0x18000000,	RTL_GIGA_MAC_VER_05 },
2131		{ 0xfc800000, 0x10000000,	RTL_GIGA_MAC_VER_04 },
2132		{ 0xfc800000, 0x04000000,	RTL_GIGA_MAC_VER_03 },
2133		{ 0xfc800000, 0x00800000,	RTL_GIGA_MAC_VER_02 },
2134		{ 0xfc800000, 0x00000000,	RTL_GIGA_MAC_VER_01 },
2135
2136		/* Catch-all */
2137		{ 0x00000000, 0x00000000,	RTL_GIGA_MAC_NONE   }
2138	};
2139	const struct rtl_mac_info *p = mac_info;
2140	u32 reg;
2141
2142	reg = RTL_R32(TxConfig);
2143	while ((reg & p->mask) != p->val)
2144		p++;
2145	tp->mac_version = p->mac_version;
2146
2147	if (tp->mac_version == RTL_GIGA_MAC_NONE) {
2148		netif_notice(tp, probe, dev,
2149			     "unknown MAC, using family default\n");
2150		tp->mac_version = default_version;
2151	} else if (tp->mac_version == RTL_GIGA_MAC_VER_42) {
2152		tp->mac_version = tp->mii.supports_gmii ?
2153				  RTL_GIGA_MAC_VER_42 :
2154				  RTL_GIGA_MAC_VER_43;
 
 
 
 
 
 
 
 
2155	}
2156}
2157
/* Debug-only dump of the MAC version detected from the TxConfig register. */
static void rtl8169_print_mac_version(struct rtl8169_private *tp)
{
	dprintk("mac_version = 0x%02x\n", tp->mac_version);
}
2162
/* One entry of a PHY init table: write @val to PHY register @reg. */
struct phy_reg {
	u16 reg;	/* PHY register index */
	u16 val;	/* value to write */
};
2167
2168static void rtl_writephy_batch(struct rtl8169_private *tp,
2169			       const struct phy_reg *regs, int len)
2170{
2171	while (len-- > 0) {
2172		rtl_writephy(tp, regs->reg, regs->val);
2173		regs++;
2174	}
2175}
2176
/*
 * Firmware opcode encoding, interpreted by rtl_phy_write_fw():
 * bits 31-28 opcode, bits 27-16 register number or jump/skip distance
 * (regno), bits 15-0 immediate data.
 */
#define PHY_READ		0x00000000	/* predata = phy[regno]; count++ */
#define PHY_DATA_OR		0x10000000	/* predata |= data */
#define PHY_DATA_AND		0x20000000	/* predata &= data */
#define PHY_BJMPN		0x30000000	/* branch back regno opcodes */
#define PHY_MDIO_CHG		0x40000000	/* data==1: MAC MCU mdio ops, data==0: restore */
#define PHY_CLEAR_READCOUNT	0x70000000	/* count = 0 */
#define PHY_WRITE		0x80000000	/* phy[regno] = data */
#define PHY_READCOUNT_EQ_SKIP	0x90000000	/* skip next opcode if count == data */
#define PHY_COMP_EQ_SKIPN	0xa0000000	/* skip regno opcodes if predata == data */
#define PHY_COMP_NEQ_SKIPN	0xb0000000	/* skip regno opcodes if predata != data */
#define PHY_WRITE_PREVIOUS	0xc0000000	/* phy[regno] = predata */
#define PHY_SKIPN		0xd0000000	/* unconditionally skip regno opcodes */
#define PHY_DELAY_MS		0xe0000000	/* mdelay(data) */
2190
/*
 * Header of a "new style" firmware image (magic == 0, see
 * rtl_fw_format_ok()).  Offsets/lengths are little-endian; the bytes of
 * the whole file must sum to zero mod 256 (chksum presumably chosen to
 * balance the sum — verified, not recomputed, by the driver).
 */
struct fw_info {
	u32	magic;			/* 0 selects the headered format */
	char	version[RTL_VER_SIZE];	/* human-readable version string */
	__le32	fw_start;		/* byte offset of the opcode stream */
	__le32	fw_len;			/* opcode count (FW_OPCODE_SIZE units) */
	u8	chksum;			/* checksum balance byte */
} __packed;

/* Size in bytes of one firmware opcode (a __le32). */
#define FW_OPCODE_SIZE	sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code))
2200
2201static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2202{
2203	const struct firmware *fw = rtl_fw->fw;
2204	struct fw_info *fw_info = (struct fw_info *)fw->data;
2205	struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
2206	char *version = rtl_fw->version;
2207	bool rc = false;
2208
2209	if (fw->size < FW_OPCODE_SIZE)
2210		goto out;
2211
2212	if (!fw_info->magic) {
2213		size_t i, size, start;
2214		u8 checksum = 0;
2215
2216		if (fw->size < sizeof(*fw_info))
2217			goto out;
2218
2219		for (i = 0; i < fw->size; i++)
2220			checksum += fw->data[i];
2221		if (checksum != 0)
2222			goto out;
2223
2224		start = le32_to_cpu(fw_info->fw_start);
2225		if (start > fw->size)
2226			goto out;
2227
2228		size = le32_to_cpu(fw_info->fw_len);
2229		if (size > (fw->size - start) / FW_OPCODE_SIZE)
2230			goto out;
2231
2232		memcpy(version, fw_info->version, RTL_VER_SIZE);
2233
2234		pa->code = (__le32 *)(fw->data + start);
2235		pa->size = size;
2236	} else {
2237		if (fw->size % FW_OPCODE_SIZE)
2238			goto out;
2239
2240		strlcpy(version, rtl_lookup_firmware_name(tp), RTL_VER_SIZE);
2241
2242		pa->code = (__le32 *)fw->data;
2243		pa->size = fw->size / FW_OPCODE_SIZE;
2244	}
2245	version[RTL_VER_SIZE - 1] = 0;
2246
2247	rc = true;
2248out:
2249	return rc;
2250}
2251
/*
 * Statically validate a firmware opcode stream before it is executed:
 * reject unknown opcodes and any jump/skip whose target would leave the
 * stream.  The checks must mirror the interpreter in rtl_phy_write_fw().
 */
static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev,
			   struct rtl_fw_phy_action *pa)
{
	bool rc = false;
	size_t index;

	for (index = 0; index < pa->size; index++) {
		u32 action = le32_to_cpu(pa->code[index]);
		u32 regno = (action & 0x0fff0000) >> 16;

		switch(action & 0xf0000000) {
		/* Straight-line opcodes: no control transfer to check. */
		case PHY_READ:
		case PHY_DATA_OR:
		case PHY_DATA_AND:
		case PHY_MDIO_CHG:
		case PHY_CLEAR_READCOUNT:
		case PHY_WRITE:
		case PHY_WRITE_PREVIOUS:
		case PHY_DELAY_MS:
			break;

		case PHY_BJMPN:
			/* Backward branch must not reach before opcode 0. */
			if (regno > index) {
				netif_err(tp, ifup, tp->dev,
					  "Out of range of firmware\n");
				goto out;
			}
			break;
		case PHY_READCOUNT_EQ_SKIP:
			/* May skip one opcode: index + 2 must stay inside. */
			if (index + 2 >= pa->size) {
				netif_err(tp, ifup, tp->dev,
					  "Out of range of firmware\n");
				goto out;
			}
			break;
		case PHY_COMP_EQ_SKIPN:
		case PHY_COMP_NEQ_SKIPN:
		case PHY_SKIPN:
			/* May skip regno opcodes beyond the next one. */
			if (index + 1 + regno >= pa->size) {
				netif_err(tp, ifup, tp->dev,
					  "Out of range of firmware\n");
				goto out;
			}
			break;

		default:
			netif_err(tp, ifup, tp->dev,
				  "Invalid action 0x%08x\n", action);
			goto out;
		}
	}
	rc = true;
out:
	return rc;
}
2307
2308static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2309{
2310	struct net_device *dev = tp->dev;
2311	int rc = -EINVAL;
2312
2313	if (!rtl_fw_format_ok(tp, rtl_fw)) {
2314		netif_err(tp, ifup, dev, "invalid firwmare\n");
2315		goto out;
2316	}
2317
2318	if (rtl_fw_data_ok(tp, dev, &rtl_fw->phy_action))
2319		rc = 0;
2320out:
2321	return rc;
2322}
2323
/*
 * Execute a validated firmware opcode stream (see rtl_fw_data_ok)
 * against the PHY.  Interpreter state: @predata holds the last value
 * read (operand of OR/AND/WRITE_PREVIOUS), @count counts PHY_READs
 * since the last PHY_CLEAR_READCOUNT, @index is the instruction
 * pointer.  A zero opcode terminates execution early.  The original
 * mdio accessors are saved up front and unconditionally restored at
 * the end, since PHY_MDIO_CHG may have redirected them to the MAC MCU.
 */
static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
{
	struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
	struct mdio_ops org, *ops = &tp->mdio_ops;
	u32 predata, count;
	size_t index;

	predata = count = 0;
	org.write = ops->write;
	org.read = ops->read;

	for (index = 0; index < pa->size; ) {
		u32 action = le32_to_cpu(pa->code[index]);
		u32 data = action & 0x0000ffff;
		u32 regno = (action & 0x0fff0000) >> 16;

		if (!action)
			break;

		switch(action & 0xf0000000) {
		case PHY_READ:
			predata = rtl_readphy(tp, regno);
			count++;
			index++;
			break;
		case PHY_DATA_OR:
			predata |= data;
			index++;
			break;
		case PHY_DATA_AND:
			predata &= data;
			index++;
			break;
		case PHY_BJMPN:
			/* Branch backwards by regno opcodes (no index++). */
			index -= regno;
			break;
		case PHY_MDIO_CHG:
			/* Swap mdio accessors: 0 restores the originals,
			 * 1 selects the MAC MCU accessors.  Other values
			 * leave the current accessors in place.
			 */
			if (data == 0) {
				ops->write = org.write;
				ops->read = org.read;
			} else if (data == 1) {
				ops->write = mac_mcu_write;
				ops->read = mac_mcu_read;
			}

			index++;
			break;
		case PHY_CLEAR_READCOUNT:
			count = 0;
			index++;
			break;
		case PHY_WRITE:
			rtl_writephy(tp, regno, data);
			index++;
			break;
		case PHY_READCOUNT_EQ_SKIP:
			index += (count == data) ? 2 : 1;
			break;
		case PHY_COMP_EQ_SKIPN:
			if (predata == data)
				index += regno;
			index++;
			break;
		case PHY_COMP_NEQ_SKIPN:
			if (predata != data)
				index += regno;
			index++;
			break;
		case PHY_WRITE_PREVIOUS:
			rtl_writephy(tp, regno, predata);
			index++;
			break;
		case PHY_SKIPN:
			index += regno + 1;
			break;
		case PHY_DELAY_MS:
			mdelay(data);
			index++;
			break;

		default:
			/* Unreachable: rtl_fw_data_ok rejected bad opcodes. */
			BUG();
		}
	}

	ops->write = org.write;
	ops->read = org.read;
}
2412
2413static void rtl_release_firmware(struct rtl8169_private *tp)
2414{
2415	if (!IS_ERR_OR_NULL(tp->rtl_fw)) {
2416		release_firmware(tp->rtl_fw->fw);
2417		kfree(tp->rtl_fw);
2418	}
2419	tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
2420}
2421
2422static void rtl_apply_firmware(struct rtl8169_private *tp)
2423{
2424	struct rtl_fw *rtl_fw = tp->rtl_fw;
2425
2426	/* TODO: release firmware once rtl_phy_write_fw signals failures. */
2427	if (!IS_ERR_OR_NULL(rtl_fw))
2428		rtl_phy_write_fw(tp, rtl_fw);
2429}
2430
2431static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
2432{
2433	if (rtl_readphy(tp, reg) != val)
2434		netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n");
2435	else
2436		rtl_apply_firmware(tp);
2437}
2438
/*
 * PHY init for the RTL8169s.  The register/value pairs are opaque
 * vendor-provided magic; entries writing reg 0x1f appear to select the
 * PHY register page (Realtek convention — confirm against datasheet).
 * Ordering is significant, do not reorder.
 */
static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x06, 0x006e },
		{ 0x08, 0x0708 },
		{ 0x15, 0x4000 },
		{ 0x18, 0x65c7 },

		{ 0x1f, 0x0001 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x0000 },

		{ 0x03, 0xff41 },
		{ 0x02, 0xdf60 },
		{ 0x01, 0x0140 },
		{ 0x00, 0x0077 },
		{ 0x04, 0x7800 },
		{ 0x04, 0x7000 },

		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf0f9 },
		{ 0x04, 0x9800 },
		{ 0x04, 0x9000 },

		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xa000 },

		{ 0x03, 0xff41 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x0140 },
		{ 0x00, 0x00bb },
		{ 0x04, 0xb800 },
		{ 0x04, 0xb000 },

		{ 0x03, 0xdf41 },
		{ 0x02, 0xdc60 },
		{ 0x01, 0x6340 },
		{ 0x00, 0x007d },
		{ 0x04, 0xd800 },
		{ 0x04, 0xd000 },

		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x100a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0xf000 },

		{ 0x1f, 0x0000 },
		{ 0x0b, 0x0000 },
		{ 0x00, 0x9200 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2505
/* PHY init for the RTL8169sb: a single vendor magic write on page 2. */
static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0002 },
		{ 0x01, 0x90d0 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2516
2517static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
2518{
2519	struct pci_dev *pdev = tp->pci_dev;
2520
2521	if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) ||
2522	    (pdev->subsystem_device != 0xe000))
2523		return;
2524
2525	rtl_writephy(tp, 0x1f, 0x0001);
2526	rtl_writephy(tp, 0x10, 0xf01b);
2527	rtl_writephy(tp, 0x1f, 0x0000);
2528}
2529
/*
 * PHY init for the RTL8169scd (vendor magic table, order-sensitive),
 * followed by a board-specific quirk (see
 * rtl8169scd_hw_phy_config_quirk).
 */
static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x04, 0x0000 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x9000 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0xa000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x10, 0xf41b },
		{ 0x14, 0xfb54 },
		{ 0x18, 0xf5c7 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl8169scd_hw_phy_config_quirk(tp);
}
2576
/* PHY init for the RTL8169sce (vendor magic table, order-sensitive). */
static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x04, 0x0000 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x9000 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0xa000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x0b, 0x8480 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x18, 0x67c7 },
		{ 0x04, 0x2000 },
		{ 0x03, 0x002f },
		{ 0x02, 0x4360 },
		{ 0x01, 0x0109 },
		{ 0x00, 0x3022 },
		{ 0x04, 0x2800 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2629
/*
 * PHY init for the RTL8168bb: set bit 0 of reg 0x16 on page 1, then
 * apply the vendor magic table.
 */
static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x10, 0xf41b },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy(tp, 0x1f, 0x0001);
	rtl_patchphy(tp, 0x16, 1 << 0);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2642
/* PHY init for the RTL8168be/bf: single vendor magic write on page 1. */
static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x10, 0xf41b },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2653
/* PHY init for the first RTL8168cp variant (vendor magic). */
static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0000 },
		{ 0x1d, 0x0f00 },
		{ 0x1f, 0x0002 },
		{ 0x0c, 0x1ec8 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2666
/*
 * PHY init for the second RTL8168cp variant: set bit 5 in regs 0x14 and
 * 0x0d on page 0, then apply the vendor magic table.
 */
static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x1d, 0x3d98 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2681
/* PHY init for the first RTL8168c variant (vendor magic). */
static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x1f, 0x0002 },
		{ 0x00, 0x88d4 },
		{ 0x01, 0x82b1 },
		{ 0x03, 0x7002 },
		{ 0x08, 0x9e30 },
		{ 0x09, 0x01f0 },
		{ 0x0a, 0x5500 },
		{ 0x0c, 0x00c8 },
		{ 0x1f, 0x0003 },
		{ 0x12, 0xc096 },
		{ 0x16, 0x000a },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0000 },
		{ 0x09, 0x2000 },
		{ 0x09, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* Post-table bit tweaks on page 0. */
	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);
	rtl_writephy(tp, 0x1f, 0x0000);
}
2710
/* PHY init for the second RTL8168c variant (vendor magic). */
static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0x9000 },
		{ 0x1d, 0x3d98 },
		{ 0x1f, 0x0002 },
		{ 0x0c, 0x7eb8 },
		{ 0x06, 0x0761 },
		{ 0x1f, 0x0003 },
		{ 0x16, 0x0f0a },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* Post-table bit tweaks on page 0. */
	rtl_patchphy(tp, 0x16, 1 << 0);
	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);
	rtl_writephy(tp, 0x1f, 0x0000);
}
2738
/* PHY init for the third RTL8168c variant (vendor magic). */
static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x1d, 0x3d98 },
		{ 0x1f, 0x0002 },
		{ 0x0c, 0x7eb8 },
		{ 0x06, 0x5461 },
		{ 0x1f, 0x0003 },
		{ 0x16, 0x0f0a },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* Post-table bit tweaks on page 0. */
	rtl_patchphy(tp, 0x16, 1 << 0);
	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);
	rtl_writephy(tp, 0x1f, 0x0000);
}
2760
/* The fourth RTL8168c variant uses the same PHY init as the third. */
static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp)
{
	rtl8168c_3_hw_phy_config(tp);
}
2765
/*
 * PHY init for the first RTL8168d variant.  Applies vendor magic, picks
 * one of two extra tweak sequences depending on efuse byte 0x01 (the
 * 0xb1 value presumably distinguishes a PHY revision — unconfirmed),
 * then conditionally applies firmware.
 */
static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init_0[] = {
		/* Channel Estimation */
		{ 0x1f, 0x0001 },
		{ 0x06, 0x4064 },
		{ 0x07, 0x2863 },
		{ 0x08, 0x059c },
		{ 0x09, 0x26b4 },
		{ 0x0a, 0x6a19 },
		{ 0x0b, 0xdcc8 },
		{ 0x10, 0xf06d },
		{ 0x14, 0x7f68 },
		{ 0x18, 0x7fd9 },
		{ 0x1c, 0xf0ff },
		{ 0x1d, 0x3d9c },
		{ 0x1f, 0x0003 },
		{ 0x12, 0xf49f },
		{ 0x13, 0x070b },
		{ 0x1a, 0x05ad },
		{ 0x14, 0x94c0 },

		/*
		 * Tx Error Issue
		 * Enhance line driver power
		 */
		{ 0x1f, 0x0002 },
		{ 0x06, 0x5561 },
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8332 },
		{ 0x06, 0x5561 },

		/*
		 * Can not link to 1Gbps with bad cable
		 * Decrease SNR threshold form 21.07dB to 19.04dB
		 */
		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0000 },
		{ 0x0d, 0xf880 }
	};

	rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));

	/*
	 * Rx Error Issue
	 * Fine Tune Switching regulator parameter
	 */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef);
	rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00);

	if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x669a },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x669a },
			{ 0x1f, 0x0002 }
		};
		int val;

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

		val = rtl_readphy(tp, 0x0d);

		/* Walk the low byte of reg 0x0d up to 0x6c in fixed steps
		 * unless it already reads 0x6c.
		 */
		if ((val & 0x00ff) != 0x006c) {
			static const u32 set[] = {
				0x0065, 0x0066, 0x0067, 0x0068,
				0x0069, 0x006a, 0x006b, 0x006c
			};
			int i;

			rtl_writephy(tp, 0x1f, 0x0002);

			val &= 0xff00;
			for (i = 0; i < ARRAY_SIZE(set); i++)
				rtl_writephy(tp, 0x0d, val | set[i]);
		}
	} else {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x6662 },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x6662 }
		};

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
	}

	/* RSET couple improve */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_patchphy(tp, 0x0d, 0x0300);
	rtl_patchphy(tp, 0x0f, 0x0010);

	/* Fine tune PLL performance */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
	rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x001b);

	/* Firmware is applied only if MII_EXPANSION reads 0xbf00. */
	rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00);

	rtl_writephy(tp, 0x1f, 0x0000);
}
2876
/*
 * PHY init for the second RTL8168d variant.  Same structure as
 * rtl8168d_1_hw_phy_config (shared channel-estimation table, efuse
 * byte 0x01 selects a tweak sequence) but with different magic values
 * and firmware readiness key (0xb300).
 */
static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init_0[] = {
		/* Channel Estimation */
		{ 0x1f, 0x0001 },
		{ 0x06, 0x4064 },
		{ 0x07, 0x2863 },
		{ 0x08, 0x059c },
		{ 0x09, 0x26b4 },
		{ 0x0a, 0x6a19 },
		{ 0x0b, 0xdcc8 },
		{ 0x10, 0xf06d },
		{ 0x14, 0x7f68 },
		{ 0x18, 0x7fd9 },
		{ 0x1c, 0xf0ff },
		{ 0x1d, 0x3d9c },
		{ 0x1f, 0x0003 },
		{ 0x12, 0xf49f },
		{ 0x13, 0x070b },
		{ 0x1a, 0x05ad },
		{ 0x14, 0x94c0 },

		/*
		 * Tx Error Issue
		 * Enhance line driver power
		 */
		{ 0x1f, 0x0002 },
		{ 0x06, 0x5561 },
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8332 },
		{ 0x06, 0x5561 },

		/*
		 * Can not link to 1Gbps with bad cable
		 * Decrease SNR threshold form 21.07dB to 19.04dB
		 */
		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0000 },
		{ 0x0d, 0xf880 }
	};

	rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));

	if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x669a },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x669a },

			{ 0x1f, 0x0002 }
		};
		int val;

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

		val = rtl_readphy(tp, 0x0d);
		/* Walk the low byte of reg 0x0d up to 0x6c in fixed steps
		 * unless it already reads 0x6c.
		 */
		if ((val & 0x00ff) != 0x006c) {
			static const u32 set[] = {
				0x0065, 0x0066, 0x0067, 0x0068,
				0x0069, 0x006a, 0x006b, 0x006c
			};
			int i;

			rtl_writephy(tp, 0x1f, 0x0002);

			val &= 0xff00;
			for (i = 0; i < ARRAY_SIZE(set); i++)
				rtl_writephy(tp, 0x0d, val | set[i]);
		}
	} else {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x2642 },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x2642 }
		};

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
	}

	/* Fine tune PLL performance */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
	rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);

	/* Switching regulator Slew rate */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_patchphy(tp, 0x0f, 0x0017);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x001b);

	/* Firmware is applied only if MII_EXPANSION reads 0xb300. */
	rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300);

	rtl_writephy(tp, 0x1f, 0x0000);
}
2978
/* PHY init for the third RTL8168d variant (vendor magic, no firmware). */
static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0002 },
		{ 0x10, 0x0008 },
		{ 0x0d, 0x006c },

		{ 0x1f, 0x0000 },
		{ 0x0d, 0xf880 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0001 },
		{ 0x0b, 0xa4d8 },
		{ 0x09, 0x281c },
		{ 0x07, 0x2883 },
		{ 0x0a, 0x6b35 },
		{ 0x1d, 0x3da4 },
		{ 0x1c, 0xeffd },
		{ 0x14, 0x7f52 },
		{ 0x18, 0x7fc6 },
		{ 0x08, 0x0601 },
		{ 0x06, 0x4063 },
		{ 0x10, 0xf074 },
		{ 0x1f, 0x0003 },
		{ 0x13, 0x0789 },
		{ 0x12, 0xf4bd },
		{ 0x1a, 0x04fd },
		{ 0x14, 0x84b0 },
		{ 0x1f, 0x0000 },
		{ 0x00, 0x9200 },

		{ 0x1f, 0x0005 },
		{ 0x01, 0x0340 },
		{ 0x1f, 0x0001 },
		{ 0x04, 0x4000 },
		{ 0x03, 0x1d21 },
		{ 0x02, 0x0c32 },
		{ 0x01, 0x0200 },
		{ 0x00, 0x5554 },
		{ 0x04, 0x4800 },
		{ 0x04, 0x4000 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0xf000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0007 },
		{ 0x1e, 0x0023 },
		{ 0x16, 0x0000 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
3039
/*
 * PHY init for the fourth RTL8168d variant: small vendor magic table,
 * then set bit 5 of reg 0x0d.
 */
static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0007 },
		{ 0x1e, 0x002d },
		{ 0x18, 0x0040 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
	rtl_patchphy(tp, 0x0d, 1 << 5);
}
3055
/*
 * PHY init for the first RTL8168e variant: load firmware first, then
 * apply vendor magic tables and individual tweaks (purposes per the
 * vendor comments below).
 */
static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Enable Delay cap */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b80 },
		{ 0x06, 0xc896 },
		{ 0x1f, 0x0000 },

		/* Channel estimation fine tune */
		{ 0x1f, 0x0001 },
		{ 0x0b, 0x6c20 },
		{ 0x07, 0x2872 },
		{ 0x1c, 0xefff },
		{ 0x1f, 0x0003 },
		{ 0x14, 0x6420 },
		{ 0x1f, 0x0000 },

		/* Update PFM & 10M TX idle timer */
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x002f },
		{ 0x15, 0x1919 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0007 },
		{ 0x1e, 0x00ac },
		{ 0x18, 0x0006 },
		{ 0x1f, 0x0000 }
	};

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* DCO enable for 10M IDLE Power */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0023);
	rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* For impedance matching */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w1w0_phy(tp, 0x08, 0x8000, 0x7f00);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w1w0_phy(tp, 0x18, 0x0050, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* EEE-related sequence (reg 0x0d/0x0e MMD-style accesses). */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0020);
	rtl_w1w0_phy(tp, 0x15, 0x0000, 0x1100);
	rtl_writephy(tp, 0x1f, 0x0006);
	rtl_writephy(tp, 0x00, 0x5a00);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0007);
	rtl_writephy(tp, 0x0e, 0x003c);
	rtl_writephy(tp, 0x0d, 0x4007);
	rtl_writephy(tp, 0x0e, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0000);
}
3128
/*
 * Program the GigaMAC address registers (EXGMAC regs 0xe0/0xe4 and
 * 0xf0/0xf4) with the 6-byte MAC address @addr, packed as three
 * little-endian 16-bit words.  Used to repair registers a broken BIOS
 * left stale (see rtl8168e_2_hw_phy_config).
 */
static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr)
{
	const u16 w[] = {
		addr[0] | (addr[1] << 8),
		addr[2] | (addr[3] << 8),
		addr[4] | (addr[5] << 8)
	};
	const struct exgmac_reg e[] = {
		{ .addr = 0xe0, ERIAR_MASK_1111, .val = w[0] | (w[1] << 16) },
		{ .addr = 0xe4, ERIAR_MASK_1111, .val = w[2] },
		{ .addr = 0xf0, ERIAR_MASK_1111, .val = w[0] << 16 },
		{ .addr = 0xf4, ERIAR_MASK_1111, .val = w[1] | (w[2] << 16) }
	};

	rtl_write_exgmac_batch(tp, e, ARRAY_SIZE(e));
}
3145
/*
 * PHY init for the second RTL8168e variant: firmware first, then vendor
 * magic tables and tweaks (purposes per the vendor comments below),
 * finishing with the broken-BIOS MAC address workaround.
 */
static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Enable Delay cap */
		{ 0x1f, 0x0004 },
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x00ac },
		{ 0x18, 0x0006 },
		{ 0x1f, 0x0002 },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0000 },

		/* Channel estimation fine tune */
		{ 0x1f, 0x0003 },
		{ 0x09, 0xa20f },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0000 },

		/* Green Setting */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b5b },
		{ 0x06, 0x9222 },
		{ 0x05, 0x8b6d },
		{ 0x06, 0x8000 },
		{ 0x05, 0x8b76 },
		{ 0x06, 0x8000 },
		{ 0x1f, 0x0000 }
	};

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* For 4-corner performance improve */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b80);
	rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);

	/* improve 10M EEE waveform */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Improve 2-pair detection performance */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* EEE setting */
	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, ERIAR_EXGMAC);
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0020);
	rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0007);
	rtl_writephy(tp, 0x0e, 0x003c);
	rtl_writephy(tp, 0x0d, 0x4007);
	rtl_writephy(tp, 0x0e, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0000);

	/* Green feature */
	rtl_writephy(tp, 0x1f, 0x0003);
	rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
	rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Broken BIOS workaround: feed GigaMAC registers with MAC address. */
	rtl_rar_exgmac_set(tp, tp->dev->dev_addr);
}
3232
/*
 * PHY tweaks common to the RTL8168f family, called from the per-variant
 * config helpers (purposes per the vendor comments below).
 */
static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
{
	/* For 4-corner performance improve */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b80);
	rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);

	/* Improve 10M EEE waveform */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3254
/*
 * PHY init for the first RTL8168f variant: firmware, vendor magic
 * table, the shared 8168f tweaks, then a 2-pair detection tweak.
 */
static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Channel estimation fine tune */
		{ 0x1f, 0x0003 },
		{ 0x09, 0xa20f },
		{ 0x1f, 0x0000 },

		/* Modify green table for giga & fnet */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b55 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b5e },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b67 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b70 },
		{ 0x06, 0x0000 },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x0078 },
		{ 0x17, 0x0000 },
		{ 0x19, 0x00fb },
		{ 0x1f, 0x0000 },

		/* Modify green table for 10M */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b79 },
		{ 0x06, 0xaa00 },
		{ 0x1f, 0x0000 },

		/* Disable hiimpedance detection (RTCT) */
		{ 0x1f, 0x0003 },
		{ 0x01, 0x328a },
		{ 0x1f, 0x0000 }
	};

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl8168f_hw_phy_config(tp);

	/* Improve 2-pair detection performance */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3304
/*
 * PHY setup for RTL_GIGA_MAC_VER_36 (8168F rev 2): firmware patch plus
 * the common 8168F tweaks; no extra register fixups are needed.
 */
static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
{
	rtl_apply_firmware(tp);

	rtl8168f_hw_phy_config(tp);
}
3311
/*
 * PHY setup for RTL_GIGA_MAC_VER_38 (RTL8411): firmware patch, the
 * common 8168F tweaks, then a long sequence of green-table, EEE and
 * "green feature" fixups.  Note the batch table is applied *after* the
 * 2-pair detection tweak, unlike rtl8168f_1_hw_phy_config.
 */
static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Channel estimation fine tune */
		{ 0x1f, 0x0003 },
		{ 0x09, 0xa20f },
		{ 0x1f, 0x0000 },

		/* Modify green table for giga & fnet */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b55 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b5e },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b67 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b70 },
		{ 0x06, 0x0000 },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x0078 },
		{ 0x17, 0x0000 },
		{ 0x19, 0x00aa },
		{ 0x1f, 0x0000 },

		/* Modify green table for 10M */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b79 },
		{ 0x06, 0xaa00 },
		{ 0x1f, 0x0000 },

		/* Disable hiimpedance detection (RTCT) */
		{ 0x1f, 0x0003 },
		{ 0x01, 0x328a },
		{ 0x1f, 0x0000 }
	};

	rtl_apply_firmware(tp);

	rtl8168f_hw_phy_config(tp);

	/* Improve 2-pair detection performance */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* Modify green table for giga */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b54);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
	rtl_writephy(tp, 0x05, 0x8b5d);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
	rtl_writephy(tp, 0x05, 0x8a7c);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x05, 0x8a7f);
	rtl_w1w0_phy(tp, 0x06, 0x0100, 0x0000);
	rtl_writephy(tp, 0x05, 0x8a82);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x05, 0x8a85);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x05, 0x8a88);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* uc same-seed solution */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x8000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* eee setting */
	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC);
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0020);
	rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0007);
	rtl_writephy(tp, 0x0e, 0x003c);
	rtl_writephy(tp, 0x0d, 0x4007);
	rtl_writephy(tp, 0x0e, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0000);

	/* Green feature */
	rtl_writephy(tp, 0x1f, 0x0003);
	rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
	rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3408
/*
 * PHY setup for RTL_GIGA_MAC_VER_40 (8168G rev 1).  The first two
 * read-modify-write pairs mirror a strap/status bit (pages 0x0a46
 * regs 0x10/0x13) into configuration bits on pages 0x0bcc and 0x0c41.
 * Then: auto speed down, EEE auto-fallback, UC LPF tune, SWR
 * efficiency and an ALDPS disable check.
 */
static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
{
	rtl_apply_firmware(tp);

	rtl_writephy(tp, 0x1f, 0x0a46);
	if (rtl_readphy(tp, 0x10) & 0x0100) {
		rtl_writephy(tp, 0x1f, 0x0bcc);
		rtl_w1w0_phy(tp, 0x12, 0x0000, 0x8000);
	} else {
		rtl_writephy(tp, 0x1f, 0x0bcc);
		rtl_w1w0_phy(tp, 0x12, 0x8000, 0x0000);
	}

	rtl_writephy(tp, 0x1f, 0x0a46);
	if (rtl_readphy(tp, 0x13) & 0x0100) {
		rtl_writephy(tp, 0x1f, 0x0c41);
		rtl_w1w0_phy(tp, 0x15, 0x0002, 0x0000);
	} else {
		rtl_writephy(tp, 0x1f, 0x0c41);
		rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0002);
	}

	/* Enable PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0a44);
	rtl_w1w0_phy(tp, 0x11, 0x000c, 0x0000);

	rtl_writephy(tp, 0x1f, 0x0bcc);
	rtl_w1w0_phy(tp, 0x14, 0x0100, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0a44);
	rtl_w1w0_phy(tp, 0x11, 0x00c0, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x8084);
	rtl_w1w0_phy(tp, 0x14, 0x0000, 0x6000);
	rtl_w1w0_phy(tp, 0x10, 0x1003, 0x0000);

	/* EEE auto-fallback function */
	rtl_writephy(tp, 0x1f, 0x0a4b);
	rtl_w1w0_phy(tp, 0x11, 0x0004, 0x0000);

	/* Enable UC LPF tune function */
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x8012);
	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);

	rtl_writephy(tp, 0x1f, 0x0c42);
	rtl_w1w0_phy(tp, 0x11, 0x4000, 0x2000);

	/* Improve SWR Efficiency */
	rtl_writephy(tp, 0x1f, 0x0bcd);
	rtl_writephy(tp, 0x14, 0x5065);
	rtl_writephy(tp, 0x14, 0xd065);
	rtl_writephy(tp, 0x1f, 0x0bc8);
	rtl_writephy(tp, 0x11, 0x5655);
	rtl_writephy(tp, 0x1f, 0x0bcd);
	rtl_writephy(tp, 0x14, 0x1065);
	rtl_writephy(tp, 0x14, 0x9065);
	rtl_writephy(tp, 0x14, 0x1065);

	/* Check ALDPS bit, disable it if enabled */
	rtl_writephy(tp, 0x1f, 0x0a43);
	if (rtl_readphy(tp, 0x10) & 0x0004)
		rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0004);

	rtl_writephy(tp, 0x1f, 0x0000);
}
3474
/*
 * PHY setup for the later 8168G-class chips (VER_42/43/44): only the
 * firmware patch is needed; no manual register fixups.
 */
static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp)
{
	rtl_apply_firmware(tp);
}
3479
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * PHY setup for RTL8102E (VER_07/08/09): set three individual PHY bits
 * on page 0, then apply a small fixup table on page 3.
 */
static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0003 },
		{ 0x08, 0x441d },
		{ 0x01, 0x9100 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_patchphy(tp, 0x11, 1 << 12);
	rtl_patchphy(tp, 0x19, 1 << 13);
	rtl_patchphy(tp, 0x10, 1 << 15);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
3496
/*
 * PHY setup for RTL8105E (VER_29/30): ALDPS must be disabled (and given
 * time to settle) before the firmware ram code is downloaded, then a
 * small register table is applied.
 */
static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0005 },
		{ 0x1a, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0004 },
		{ 0x1c, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x15, 0x7701 },
		{ 0x1f, 0x0000 }
	};

	/* Disable ALDPS before ram code */
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x18, 0x0310);
	msleep(100);

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
3522
/*
 * PHY setup for RTL8402 (VER_37): disable ALDPS, download firmware,
 * then configure EEE via an ERI write and page-4 PHY registers.
 */
static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
{
	/* Disable ALDPS before setting firmware */
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x18, 0x0310);
	msleep(20);

	rtl_apply_firmware(tp);

	/* EEE setting */
	rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x10, 0x401f);
	rtl_writephy(tp, 0x19, 0x7030);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3539
/*
 * PHY setup for RTL8106E (VER_39): disable ALDPS, download firmware,
 * clear the EEE ERI registers (0x1b0 and 0x1d0) and apply a small
 * page-4 fixup table.
 */
static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0004 },
		{ 0x10, 0xc07f },
		{ 0x19, 0x7030 },
		{ 0x1f, 0x0000 }
	};

	/* Disable ALDPS before ram code */
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x18, 0x0310);
	msleep(100);

	rtl_apply_firmware(tp);

	rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
}
3561
3562static void rtl_hw_phy_config(struct net_device *dev)
3563{
3564	struct rtl8169_private *tp = netdev_priv(dev);
3565
3566	rtl8169_print_mac_version(tp);
3567
3568	switch (tp->mac_version) {
3569	case RTL_GIGA_MAC_VER_01:
3570		break;
3571	case RTL_GIGA_MAC_VER_02:
3572	case RTL_GIGA_MAC_VER_03:
3573		rtl8169s_hw_phy_config(tp);
3574		break;
3575	case RTL_GIGA_MAC_VER_04:
3576		rtl8169sb_hw_phy_config(tp);
3577		break;
3578	case RTL_GIGA_MAC_VER_05:
3579		rtl8169scd_hw_phy_config(tp);
3580		break;
3581	case RTL_GIGA_MAC_VER_06:
3582		rtl8169sce_hw_phy_config(tp);
3583		break;
3584	case RTL_GIGA_MAC_VER_07:
3585	case RTL_GIGA_MAC_VER_08:
3586	case RTL_GIGA_MAC_VER_09:
3587		rtl8102e_hw_phy_config(tp);
3588		break;
3589	case RTL_GIGA_MAC_VER_11:
3590		rtl8168bb_hw_phy_config(tp);
3591		break;
3592	case RTL_GIGA_MAC_VER_12:
3593		rtl8168bef_hw_phy_config(tp);
3594		break;
3595	case RTL_GIGA_MAC_VER_17:
3596		rtl8168bef_hw_phy_config(tp);
3597		break;
3598	case RTL_GIGA_MAC_VER_18:
3599		rtl8168cp_1_hw_phy_config(tp);
3600		break;
3601	case RTL_GIGA_MAC_VER_19:
3602		rtl8168c_1_hw_phy_config(tp);
3603		break;
3604	case RTL_GIGA_MAC_VER_20:
3605		rtl8168c_2_hw_phy_config(tp);
3606		break;
3607	case RTL_GIGA_MAC_VER_21:
3608		rtl8168c_3_hw_phy_config(tp);
3609		break;
3610	case RTL_GIGA_MAC_VER_22:
3611		rtl8168c_4_hw_phy_config(tp);
3612		break;
3613	case RTL_GIGA_MAC_VER_23:
3614	case RTL_GIGA_MAC_VER_24:
3615		rtl8168cp_2_hw_phy_config(tp);
3616		break;
3617	case RTL_GIGA_MAC_VER_25:
3618		rtl8168d_1_hw_phy_config(tp);
3619		break;
3620	case RTL_GIGA_MAC_VER_26:
3621		rtl8168d_2_hw_phy_config(tp);
3622		break;
3623	case RTL_GIGA_MAC_VER_27:
3624		rtl8168d_3_hw_phy_config(tp);
3625		break;
3626	case RTL_GIGA_MAC_VER_28:
3627		rtl8168d_4_hw_phy_config(tp);
3628		break;
3629	case RTL_GIGA_MAC_VER_29:
3630	case RTL_GIGA_MAC_VER_30:
3631		rtl8105e_hw_phy_config(tp);
3632		break;
3633	case RTL_GIGA_MAC_VER_31:
3634		/* None. */
3635		break;
3636	case RTL_GIGA_MAC_VER_32:
3637	case RTL_GIGA_MAC_VER_33:
3638		rtl8168e_1_hw_phy_config(tp);
3639		break;
3640	case RTL_GIGA_MAC_VER_34:
3641		rtl8168e_2_hw_phy_config(tp);
3642		break;
3643	case RTL_GIGA_MAC_VER_35:
3644		rtl8168f_1_hw_phy_config(tp);
3645		break;
3646	case RTL_GIGA_MAC_VER_36:
3647		rtl8168f_2_hw_phy_config(tp);
3648		break;
3649
3650	case RTL_GIGA_MAC_VER_37:
3651		rtl8402_hw_phy_config(tp);
3652		break;
3653
3654	case RTL_GIGA_MAC_VER_38:
3655		rtl8411_hw_phy_config(tp);
3656		break;
3657
3658	case RTL_GIGA_MAC_VER_39:
3659		rtl8106e_hw_phy_config(tp);
3660		break;
3661
3662	case RTL_GIGA_MAC_VER_40:
3663		rtl8168g_1_hw_phy_config(tp);
3664		break;
3665	case RTL_GIGA_MAC_VER_42:
3666	case RTL_GIGA_MAC_VER_43:
3667	case RTL_GIGA_MAC_VER_44:
3668		rtl8168g_2_hw_phy_config(tp);
3669		break;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3670
3671	case RTL_GIGA_MAC_VER_41:
3672	default:
3673		break;
3674	}
3675}
3676
/*
 * Periodic PHY watchdog (runs from the task work, rearms tp->timer).
 * While a PHY reset is still pending, poll again soon (HZ/10); once
 * the link is up, stop rearming; otherwise trigger another PHY reset
 * and check again after RTL8169_PHY_TIMEOUT.
 */
static void rtl_phy_work(struct rtl8169_private *tp)
{
	struct timer_list *timer = &tp->timer;
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long timeout = RTL8169_PHY_TIMEOUT;

	/* VER_01 is TBI-only and never uses this PHY poll logic */
	assert(tp->mac_version > RTL_GIGA_MAC_VER_01);

	if (tp->phy_reset_pending(tp)) {
		/*
		 * A busy loop could burn quite a few cycles on nowadays CPU.
		 * Let's delay the execution of the timer for a few ticks.
		 */
		timeout = HZ/10;
		goto out_mod_timer;
	}

	if (tp->link_ok(ioaddr))
		return;

	netif_dbg(tp, link, tp->dev, "PHY reset until link up\n");

	tp->phy_reset_enable(tp);

out_mod_timer:
	mod_timer(timer, jiffies + timeout);
}
3704
/*
 * Queue the driver's deferred-work handler for @flag; the
 * test_and_set_bit() guard avoids scheduling the same task twice.
 */
static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
{
	if (!test_and_set_bit(flag, tp->wk.flags))
		schedule_work(&tp->wk.work);
}
3710
/*
 * tp->timer callback: defer the actual PHY work (rtl_phy_work) to
 * process context via the driver task queue.
 */
static void rtl8169_phy_timer(unsigned long __opaque)
{
	struct net_device *dev = (struct net_device *)__opaque;
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING);
}
3718
/*
 * Undo probe-time board setup: unmap MMIO, release PCI resources,
 * clear MWI, disable the device and free the netdev.  Order is the
 * reverse of acquisition.
 */
static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
				  void __iomem *ioaddr)
{
	iounmap(ioaddr);
	pci_release_regions(pdev);
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
}
3728
/* Poll condition: true while the PHY reset has not completed yet. */
DECLARE_RTL_COND(rtl_phy_reset_cond)
{
	return tp->phy_reset_pending(tp);
}
3733
/*
 * Kick a PHY reset and wait (1 ms polls, up to 100 tries) for it to
 * self-clear.
 */
static void rtl8169_phy_reset(struct net_device *dev,
			      struct rtl8169_private *tp)
{
	tp->phy_reset_enable(tp);
	rtl_msleep_loop_wait_low(tp, &rtl_phy_reset_cond, 1, 100);
}
3740
/*
 * True when the chip is operating through its TBI (fiber) interface;
 * only the original 8169 (VER_01) can have TBI enabled.
 */
static bool rtl_tbi_enabled(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
	    (RTL_R8(PHYstatus) & TBI_Enable);
}
3748
/*
 * Full PHY bring-up: chip-specific PHY config, PCI latency/cacheline
 * quirks for the old 8169 family, a PHY reset, and finally start
 * autonegotiation advertising everything the PHY supports (1000Mbit
 * modes only when the MII reports gigabit capability).
 */
static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_hw_phy_config(dev);

	if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
		dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
		RTL_W8(0x82, 0x01);
	}

	pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);

	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
		pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);

	if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
		dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
		RTL_W8(0x82, 0x01);
		dprintk("Set PHY Reg 0x0bh = 0x00h\n");
		rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0
	}

	rtl8169_phy_reset(dev, tp);

	rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
			  ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			  (tp->mii.supports_gmii ?
			   ADVERTISED_1000baseT_Half |
			   ADVERTISED_1000baseT_Full : 0));

	if (rtl_tbi_enabled(tp))
		netif_info(tp, link, dev, "TBI auto-negotiating\n");
}
3784
/*
 * Program the unicast MAC address filter registers (MAC0/MAC4) under
 * the config-register unlock.  The high word (MAC4) is written before
 * the low dword (MAC0); each write is followed by a read-back to flush
 * the posted write.  VER_34 additionally mirrors the address into the
 * GigaMAC (ExGMAC) registers as a BIOS workaround.
 */
static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_lock_work(tp);

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	RTL_W32(MAC4, addr[4] | addr[5] << 8);
	RTL_R32(MAC4);

	RTL_W32(MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24);
	RTL_R32(MAC0);

	if (tp->mac_version == RTL_GIGA_MAC_VER_34)
		rtl_rar_exgmac_set(tp, addr);

	RTL_W8(Cfg9346, Cfg9346_Lock);

	rtl_unlock_work(tp);
}
3806
/*
 * ndo_set_mac_address handler: validate the new address, copy it into
 * the netdev and push it to the hardware filter registers.
 *
 * Returns 0 on success or -EADDRNOTAVAIL for an invalid address.
 */
static int rtl_set_mac_address(struct net_device *dev, void *p)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	rtl_rar_set(tp, dev->dev_addr);

	return 0;
}
3821
/*
 * ndo_do_ioctl handler: forward MII ioctls to the chip-specific
 * handler (xmii or tbi); rejected with -ENODEV while the interface
 * is down.
 */
static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(ifr);

	return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV;
}
3829
/*
 * MII ioctl implementation for copper (xMII) chips: report the fixed
 * internal PHY address and service raw register read/write requests.
 * Register numbers are masked to the valid 5-bit MII range.
 */
static int rtl_xmii_ioctl(struct rtl8169_private *tp,
			  struct mii_ioctl_data *data, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = 32; /* Internal PHY */
		return 0;

	case SIOCGMIIREG:
		data->val_out = rtl_readphy(tp, data->reg_num & 0x1f);
		return 0;

	case SIOCSMIIREG:
		rtl_writephy(tp, data->reg_num & 0x1f, data->val_in);
		return 0;
	}
	return -EOPNOTSUPP;
}
3848
/* MII ioctls are meaningless on the TBI (fiber) interface. */
static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd)
{
	return -EOPNOTSUPP;
}
3853
/* Disable MSI if it was enabled at probe time and clear the flag. */
static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
{
	if (tp->features & RTL_FEATURE_MSI) {
		pci_disable_msi(pdev);
		tp->features &= ~RTL_FEATURE_MSI;
	}
}
3861
/*
 * Select the MDIO (PHY register) accessors for the detected chip:
 * the two 8168DP variants and the 8168G family each need their own
 * access method; everything else uses the classic 8169 scheme.
 */
static void rtl_init_mdio_ops(struct rtl8169_private *tp)
{
	struct mdio_ops *ops = &tp->mdio_ops;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_27:
		ops->write	= r8168dp_1_mdio_write;
		ops->read	= r8168dp_1_mdio_read;
		break;
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		ops->write	= r8168dp_2_mdio_write;
		ops->read	= r8168dp_2_mdio_read;
		break;
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
	case RTL_GIGA_MAC_VER_42:
	case RTL_GIGA_MAC_VER_43:
	case RTL_GIGA_MAC_VER_44:
		ops->write	= r8168g_mdio_write;
		ops->read	= r8168g_mdio_read;
		break;
	default:
		ops->write	= r8169_mdio_write;
		ops->read	= r8169_mdio_read;
		break;
	}
}
3890
/*
 * Renegotiate at the lowest speed the link partner advertises (used
 * before suspend to save power while keeping WoL functional).  If the
 * partner advertises nothing we know, fall back to advertising the
 * full set this PHY supports.
 */
static void rtl_speed_down(struct rtl8169_private *tp)
{
	u32 adv;
	int lpa;

	rtl_writephy(tp, 0x1f, 0x0000);
	lpa = rtl_readphy(tp, MII_LPA);

	if (lpa & (LPA_10HALF | LPA_10FULL))
		adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full;
	else if (lpa & (LPA_100HALF | LPA_100FULL))
		adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
	else
		adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
		      (tp->mii.supports_gmii ?
		       ADVERTISED_1000baseT_Half |
		       ADVERTISED_1000baseT_Full : 0);

	rtl8169_set_speed(tp->dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
			  adv);
}
3914
/*
 * On the listed chips the receiver must keep accepting broadcast,
 * multicast and unicast frames across suspend, otherwise Wake-on-LAN
 * packets would be filtered out before they can wake the machine.
 */
static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_29:
	case RTL_GIGA_MAC_VER_30:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_34:
	case RTL_GIGA_MAC_VER_37:
	case RTL_GIGA_MAC_VER_38:
	case RTL_GIGA_MAC_VER_39:
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
	case RTL_GIGA_MAC_VER_42:
	case RTL_GIGA_MAC_VER_43:
	case RTL_GIGA_MAC_VER_44:
		RTL_W32(RxConfig, RTL_R32(RxConfig) |
			AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
		break;
	default:
		break;
	}
}
3942
/*
 * WoL-aware power down: if any wake source is enabled, drop the link
 * to the lowest usable speed and apply the suspend Rx quirk instead
 * of powering the PHY off.  Returns true when this WoL path was taken
 * (caller must then skip the full PHY/PLL power down).
 */
static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
{
	if (!(__rtl8169_get_wol(tp) & WAKE_ANY))
		return false;

	rtl_speed_down(tp);
	rtl_wol_suspend_quirk(tp);

	return true;
}
3953
/* Power the 810x-family PHY down via the BMCR power-down bit. */
static void r810x_phy_power_down(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
3959
/* Power the 810x-family PHY back up (re-enables autonegotiation). */
static void r810x_phy_power_up(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
}
3965
/*
 * PLL/PHY power down for the 810x family.  If WoL is armed the
 * reduced-speed path is taken instead.  On chips that support it,
 * PMCH bit 7 is additionally cleared to gate the PLL; the listed
 * older versions have no such control.
 */
static void r810x_pll_power_down(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_wol_pll_power_down(tp))
		return;

	r810x_phy_power_down(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
	case RTL_GIGA_MAC_VER_08:
	case RTL_GIGA_MAC_VER_09:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_13:
	case RTL_GIGA_MAC_VER_16:
		break;
	default:
		RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
		break;
	}
}
3988
/*
 * PLL/PHY power up for the 810x family: wake the PHY, then set PMCH
 * bit 7 on chips that gate the PLL through it (the listed older
 * versions have no such control).
 */
static void r810x_pll_power_up(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r810x_phy_power_up(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
	case RTL_GIGA_MAC_VER_08:
	case RTL_GIGA_MAC_VER_09:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_13:
	case RTL_GIGA_MAC_VER_16:
		break;
	default:
		RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
		break;
	}
}
4008
/*
 * Power the 8168-family PHY up.  The 8168B/C/D variants first need
 * PHY reg 0x0e cleared (it was set to 0x0200 on power down); all
 * variants then get autonegotiation re-enabled via BMCR.
 */
static void r8168_phy_power_up(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		rtl_writephy(tp, 0x0e, 0x0000);
		break;
	default:
		break;
	}
	rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
}
4035
/*
 * Power the 8168-family PHY down.  The 8168E/G variants keep
 * autonegotiation enabled alongside power-down; the 8168B/C/D
 * variants first set PHY reg 0x0e and then fall through to the
 * plain BMCR power-down used by everything else.
 */
static void r8168_phy_power_down(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
		rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN);
		break;

	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		rtl_writephy(tp, 0x0e, 0x0200);
		/* fall through - these chips also need the BMCR power-down */
	default:
		rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
		break;
	}
}
4068
/*
 * PLL/PHY power down for the 8168 family.  Bail out entirely when a
 * DASH management agent (8168DP) or ASF (VER_23/24) is active, since
 * those need the NIC to stay powered.  Otherwise prefer the WoL
 * reduced-speed path; failing that, power the PHY down and gate the
 * PLL (PMCH bit for older chips, ERI reg 0x1a8 for 8168G).
 */
static void r8168_pll_power_down(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_31) &&
	    r8168dp_check_dash(tp)) {
		return;
	}

	if ((tp->mac_version == RTL_GIGA_MAC_VER_23 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_24) &&
	    (RTL_R16(CPlusCmd) & ASF)) {
		return;
	}

	if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_33)
		rtl_ephy_write(tp, 0x19, 0xff64);

	if (rtl_wol_pll_power_down(tp))
		return;

	r8168_phy_power_down(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
		break;
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
		rtl_w1w0_eri(tp, 0x1a8, ERIAR_MASK_1111, 0x00000000,
			     0xfc000000, ERIAR_EXGMAC);
		break;
	}
}
4112
/*
 * PLL/PHY power up for the 8168 family: un-gate the PLL (PMCH bit for
 * older chips, ERI reg 0x1a8 for 8168G — the exact inverse of
 * r8168_pll_power_down), then wake the PHY.
 */
static void r8168_pll_power_up(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
		break;
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
		rtl_w1w0_eri(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000,
			     0x00000000, ERIAR_EXGMAC);
		break;
	}

	r8168_phy_power_up(tp);
}
4136
/* Invoke a chip-specific callback if one was installed (NULL = no-op). */
static void rtl_generic_op(struct rtl8169_private *tp,
			   void (*op)(struct rtl8169_private *))
{
	if (op)
		op(tp);
}
4143
/* Run the chip's PLL power-down hook, if any. */
static void rtl_pll_power_down(struct rtl8169_private *tp)
{
	rtl_generic_op(tp, tp->pll_power_ops.down);
}
4148
/* Run the chip's PLL power-up hook, if any. */
static void rtl_pll_power_up(struct rtl8169_private *tp)
{
	rtl_generic_op(tp, tp->pll_power_ops.up);
}
4153
/*
 * Select the PLL power-management callbacks for the detected chip:
 * 810x-class chips use the r810x handlers, 8168-class chips the r8168
 * handlers, and chips without PLL power control get NULL (no-op).
 */
static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
{
	struct pll_power_ops *ops = &tp->pll_power_ops;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
	case RTL_GIGA_MAC_VER_08:
	case RTL_GIGA_MAC_VER_09:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_16:
	case RTL_GIGA_MAC_VER_29:
	case RTL_GIGA_MAC_VER_30:
	case RTL_GIGA_MAC_VER_37:
	case RTL_GIGA_MAC_VER_39:
	case RTL_GIGA_MAC_VER_43:
		ops->down	= r810x_pll_power_down;
		ops->up		= r810x_pll_power_up;
		break;

	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_34:
	case RTL_GIGA_MAC_VER_35:
	case RTL_GIGA_MAC_VER_36:
	case RTL_GIGA_MAC_VER_38:
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
	case RTL_GIGA_MAC_VER_42:
	case RTL_GIGA_MAC_VER_44:
		ops->down	= r8168_pll_power_down;
		ops->up		= r8168_pll_power_up;
		break;

	default:
		ops->down	= NULL;
		ops->up		= NULL;
		break;
	}
}
4208
/*
 * Program the base RxConfig value (FIFO threshold / DMA burst /
 * interrupt and early-rx options) appropriate for the detected chip
 * generation.  The accept-* filter bits are managed elsewhere.
 */
static void rtl_init_rxcfg(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_01:
	case RTL_GIGA_MAC_VER_02:
	case RTL_GIGA_MAC_VER_03:
	case RTL_GIGA_MAC_VER_04:
	case RTL_GIGA_MAC_VER_05:
	case RTL_GIGA_MAC_VER_06:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_13:
	case RTL_GIGA_MAC_VER_14:
	case RTL_GIGA_MAC_VER_15:
	case RTL_GIGA_MAC_VER_16:
	case RTL_GIGA_MAC_VER_17:
		RTL_W32(RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
		break;
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_34:
	case RTL_GIGA_MAC_VER_35:
		RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
		break;
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
	case RTL_GIGA_MAC_VER_42:
	case RTL_GIGA_MAC_VER_43:
	case RTL_GIGA_MAC_VER_44:
		RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
		break;
	default:
		RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
		break;
	}
}
4253
4254static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
4255{
4256	tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0;
4257}
4258
/*
 * Enable jumbo frames via the chip-specific hook, with the config
 * registers temporarily unlocked around the callback.
 */
static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Cfg9346, Cfg9346_Unlock);
	rtl_generic_op(tp, tp->jumbo_ops.enable);
	RTL_W8(Cfg9346, Cfg9346_Lock);
}
4267
/*
 * Disable jumbo frames via the chip-specific hook, with the config
 * registers temporarily unlocked around the callback.
 */
static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Cfg9346, Cfg9346_Unlock);
	rtl_generic_op(tp, tp->jumbo_ops.disable);
	RTL_W8(Cfg9346, Cfg9346_Lock);
}
4276
/*
 * 8168C jumbo enable: set both jumbo enable bits and lower the PCIe
 * max read request size (512 bytes) as the hardware requires.
 */
static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
	RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1);
	rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
}
4285
/*
 * 8168C jumbo disable: clear both jumbo enable bits and restore the
 * larger PCIe max read request size (4096 bytes).
 */
static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
	RTL_W8(Config4, RTL_R8(Config4) & ~Jumbo_En1);
	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
}
4294
/* 8168DP jumbo enable: a single Config3 bit is sufficient. */
static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
}
4301
/* 8168DP jumbo disable: clear the single Config3 enable bit. */
static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
}
4308
/*
 * 8168E jumbo enable: raise the max Tx packet size, set the jumbo
 * bits in Config3/Config4 and lower the PCIe max read request size.
 */
static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(MaxTxPacketSize, 0x3f);
	RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
	RTL_W8(Config4, RTL_R8(Config4) | 0x01);
	rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
}
4318
/*
 * 8168E jumbo disable: restore the standard max Tx packet size,
 * clear the jumbo bits and restore the larger PCIe read request size.
 */
static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(MaxTxPacketSize, 0x0c);
	RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
	RTL_W8(Config4, RTL_R8(Config4) & ~0x01);
	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
}
4328
/*
 * 8168B rev 0 jumbo enable: only needs a smaller PCIe max read
 * request size (plus no-snoop), no chip register changes.
 */
static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
{
	rtl_tx_performance_tweak(tp->pci_dev,
		(0x2 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
}
4334
/* 8168B rev 0 jumbo disable: restore the larger PCIe read request size. */
static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp)
{
	rtl_tx_performance_tweak(tp->pci_dev,
		(0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
}
4340
/*
 * 8168B rev 1 jumbo enable: rev 0 handling plus an extra Config4 bit.
 */
static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r8168b_0_hw_jumbo_enable(tp);

	RTL_W8(Config4, RTL_R8(Config4) | (1 << 0));
}
4349
/*
 * 8168B rev 1 jumbo disable: rev 0 handling plus clearing the extra
 * Config4 bit.
 */
static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r8168b_0_hw_jumbo_disable(tp);

	RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
}
4358
/*
 * Select the jumbo-frame enable/disable callbacks for the detected
 * chip.  Chips with NULL ops either need no action for jumbo frames
 * (8169) or do not support them at all (810x, 8168G family).
 */
static void rtl_init_jumbo_ops(struct rtl8169_private *tp)
{
	struct jumbo_ops *ops = &tp->jumbo_ops;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
		ops->disable	= r8168b_0_hw_jumbo_disable;
		ops->enable	= r8168b_0_hw_jumbo_enable;
		break;
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		ops->disable	= r8168b_1_hw_jumbo_disable;
		ops->enable	= r8168b_1_hw_jumbo_enable;
		break;
	case RTL_GIGA_MAC_VER_18: /* Wild guess. Needs info from Realtek. */
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21: /* Wild guess. Needs info from Realtek. */
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
		ops->disable	= r8168c_hw_jumbo_disable;
		ops->enable	= r8168c_hw_jumbo_enable;
		break;
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
		ops->disable	= r8168dp_hw_jumbo_disable;
		ops->enable	= r8168dp_hw_jumbo_enable;
		break;
	case RTL_GIGA_MAC_VER_31: /* Wild guess. Needs info from Realtek. */
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_34:
		ops->disable	= r8168e_hw_jumbo_disable;
		ops->enable	= r8168e_hw_jumbo_enable;
		break;

	/*
	 * No action needed for jumbo frames with 8169.
	 * No jumbo for 810x at all.
	 */
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
	case RTL_GIGA_MAC_VER_42:
	case RTL_GIGA_MAC_VER_43:
	case RTL_GIGA_MAC_VER_44:
	default:
		ops->disable	= NULL;
		ops->enable	= NULL;
		break;
	}
}
4413
/* Poll condition: true while the chip still reports CmdReset pending. */
DECLARE_RTL_COND(rtl_chipcmd_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R8(ChipCmd) & CmdReset;
}
4420
/*
 * Issue a chip software reset and wait (up to 100 * 100us) for the
 * hardware to clear the CmdReset bit.
 */
static void rtl_hw_reset(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(ChipCmd, CmdReset);

	rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
}
4429
4430static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
4431{
4432	struct rtl_fw *rtl_fw;
4433	const char *name;
4434	int rc = -ENOMEM;
4435
4436	name = rtl_lookup_firmware_name(tp);
4437	if (!name)
4438		goto out_no_firmware;
4439
4440	rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
4441	if (!rtl_fw)
4442		goto err_warn;
4443
4444	rc = request_firmware(&rtl_fw->fw, name, &tp->pci_dev->dev);
4445	if (rc < 0)
4446		goto err_free;
4447
4448	rc = rtl_check_firmware(tp, rtl_fw);
4449	if (rc < 0)
4450		goto err_release_firmware;
4451
4452	tp->rtl_fw = rtl_fw;
4453out:
4454	return;
4455
4456err_release_firmware:
4457	release_firmware(rtl_fw->fw);
4458err_free:
4459	kfree(rtl_fw);
4460err_warn:
4461	netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n",
4462		   name, rc);
4463out_no_firmware:
4464	tp->rtl_fw = NULL;
4465	goto out;
4466}
4467
4468static void rtl_request_firmware(struct rtl8169_private *tp)
4469{
4470	if (IS_ERR(tp->rtl_fw))
4471		rtl_request_uncached_firmware(tp);
4472}
4473
/* Stop accepting any packet class by clearing the RxConfig accept bits. */
static void rtl_rx_close(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
}
4480
/* Poll condition: true while the normal priority queue poll bit is set. */
DECLARE_RTL_COND(rtl_npq_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R8(TxPoll) & NPQ;
}
4487
/* Poll condition: true once TxConfig reports the transmit FIFO empty. */
DECLARE_RTL_COND(rtl_txcfg_empty_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(TxConfig) & TXCFG_EMPTY;
}
4494
/*
 * Quiesce and reset the chip: mask irqs, close the receiver, wait for
 * pending TX to drain using the per-family mechanism, then issue the
 * software reset.
 */
static void rtl8169_hw_reset(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* Disable interrupts */
	rtl8169_irq_mask_and_ack(tp);

	rtl_rx_close(tp);

	if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_31) {
		/* 8168dp family: wait for the NPQ poll bit to clear. */
		rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
	           tp->mac_version == RTL_GIGA_MAC_VER_35 ||
	           tp->mac_version == RTL_GIGA_MAC_VER_36 ||
	           tp->mac_version == RTL_GIGA_MAC_VER_37 ||
	           tp->mac_version == RTL_GIGA_MAC_VER_40 ||
	           tp->mac_version == RTL_GIGA_MAC_VER_41 ||
	           tp->mac_version == RTL_GIGA_MAC_VER_42 ||
	           tp->mac_version == RTL_GIGA_MAC_VER_43 ||
	           tp->mac_version == RTL_GIGA_MAC_VER_44 ||
	           tp->mac_version == RTL_GIGA_MAC_VER_38) {
		/* Newer chips: request stop and poll TXCFG_EMPTY. */
		RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
		rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
	} else {
		/* Legacy chips: request stop and give the FIFO 100us. */
		RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
		udelay(100);
	}

	rtl_hw_reset(tp);
}
4527
/* Program TxConfig with the driver's DMA burst size and interframe gap. */
static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* Set DMA burst size and Interframe Gap Time */
	RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
		(InterFrameGap << TxInterFrameGapShift));
}
4536
4537static void rtl_hw_start(struct net_device *dev)
4538{
4539	struct rtl8169_private *tp = netdev_priv(dev);
4540
4541	tp->hw_start(dev);
4542
4543	rtl_irq_enable_all(tp);
4544}
4545
/*
 * Program the TX/RX descriptor ring base addresses.  The high/low write
 * order below is deliberate and must not be changed (see comment).
 */
static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
					 void __iomem *ioaddr)
{
	/*
	 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
	 * register to be written before TxDescAddrLow to work.
	 * Switching from MMIO to I/O access fixes the issue as well.
	 */
	RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
	RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32));
	RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
	RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
}
4559
/*
 * Read CPlusCmd and write the same value back, returning what was read.
 * The write-back of the just-read value appears intentional (register
 * latch behavior) -- NOTE(review): exact HW rationale not visible here.
 */
static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
{
	u16 cmd;

	cmd = RTL_R16(CPlusCmd);
	RTL_W16(CPlusCmd, cmd);
	return cmd;
}
4568
/* Set the RX size filter just above the buffer size (see comment). */
static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
{
	/* Low hurts. Let's disable the filtering. */
	RTL_W16(RxMaxSize, rx_buf_sz + 1);
}
4574
4575static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
4576{
4577	static const struct rtl_cfg2_info {
4578		u32 mac_version;
4579		u32 clk;
4580		u32 val;
4581	} cfg2_info [] = {
4582		{ RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd
4583		{ RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
4584		{ RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
4585		{ RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
4586	};
4587	const struct rtl_cfg2_info *p = cfg2_info;
4588	unsigned int i;
4589	u32 clk;
4590
4591	clk = RTL_R8(Config2) & PCI_Clock_66MHz;
4592	for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) {
4593		if ((p->mac_version == mac_version) && (p->clk == clk)) {
4594			RTL_W32(0x7c, p->val);
4595			break;
4596		}
4597	}
4598}
4599
/*
 * Configure the receive filter from dev->flags and the multicast list:
 * promiscuous, all-multicast, or a CRC-based 64-bit multicast hash.
 * The MAR registers are written before RxConfig is updated.
 */
static void rtl_set_rx_mode(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 mc_filter[2];	/* Multicast hash filter */
	int rx_mode;
	u32 tmp = 0;

	if (dev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps. */
		netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
		rx_mode =
		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
		    AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct netdev_hw_addr *ha;

		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		netdev_for_each_mc_addr(ha, dev) {
			/* Hash bit index = top 6 bits of the address CRC. */
			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	if (dev->features & NETIF_F_RXALL)
		rx_mode |= (AcceptErr | AcceptRunt);

	tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;

	/* Chips after VER_06 use a byte-swapped multicast filter layout. */
	if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
		u32 data = mc_filter[0];

		mc_filter[0] = swab32(mc_filter[1]);
		mc_filter[1] = swab32(data);
	}

	/* VER_35: accept all multicast regardless of the computed hash. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_35)
		mc_filter[1] = mc_filter[0] = 0xffffffff;

	RTL_W32(MAR0 + 4, mc_filter[1]);
	RTL_W32(MAR0 + 0, mc_filter[0]);

	RTL_W32(RxConfig, tmp);
}
4652
/*
 * Hardware bring-up for the 8169/8110 family.  The per-version special
 * cases (early TX enable, TxConfig ordering) follow the chip errata;
 * the overall register write order is significant.
 */
static void rtl_hw_start_8169(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
		RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
	}

	RTL_W8(Cfg9346, Cfg9346_Unlock);
	/* VER_01..04 need TX/RX enabled before the config registers are set. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_04)
		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

	rtl_init_rxcfg(tp);

	RTL_W8(EarlyTxThres, NoEarlyTx);

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_04)
		rtl_set_rx_tx_config_registers(tp);

	tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;

	if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03) {
		dprintk("Set MAC Reg C+CR Offset 0xE0. "
			"Bit-3 and bit-14 MUST be 1\n");
		tp->cp_cmd |= (1 << 14);
	}

	RTL_W16(CPlusCmd, tp->cp_cmd);

	rtl8169_set_magic_reg(ioaddr, tp->mac_version);

	/*
	 * Undocumented corner. Supposedly:
	 * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
	 */
	RTL_W16(IntrMitigate, 0x0000);

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	/* All other versions enable TX/RX after the descriptor setup. */
	if (tp->mac_version != RTL_GIGA_MAC_VER_01 &&
	    tp->mac_version != RTL_GIGA_MAC_VER_02 &&
	    tp->mac_version != RTL_GIGA_MAC_VER_03 &&
	    tp->mac_version != RTL_GIGA_MAC_VER_04) {
		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
		rtl_set_rx_tx_config_registers(tp);
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
	RTL_R8(IntrMask);

	RTL_W32(RxMissed, 0);

	rtl_set_rx_mode(dev);

	/* no early-rx interrupts */
	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
}
4724
4725static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
4726{
4727	if (tp->csi_ops.write)
4728		tp->csi_ops.write(tp, addr, value);
4729}
4730
4731static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
4732{
4733	return tp->csi_ops.read ? tp->csi_ops.read(tp, addr) : ~0;
4734}
4735
4736static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits)
4737{
4738	u32 csi;
4739
4740	csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
4741	rtl_csi_write(tp, 0x070c, csi | bits);
4742}
4743
/* CSI access enable, variant 1 (top byte 0x17). */
static void rtl_csi_access_enable_1(struct rtl8169_private *tp)
{
	rtl_csi_access_enable(tp, 0x17000000);
}
4748
/* CSI access enable, variant 2 (top byte 0x27). */
static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
{
	rtl_csi_access_enable(tp, 0x27000000);
}
4753
/* Poll condition: state of the CSI address register completion flag. */
DECLARE_RTL_COND(rtl_csiar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(CSIAR) & CSIAR_FLAG;
}
4760
/*
 * Default CSI write: load CSIDR with the value, kick off the write via
 * CSIAR, then poll until the flag goes low (completion).
 */
static void r8169_csi_write(struct rtl8169_private *tp, int addr, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIDR, value);
	RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);

	rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}
4771
/*
 * Default CSI read: start the transaction via CSIAR, poll for the flag
 * to go high, then fetch CSIDR; ~0 on timeout.
 */
static u32 r8169_csi_read(struct rtl8169_private *tp, int addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);

	return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
		RTL_R32(CSIDR) : ~0;
}
4782
/* 8402-family CSI write: like r8169_csi_write plus the NIC function select. */
static void r8402_csi_write(struct rtl8169_private *tp, int addr, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIDR, value);
	RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
		CSIAR_FUNC_NIC);

	rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}
4794
/* 8402-family CSI read: like r8169_csi_read plus the NIC function select. */
static u32 r8402_csi_read(struct rtl8169_private *tp, int addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);

	return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
		RTL_R32(CSIDR) : ~0;
}
4805
/* 8411-family CSI write: selects the second NIC function (CSIAR_FUNC_NIC2). */
static void r8411_csi_write(struct rtl8169_private *tp, int addr, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIDR, value);
	RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
		CSIAR_FUNC_NIC2);

	rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}
4817
/* 8411-family CSI read: selects the second NIC function (CSIAR_FUNC_NIC2). */
static u32 r8411_csi_read(struct rtl8169_private *tp, int addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC2 |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);

	return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
		RTL_R32(CSIDR) : ~0;
}
4828
4829static void rtl_init_csi_ops(struct rtl8169_private *tp)
4830{
4831	struct csi_ops *ops = &tp->csi_ops;
4832
4833	switch (tp->mac_version) {
4834	case RTL_GIGA_MAC_VER_01:
4835	case RTL_GIGA_MAC_VER_02:
4836	case RTL_GIGA_MAC_VER_03:
4837	case RTL_GIGA_MAC_VER_04:
4838	case RTL_GIGA_MAC_VER_05:
4839	case RTL_GIGA_MAC_VER_06:
4840	case RTL_GIGA_MAC_VER_10:
4841	case RTL_GIGA_MAC_VER_11:
4842	case RTL_GIGA_MAC_VER_12:
4843	case RTL_GIGA_MAC_VER_13:
4844	case RTL_GIGA_MAC_VER_14:
4845	case RTL_GIGA_MAC_VER_15:
4846	case RTL_GIGA_MAC_VER_16:
4847	case RTL_GIGA_MAC_VER_17:
4848		ops->write	= NULL;
4849		ops->read	= NULL;
4850		break;
4851
4852	case RTL_GIGA_MAC_VER_37:
4853	case RTL_GIGA_MAC_VER_38:
4854		ops->write	= r8402_csi_write;
4855		ops->read	= r8402_csi_read;
4856		break;
4857
4858	case RTL_GIGA_MAC_VER_44:
4859		ops->write	= r8411_csi_write;
4860		ops->read	= r8411_csi_read;
4861		break;
4862
4863	default:
4864		ops->write	= r8169_csi_write;
4865		ops->read	= r8169_csi_read;
4866		break;
4867	}
4868}
4869
/* One EPHY patch entry, as consumed by rtl_ephy_init(). */
struct ephy_info {
	unsigned int offset;	/* EPHY register offset */
	u16 mask;		/* bits cleared from the read value */
	u16 bits;		/* bits then ORed in */
};
4875
4876static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e,
4877			  int len)
4878{
4879	u16 w;
4880
4881	while (len-- > 0) {
4882		w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits;
4883		rtl_ephy_write(tp, e->offset, w);
4884		e++;
4885	}
4886}
4887
/* Clear the PCIe CLKREQ# enable bit in the Link Control register. */
static void rtl_disable_clock_request(struct pci_dev *pdev)
{
	pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL,
				   PCI_EXP_LNKCTL_CLKREQ_EN);
}
4893
/* Set the PCIe CLKREQ# enable bit in the Link Control register. */
static void rtl_enable_clock_request(struct pci_dev *pdev)
{
	pcie_capability_set_word(pdev, PCI_EXP_LNKCTL,
				 PCI_EXP_LNKCTL_CLKREQ_EN);
}
4899
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * CPlusCmd bits that the 8168 start handlers clear during bring-up
 * (debug, BIST, forced duplex/flow-control and ASF controls).
 */
#define R8168_CPCMD_QUIRK_MASK (\
	EnableBist | \
	Mac_dbgo_oe | \
	Force_half_dup | \
	Force_rxflow_en | \
	Force_txflow_en | \
	Cxpl_dbg_sel | \
	ASF | \
	PktCntrDisable | \
	Mac_dbgo_sel)
4910
/*
 * Chip-specific init for 8168b/b rev B: disable beacon, clear the
 * CPlusCmd quirk bits, and tune PCIe TX performance at standard MTU.
 */
static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);

	/* Only applied at standard MTU (not in jumbo mode). */
	if (tp->dev->mtu <= ETH_DATA_LEN) {
		rtl_tx_performance_tweak(pdev, (0x5 << MAX_READ_REQUEST_SHIFT) |
					 PCI_EXP_DEVCTL_NOSNOOP_EN);
	}
}
4925
/*
 * Init for 8168be/f: base 8168bb sequence, then set the max TX packet
 * size and clear Config4 bit 0.
 */
static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_hw_start_8168bb(tp);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
}
4936
/*
 * Common tail of the 8168c/cp init: speed-down on link loss, beacon
 * off, TX tweak at standard MTU, clock request disabled, quirk bits
 * cleared.  Callers do the EPHY/CSI setup first.
 */
static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	RTL_W8(Config1, RTL_R8(Config1) | Speed_down);

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	if (tp->dev->mtu <= ETH_DATA_LEN)
		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_disable_clock_request(pdev);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
4953
/* Init for 8168cp rev 1: CSI enable, EPHY patch table, then common tail. */
static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8168cp[] = {
		{ 0x01, 0,	0x0001 },
		{ 0x02, 0x0800,	0x1000 },
		{ 0x03, 0,	0x0042 },
		{ 0x06, 0x0080,	0x0000 },
		{ 0x07, 0,	0x2000 }
	};

	rtl_csi_access_enable_2(tp);

	rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));

	__rtl_hw_start_8168cp(tp);
}
4970
/* Init for 8168cp rev 2: no EPHY patch; beacon off, TX tweak, quirk clear. */
static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	if (tp->dev->mtu <= ETH_DATA_LEN)
		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
4985
/*
 * Init for 8168cp rev 3: like rev 2 plus an undocumented DBG_REG write
 * and the max TX packet size setting.
 */
static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	/* Magic. */
	RTL_W8(DBG_REG, 0x20);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	if (tp->dev->mtu <= ETH_DATA_LEN)
		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
5005
/*
 * Init for 8168c rev 1: CSI enable, NAK-fix debug bits, EPHY patches,
 * then the common 8168cp tail.
 */
static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static const struct ephy_info e_info_8168c_1[] = {
		{ 0x02, 0x0800,	0x1000 },
		{ 0x03, 0,	0x0002 },
		{ 0x06, 0x0080,	0x0000 }
	};

	rtl_csi_access_enable_2(tp);

	RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);

	rtl_ephy_init(tp, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));

	__rtl_hw_start_8168cp(tp);
}
5023
/* Init for 8168c rev 2: CSI enable, EPHY patches, common 8168cp tail. */
static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8168c_2[] = {
		{ 0x01, 0,	0x0001 },
		{ 0x03, 0x0400,	0x0220 }
	};

	rtl_csi_access_enable_2(tp);

	rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));

	__rtl_hw_start_8168cp(tp);
}
5037
/* Init for 8168c rev 3: identical to rev 2. */
static void rtl_hw_start_8168c_3(struct rtl8169_private *tp)
{
	rtl_hw_start_8168c_2(tp);
}
5042
/* Init for 8168c rev 4: CSI enable and common 8168cp tail, no EPHY patch. */
static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
{
	rtl_csi_access_enable_2(tp);

	__rtl_hw_start_8168cp(tp);
}
5049
/*
 * Init for 8168d: CSI enable, clock request off, max TX size, TX tweak
 * at standard MTU, quirk bits cleared.
 */
static void rtl_hw_start_8168d(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	rtl_disable_clock_request(pdev);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	if (tp->dev->mtu <= ETH_DATA_LEN)
		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
5066
/*
 * Init for 8168dp: CSI enable variant 1, TX tweak at standard MTU,
 * max TX size, clock request off.
 */
static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_1(tp);

	if (tp->dev->mtu <= ETH_DATA_LEN)
		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_disable_clock_request(pdev);
}
5081
5082static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
5083{
5084	void __iomem *ioaddr = tp->mmio_addr;
5085	struct pci_dev *pdev = tp->pci_dev;
5086	static const struct ephy_info e_info_8168d_4[] = {
5087		{ 0x0b, ~0,	0x48 },
5088		{ 0x19, 0x20,	0x50 },
5089		{ 0x0c, ~0,	0x20 }
5090	};
5091	int i;
5092
5093	rtl_csi_access_enable_1(tp);
5094
5095	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5096
5097	RTL_W8(MaxTxPacketSize, TxPacketMax);
5098
5099	for (i = 0; i < ARRAY_SIZE(e_info_8168d_4); i++) {
5100		const struct ephy_info *e = e_info_8168d_4 + i;
5101		u16 w;
5102
5103		w = rtl_ephy_read(tp, e->offset);
5104		rtl_ephy_write(tp, 0x03, (w & e->mask) | e->bits);
5105	}
5106
5107	rtl_enable_clock_request(pdev);
5108}
5109
/*
 * Init for 8168e rev 1: CSI enable, large EPHY patch table, TX tweak,
 * TX FIFO pointer reset and SPI disable.
 */
static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	static const struct ephy_info e_info_8168e_1[] = {
		{ 0x00, 0x0200,	0x0100 },
		{ 0x00, 0x0000,	0x0004 },
		{ 0x06, 0x0002,	0x0001 },
		{ 0x06, 0x0000,	0x0030 },
		{ 0x07, 0x0000,	0x2000 },
		{ 0x00, 0x0000,	0x0020 },
		{ 0x03, 0x5800,	0x2000 },
		{ 0x03, 0x0000,	0x0001 },
		{ 0x01, 0x0800,	0x1000 },
		{ 0x07, 0x0000,	0x4000 },
		{ 0x1e, 0x0000,	0x2000 },
		{ 0x19, 0xffff,	0xfe6c },
		{ 0x0a, 0x0000,	0x0040 }
	};

	rtl_csi_access_enable_2(tp);

	rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));

	if (tp->dev->mtu <= ETH_DATA_LEN)
		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_disable_clock_request(pdev);

	/* Reset tx FIFO pointer */
	RTL_W32(MISC, RTL_R32(MISC) | TXPLA_RST);
	RTL_W32(MISC, RTL_R32(MISC) & ~TXPLA_RST);

	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
5147
/*
 * Init for 8168e rev 2: EPHY patches, a series of ERI (extended
 * register interface) writes, early TX size, auto FIFO, EEE LED
 * frequency adjustment and power management bits.
 */
static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	static const struct ephy_info e_info_8168e_2[] = {
		{ 0x09, 0x0000,	0x0080 },
		{ 0x19, 0x0000,	0x0224 }
	};

	rtl_csi_access_enable_1(tp);

	rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));

	if (tp->dev->mtu <= ETH_DATA_LEN)
		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);

	RTL_W8(MaxTxPacketSize, EarlySize);

	rtl_disable_clock_request(pdev);

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);

	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
	RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
5187
/*
 * Common init for the 8168f family: CSI enable, TX tweak, ERI setup,
 * early TX size, auto FIFO and power management bits.  Chip-specific
 * EPHY patches are applied by the callers.
 */
static void rtl_hw_start_8168f(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);

	RTL_W8(MaxTxPacketSize, EarlySize);

	rtl_disable_clock_request(pdev);

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
	RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
5218
/*
 * Init for 8168f rev 1: common 8168f sequence, EPHY patches, one extra
 * ERI tweak and the EEE LED frequency adjustment.
 */
static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static const struct ephy_info e_info_8168f_1[] = {
		{ 0x06, 0x00c0,	0x0020 },
		{ 0x08, 0x0001,	0x0002 },
		{ 0x09, 0x0000,	0x0080 },
		{ 0x19, 0x0000,	0x0224 }
	};

	rtl_hw_start_8168f(tp);

	rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));

	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
}
5238
/*
 * Init for 8411: common 8168f sequence plus its own EPHY patch table
 * and ERI tweak (no EEE LED adjustment, unlike 8168f rev 1).
 */
static void rtl_hw_start_8411(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8168f_1[] = {
		{ 0x06, 0x00c0,	0x0020 },
		{ 0x0f, 0xffff,	0x5200 },
		{ 0x1e, 0x0000,	0x4000 },
		{ 0x19, 0x0000,	0x0224 }
	};

	rtl_hw_start_8168f(tp);

	rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));

	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC);
}
5254
/*
 * Init for 8168g rev 1: auto FIFO, ERI setup, CSI enable, TX tweak,
 * RXDV gating off, early TX size, EEE LED frequency adjustment.
 */
static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);

	rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);

	rtl_csi_access_enable_1(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f, ERIAR_EXGMAC);

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
	RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
	RTL_W8(MaxTxPacketSize, EarlySize);

	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);

	rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);
}
5288
/*
 * Init for 8168g rev 2: base 8168g rev 1 sequence, then EPHY patches
 * applied with ASPM and clock request disabled first (see comment).
 */
static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static const struct ephy_info e_info_8168g_2[] = {
		{ 0x00, 0x0000,	0x0008 },
		{ 0x0c, 0x3df0,	0x0200 },
		{ 0x19, 0xffff,	0xfc00 },
		{ 0x1e, 0xffff,	0x20eb }
	};

	rtl_hw_start_8168g_1(tp);

	/* disable aspm and clock request before access ephy */
	RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
	RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
	rtl_ephy_init(tp, e_info_8168g_2, ARRAY_SIZE(e_info_8168g_2));
}
5306
/*
 * Init for 8411 rev 2: base 8168g rev 1 sequence, then its own EPHY
 * patches with ASPM and clock request disabled first.
 */
static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static const struct ephy_info e_info_8411_2[] = {
		{ 0x00, 0x0000,	0x0008 },
		{ 0x0c, 0x3df0,	0x0200 },
		{ 0x0f, 0xffff,	0x5200 },
		{ 0x19, 0x0020,	0x0000 },
		{ 0x1e, 0x0000,	0x2000 }
	};

	rtl_hw_start_8168g_1(tp);

	/* disable aspm and clock request before access ephy */
	RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
	RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
	rtl_ephy_init(tp, e_info_8411_2, ARRAY_SIZE(e_info_8411_2));
}
5325
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Top-level hardware bring-up for the whole 8168 family: generic
 * register setup (sizes, CPlusCmd, mitigation, descriptor rings), then
 * dispatch to the per-version init routine, and finally enable TX/RX
 * and program the receive filter.
 */
static void rtl_hw_start_8168(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;

	RTL_W16(CPlusCmd, tp->cp_cmd);

	RTL_W16(IntrMitigate, 0x5151);

	/* Work around for RxFIFO overflow. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
		tp->event_slow |= RxFIFOOver | PCSTimeout;
		tp->event_slow &= ~RxOverflow;
	}

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	rtl_set_rx_tx_config_registers(tp);

	/* Read flushes the preceding MMIO writes (PCI posting). */
	RTL_R8(IntrMask);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
		rtl_hw_start_8168bb(tp);
		break;

	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		rtl_hw_start_8168bef(tp);
		break;

	case RTL_GIGA_MAC_VER_18:
		rtl_hw_start_8168cp_1(tp);
		break;

	case RTL_GIGA_MAC_VER_19:
		rtl_hw_start_8168c_1(tp);
		break;

	case RTL_GIGA_MAC_VER_20:
		rtl_hw_start_8168c_2(tp);
		break;

	case RTL_GIGA_MAC_VER_21:
		rtl_hw_start_8168c_3(tp);
		break;

	case RTL_GIGA_MAC_VER_22:
		rtl_hw_start_8168c_4(tp);
		break;

	case RTL_GIGA_MAC_VER_23:
		rtl_hw_start_8168cp_2(tp);
		break;

	case RTL_GIGA_MAC_VER_24:
		rtl_hw_start_8168cp_3(tp);
		break;

	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
		rtl_hw_start_8168d(tp);
		break;

	case RTL_GIGA_MAC_VER_28:
		rtl_hw_start_8168d_4(tp);
		break;

	case RTL_GIGA_MAC_VER_31:
		rtl_hw_start_8168dp(tp);
		break;

	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		rtl_hw_start_8168e_1(tp);
		break;
	case RTL_GIGA_MAC_VER_34:
		rtl_hw_start_8168e_2(tp);
		break;

	case RTL_GIGA_MAC_VER_35:
	case RTL_GIGA_MAC_VER_36:
		rtl_hw_start_8168f_1(tp);
		break;

	case RTL_GIGA_MAC_VER_38:
		rtl_hw_start_8411(tp);
		break;

	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
		rtl_hw_start_8168g_1(tp);
		break;
	case RTL_GIGA_MAC_VER_42:
		rtl_hw_start_8168g_2(tp);
		break;

	case RTL_GIGA_MAC_VER_44:
		rtl_hw_start_8411_2(tp);
		break;

	default:
		printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
			dev->name, tp->mac_version);
		break;
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

	rtl_set_rx_mode(dev);

	/* no early-rx interrupts */
	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
}
5450
/*
 * CPlusCmd bits cleared by the 810x start handlers (same set of debug,
 * BIST, forced duplex/flow-control and ASF controls as the 8168 mask).
 */
#define R810X_CPCMD_QUIRK_MASK (\
	EnableBist | \
	Mac_dbgo_oe | \
	Force_half_dup | \
	Force_rxflow_en | \
	Force_txflow_en | \
	Cxpl_dbg_sel | \
	ASF | \
	PktCntrDisable | \
	Mac_dbgo_sel)
5461
/*
 * Init for 8102e rev 1: CSI enable, NAK fix, TX tweak, Config1/3 LED
 * and power setup, then the EPHY patch table.
 */
static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	static const struct ephy_info e_info_8102e_1[] = {
		{ 0x01,	0, 0x6e65 },
		{ 0x02,	0, 0x091f },
		{ 0x03,	0, 0xc2f9 },
		{ 0x06,	0, 0xafb5 },
		{ 0x07,	0, 0x0e00 },
		{ 0x19,	0, 0xec80 },
		{ 0x01,	0, 0x2e65 },
		{ 0x01,	0, 0x6e65 }
	};
	u8 cfg1;

	rtl_csi_access_enable_2(tp);

	RTL_W8(DBG_REG, FIX_NAK_1);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(Config1,
	       LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	/* If both LED bits stuck, drop LEDS0 again. */
	cfg1 = RTL_R8(Config1);
	if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
		RTL_W8(Config1, cfg1 & ~LEDS0);

	rtl_ephy_init(tp, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
}
5494
/* Chip-specific bring-up for RTL8102E rev 2 (RTL_GIGA_MAC_VER_09). */
static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
}
5507
/* RTL8102E rev 3 (RTL_GIGA_MAC_VER_08): rev 2 setup plus one EPHY fixup. */
static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
{
	rtl_hw_start_8102e_2(tp);

	rtl_ephy_write(tp, 0x03, 0xc2f9);
}
5514
/* Chip-specific bring-up for RTL8105E rev 1 (RTL_GIGA_MAC_VER_29). */
static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	/* EPHY (reg, clear-mask, set-bits) tuples applied last. */
	static const struct ephy_info e_info_8105e_1[] = {
		{ 0x07,	0, 0x4000 },
		{ 0x19,	0, 0x0200 },
		{ 0x19,	0, 0x0020 },
		{ 0x1e,	0, 0x2000 },
		{ 0x03,	0, 0x0001 },
		{ 0x19,	0, 0x0100 },
		{ 0x19,	0, 0x0004 },
		{ 0x0a,	0, 0x0020 }
	};

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);

	/* Disable Early Tally Counter */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000);

	RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);

	rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
}
5540
/* RTL8105E rev 2 (RTL_GIGA_MAC_VER_30): rev 1 setup plus EPHY 0x1e bit 15. */
static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
{
	rtl_hw_start_8105e_1(tp);
	rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000);
}
5546
/* Chip-specific bring-up for RTL8402 (RTL_GIGA_MAC_VER_37).
 * The ERI write sequence below mirrors vendor init code; keep the order. */
static void rtl_hw_start_8402(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static const struct ephy_info e_info_8402[] = {
		{ 0x19,	0xffff, 0xff64 },
		{ 0x1e,	0, 0x4000 }
	};

	rtl_csi_access_enable_2(tp);

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);

	rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));

	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
	/* Pulse bit 0 of ERI 0xdc: set then clear. */
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);
}
5575
/* Chip-specific bring-up for RTL8106 (RTL_GIGA_MAC_VER_39). */
static void rtl_hw_start_8106(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);

	RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
	RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
	RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
}
5587
/*
 * Common hardware start path for the 810x/8105/8106/8402 (fast ethernet)
 * family: generic ring/register setup, then dispatch to the per-chip
 * init routine, then enable Tx/Rx.
 */
static void rtl_hw_start_8101(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	/* Rx FIFO overflow is not serviced as a slow event on newer chips. */
	if (tp->mac_version >= RTL_GIGA_MAC_VER_30)
		tp->event_slow &= ~RxFIFOOver;

	if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_16)
		pcie_capability_set_word(pdev, PCI_EXP_DEVCTL,
					 PCI_EXP_DEVCTL_NOSNOOP_EN);

	/* Unlock config registers for the duration of the setup. */
	RTL_W8(Cfg9346, Cfg9346_Unlock);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
	RTL_W16(CPlusCmd, tp->cp_cmd);

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	rtl_set_rx_tx_config_registers(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
		rtl_hw_start_8102e_1(tp);
		break;

	case RTL_GIGA_MAC_VER_08:
		rtl_hw_start_8102e_3(tp);
		break;

	case RTL_GIGA_MAC_VER_09:
		rtl_hw_start_8102e_2(tp);
		break;

	case RTL_GIGA_MAC_VER_29:
		rtl_hw_start_8105e_1(tp);
		break;
	case RTL_GIGA_MAC_VER_30:
		rtl_hw_start_8105e_2(tp);
		break;

	case RTL_GIGA_MAC_VER_37:
		rtl_hw_start_8402(tp);
		break;

	case RTL_GIGA_MAC_VER_39:
		rtl_hw_start_8106(tp);
		break;
	case RTL_GIGA_MAC_VER_43:
		rtl_hw_start_8168g_2(tp);
		break;
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* No interrupt mitigation on this family. */
	RTL_W16(IntrMitigate, 0x0000);

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

	rtl_set_rx_mode(dev);

	RTL_R8(IntrMask);

	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
}
5659
5660static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
5661{
5662	struct rtl8169_private *tp = netdev_priv(dev);
5663
5664	if (new_mtu < ETH_ZLEN ||
5665	    new_mtu > rtl_chip_infos[tp->mac_version].jumbo_max)
5666		return -EINVAL;
5667
5668	if (new_mtu > ETH_DATA_LEN)
5669		rtl_hw_jumbo_enable(tp);
5670	else
5671		rtl_hw_jumbo_disable(tp);
5672
5673	dev->mtu = new_mtu;
5674	netdev_update_features(dev);
5675
5676	return 0;
5677}
5678
/* Poison an Rx descriptor so the NIC will never DMA into it again:
 * clear DescOwn and point addr at a recognizable bogus address. */
static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
{
	desc->addr = cpu_to_le64(0x0badbadbadbadbadull);
	desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
}
5684
/* Unmap, free and NULL one Rx buffer, then retire its descriptor. */
static void rtl8169_free_rx_databuff(struct rtl8169_private *tp,
				     void **data_buff, struct RxDesc *desc)
{
	dma_unmap_single(&tp->pci_dev->dev, le64_to_cpu(desc->addr), rx_buf_sz,
			 DMA_FROM_DEVICE);

	kfree(*data_buff);
	*data_buff = NULL;
	rtl8169_make_unusable_by_asic(desc);
}
5695
/* Hand an Rx descriptor (back) to the NIC: set DescOwn and the buffer
 * size while preserving the RingEnd marker already in opts1. */
static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
{
	u32 eor = le32_to_cpu(desc->opts1) & RingEnd;

	desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
}
5702
/* Publish a freshly mapped Rx buffer: the wmb() makes sure the DMA
 * address is visible before DescOwn is set in rtl8169_mark_to_asic(). */
static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
				       u32 rx_buf_sz)
{
	desc->addr = cpu_to_le64(mapping);
	wmb();
	rtl8169_mark_to_asic(desc, rx_buf_sz);
}
5710
/* Round a buffer pointer up to the next 16-byte boundary. */
static inline void *rtl8169_align(void *data)
{
	long addr = (long)data;

	return (void *)((addr + 15L) & ~15L);
}
5715
/*
 * Allocate and DMA-map one Rx buffer on the device's NUMA node and attach
 * it to @desc.  Returns the raw buffer pointer (kfree()-able) or NULL.
 * NOTE(review): the declared return type is struct sk_buff * but the
 * function actually hands back the void * data buffer — callers store it
 * in void *; consider changing the prototype to void *.
 */
static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
					     struct RxDesc *desc)
{
	void *data;
	dma_addr_t mapping;
	struct device *d = &tp->pci_dev->dev;
	struct net_device *dev = tp->dev;
	int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;

	data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
	if (!data)
		return NULL;

	/* The chip wants 16-byte aligned buffers; retry with slack if the
	 * first allocation happens to be misaligned. */
	if (rtl8169_align(data) != data) {
		kfree(data);
		data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node);
		if (!data)
			return NULL;
	}

	mapping = dma_map_single(d, rtl8169_align(data), rx_buf_sz,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(d, mapping))) {
		if (net_ratelimit())
			netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
		goto err_out;
	}

	rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
	return data;

err_out:
	kfree(data);
	return NULL;
}
5751
5752static void rtl8169_rx_clear(struct rtl8169_private *tp)
5753{
5754	unsigned int i;
5755
5756	for (i = 0; i < NUM_RX_DESC; i++) {
5757		if (tp->Rx_databuff[i]) {
5758			rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i,
5759					    tp->RxDescArray + i);
5760		}
5761	}
5762}
5763
/* Flag the final Rx descriptor so the NIC wraps back to the ring start. */
static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->opts1 |= cpu_to_le32(RingEnd);
}
5768
/*
 * Populate every empty slot of the Rx ring with a mapped buffer and mark
 * the last descriptor as ring end.  On allocation failure the whole ring
 * is torn down again and -ENOMEM is returned.
 */
static int rtl8169_rx_fill(struct rtl8169_private *tp)
{
	unsigned int i;

	for (i = 0; i < NUM_RX_DESC; i++) {
		void *data;

		/* Slot already has a buffer (refill after partial clear). */
		if (tp->Rx_databuff[i])
			continue;

		data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
		if (!data) {
			rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
			goto err_out;
		}
		tp->Rx_databuff[i] = data;
	}

	rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
	return 0;

err_out:
	rtl8169_rx_clear(tp);
	return -ENOMEM;
}
5794
/* Reset ring indexes, zero the Tx/Rx bookkeeping arrays and fill the
 * Rx ring with buffers.  Returns 0 or -ENOMEM. */
static int rtl8169_init_ring(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_init_ring_indexes(tp);

	memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
	memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *));

	return rtl8169_rx_fill(tp);
}
5806
/* Unmap one Tx buffer and scrub both the descriptor and the ring_info
 * entry so the slot reads as free (len == 0). */
static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb,
				 struct TxDesc *desc)
{
	unsigned int len = tx_skb->len;

	dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE);

	desc->opts1 = 0x00;
	desc->opts2 = 0x00;
	desc->addr = 0x00;
	tx_skb->len = 0;
}
5819
/*
 * Drop @n Tx ring entries beginning at index @start (modulo ring size):
 * unmap each mapped buffer and free the skb on the entry that owns it,
 * counting each freed skb as a dropped packet.
 */
static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
				   unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		unsigned int entry = (start + i) % NUM_TX_DESC;
		struct ring_info *tx_skb = tp->tx_skb + entry;
		unsigned int len = tx_skb->len;

		/* len == 0 means the slot was never used / already cleared. */
		if (len) {
			struct sk_buff *skb = tx_skb->skb;

			rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
					     tp->TxDescArray + entry);
			/* Only the first fragment of a packet holds the skb. */
			if (skb) {
				tp->dev->stats.tx_dropped++;
				dev_kfree_skb_any(skb);
				tx_skb->skb = NULL;
			}
		}
	}
}
5843
/* Drop every in-flight Tx packet and reset the Tx ring indexes. */
static void rtl8169_tx_clear(struct rtl8169_private *tp)
{
	rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
	tp->cur_tx = tp->dirty_tx = 0;
}
5849
/*
 * Workqueue handler for RTL_FLAG_TASK_RESET_PENDING: quiesce NAPI and the
 * Tx queue, reset the chip, recycle both rings and restart the device.
 */
static void rtl_reset_work(struct rtl8169_private *tp)
{
	struct net_device *dev = tp->dev;
	int i;

	napi_disable(&tp->napi);
	netif_stop_queue(dev);
	/* Wait for any in-flight start_xmit to finish before resetting. */
	synchronize_sched();

	rtl8169_hw_reset(tp);

	/* Give every Rx descriptor (and its buffer) back to the NIC. */
	for (i = 0; i < NUM_RX_DESC; i++)
		rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);

	rtl8169_tx_clear(tp);
	rtl8169_init_ring_indexes(tp);

	napi_enable(&tp->napi);
	rtl_hw_start(dev);
	netif_wake_queue(dev);
	rtl8169_check_link_status(dev, tp, tp->mmio_addr);
}
5872
/* ndo_tx_timeout handler: defer a full chip reset to the workqueue. */
static void rtl8169_tx_timeout(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
5879
/*
 * Map and queue the paged fragments of @skb into the Tx ring, starting
 * after tp->cur_tx.  Returns the number of fragments queued (0 for a
 * linear skb) or -EIO on DMA mapping failure, in which case any already
 * queued fragments are unwound.  The skb pointer is stored on the LAST
 * fragment's entry so rtl_tx() frees it exactly once.
 */
static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
			      u32 *opts)
{
	struct skb_shared_info *info = skb_shinfo(skb);
	unsigned int cur_frag, entry;
	struct TxDesc * uninitialized_var(txd);
	struct device *d = &tp->pci_dev->dev;

	entry = tp->cur_tx;
	for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
		const skb_frag_t *frag = info->frags + cur_frag;
		dma_addr_t mapping;
		u32 status, len;
		void *addr;

		entry = (entry + 1) % NUM_TX_DESC;

		txd = tp->TxDescArray + entry;
		len = skb_frag_size(frag);
		addr = skb_frag_address(frag);
		mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(d, mapping))) {
			if (net_ratelimit())
				netif_err(tp, drv, tp->dev,
					  "Failed to map TX fragments DMA!\n");
			goto err_out;
		}

		/* Anti gcc 2.95.3 bugware (sic) */
		status = opts[0] | len |
			(RingEnd * !((entry + 1) % NUM_TX_DESC));

		txd->opts1 = cpu_to_le32(status);
		txd->opts2 = cpu_to_le32(opts[1]);
		txd->addr = cpu_to_le64(mapping);

		tp->tx_skb[entry].len = len;
	}

	if (cur_frag) {
		tp->tx_skb[entry].skb = skb;
		/* Tag the final fragment so the chip knows the packet ends. */
		txd->opts1 |= cpu_to_le32(LastFrag);
	}

	return cur_frag;

err_out:
	/* Unwind only the fragments queued so far (head slot untouched). */
	rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag);
	return -EIO;
}
5930
/* Pad @skb with zeroes up to the minimum ethernet frame size (60 bytes).
 * Returns false if padding failed (skb already freed by skb_padto). */
static bool rtl_skb_pad(struct sk_buff *skb)
{
	if (skb_padto(skb, ETH_ZLEN))
		return false;
	skb_put(skb, ETH_ZLEN - skb->len);
	return true;
}
5938
/* True when the chip cannot be trusted to hardware-pad short frames:
 * the 8168evl (VER_34) corrupts the checksum of padded runts, so the
 * driver must pad (and checksum) in software instead. */
static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb)
{
	return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34;
}
5943
/*
 * Fill the TSO / checksum-offload bits of the Tx descriptor options for
 * @skb.  Returns false only when a software pad/checksum fallback failed
 * (the caller then drops the packet).
 * NOTE(review): the CHECKSUM_PARTIAL branch reads ip_hdr(skb) without
 * checking the frame is IPv4 — presumably offload is only advertised for
 * IPv4 here; confirm against the features setup elsewhere in the file.
 */
static inline bool rtl8169_tso_csum(struct rtl8169_private *tp,
				    struct sk_buff *skb, u32 *opts)
{
	/* Descriptor layout differs per txd_version; offset selects which
	 * opts word carries the checksum/MSS bits. */
	const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
	u32 mss = skb_shinfo(skb)->gso_size;
	int offset = info->opts_offset;

	if (mss) {
		opts[0] |= TD_LSO;
		opts[offset] |= min(mss, TD_MSS_MAX) << info->mss_shift;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const struct iphdr *ip = ip_hdr(skb);

		/* Hardware pad bug: checksum and pad in software instead. */
		if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
			return skb_checksum_help(skb) == 0 && rtl_skb_pad(skb);

		if (ip->protocol == IPPROTO_TCP)
			opts[offset] |= info->checksum.tcp;
		else if (ip->protocol == IPPROTO_UDP)
			opts[offset] |= info->checksum.udp;
		else
			WARN_ON_ONCE(1);
	} else {
		if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
			return rtl_skb_pad(skb);
	}
	return true;
}
5972
/*
 * ndo_start_xmit handler.  Maps the linear part of @skb, queues any paged
 * fragments via rtl8169_xmit_frags(), publishes the descriptors to the
 * chip and kicks TxPoll.  The memory-barrier choreography at the bottom
 * pairs with rtl_tx() to avoid losing a queue wake-up.
 */
static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	unsigned int entry = tp->cur_tx % NUM_TX_DESC;
	struct TxDesc *txd = tp->TxDescArray + entry;
	void __iomem *ioaddr = tp->mmio_addr;
	struct device *d = &tp->pci_dev->dev;
	dma_addr_t mapping;
	u32 status, len;
	u32 opts[2];
	int frags;

	/* The queue should have been stopped before the ring filled up. */
	if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
		netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
		goto err_stop_0;
	}

	if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
		goto err_stop_0;

	opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb));
	opts[0] = DescOwn;

	if (!rtl8169_tso_csum(tp, skb, opts))
		goto err_update_stats;

	len = skb_headlen(skb);
	mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(d, mapping))) {
		if (net_ratelimit())
			netif_err(tp, drv, dev, "Failed to map TX DMA!\n");
		goto err_dma_0;
	}

	tp->tx_skb[entry].len = len;
	txd->addr = cpu_to_le64(mapping);

	frags = rtl8169_xmit_frags(tp, skb, opts);
	if (frags < 0)
		goto err_dma_1;
	else if (frags)
		opts[0] |= FirstFrag;
	else {
		opts[0] |= FirstFrag | LastFrag;
		tp->tx_skb[entry].skb = skb;
	}

	txd->opts2 = cpu_to_le32(opts[1]);

	skb_tx_timestamp(skb);

	/* All descriptor fields must be visible before DescOwn is set. */
	wmb();

	/* Anti gcc 2.95.3 bugware (sic) */
	status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
	txd->opts1 = cpu_to_le32(status);

	tp->cur_tx += frags + 1;

	/* Publish cur_tx before telling the chip to poll the ring. */
	wmb();

	RTL_W8(TxPoll, NPQ);

	mmiowb();

	if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
		/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
		 * not miss a ring update when it notices a stopped queue.
		 */
		smp_wmb();
		netif_stop_queue(dev);
		/* Sync with rtl_tx:
		 * - publish queue status and cur_tx ring index (write barrier)
		 * - refresh dirty_tx ring index (read barrier).
		 * May the current thread have a pessimistic view of the ring
		 * status and forget to wake up queue, a racing rtl_tx thread
		 * can't.
		 */
		smp_mb();
		if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS))
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;

err_dma_1:
	rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
err_dma_0:
	dev_kfree_skb_any(skb);
err_update_stats:
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;

err_stop_0:
	netif_stop_queue(dev);
	dev->stats.tx_dropped++;
	return NETDEV_TX_BUSY;
}
6072
/*
 * Handle a SYSErr (PCI error) event: log the PCI command/status words,
 * clear the sticky error bits, optionally disable DAC, then reset the
 * chip and schedule a full restart from process context.
 */
static void rtl8169_pcierr_interrupt(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	u16 pci_status, pci_cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	pci_read_config_word(pdev, PCI_STATUS, &pci_status);

	netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
		  pci_cmd, pci_status);

	/*
	 * The recovery sequence below admits a very elaborated explanation:
	 * - it seems to work;
	 * - I did not see what else could be done;
	 * - it makes iop3xx happy.
	 *
	 * Feel free to adjust to your needs.
	 */
	if (pdev->broken_parity_status)
		pci_cmd &= ~PCI_COMMAND_PARITY;
	else
		pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;

	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);

	/* Write-1-to-clear the sticky error status bits. */
	pci_write_config_word(pdev, PCI_STATUS,
		pci_status & (PCI_STATUS_DETECTED_PARITY |
		PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
		PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));

	/* The infamous DAC f*ckup only happens at boot time */
	if ((tp->cp_cmd & PCIDAC) && !tp->cur_rx) {
		void __iomem *ioaddr = tp->mmio_addr;

		netif_info(tp, intr, dev, "disabling PCI DAC\n");
		tp->cp_cmd &= ~PCIDAC;
		RTL_W16(CPlusCmd, tp->cp_cmd);
		dev->features &= ~NETIF_F_HIGHDMA;
	}

	rtl8169_hw_reset(tp);

	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
6119
/*
 * Tx completion (NAPI context): reclaim descriptors the chip has released
 * (DescOwn cleared), account stats, free skbs, and wake the queue if it
 * was stopped.  Barriers pair with rtl8169_start_xmit().
 */
static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
{
	unsigned int dirty_tx, tx_left;

	dirty_tx = tp->dirty_tx;
	/* Read dirty_tx before cur_tx (pairs with producer's write order). */
	smp_rmb();
	tx_left = tp->cur_tx - dirty_tx;

	while (tx_left > 0) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct ring_info *tx_skb = tp->tx_skb + entry;
		u32 status;

		rmb();
		status = le32_to_cpu(tp->TxDescArray[entry].opts1);
		/* Chip still owns this descriptor: stop reclaiming. */
		if (status & DescOwn)
			break;

		rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
				     tp->TxDescArray + entry);
		/* Only the last fragment's entry carries the skb. */
		if (status & LastFrag) {
			u64_stats_update_begin(&tp->tx_stats.syncp);
			tp->tx_stats.packets++;
			tp->tx_stats.bytes += tx_skb->skb->len;
			u64_stats_update_end(&tp->tx_stats.syncp);
			dev_kfree_skb_any(tx_skb->skb);
			tx_skb->skb = NULL;
		}
		dirty_tx++;
		tx_left--;
	}

	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		/* Sync with rtl8169_start_xmit:
		 * - publish dirty_tx ring index (write barrier)
		 * - refresh cur_tx ring index and queue status (read barrier)
		 * May the current thread miss the stopped queue condition,
		 * a racing xmit thread can only have a right view of the
		 * ring status.
		 */
		smp_mb();
		if (netif_queue_stopped(dev) &&
		    TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
			netif_wake_queue(dev);
		}
		/*
		 * 8168 hack: TxPoll requests are lost when the Tx packets are
		 * too close. Let's kick an extra TxPoll request when a burst
		 * of start_xmit activity is detected (if it is not detected,
		 * it is slow enough). -- FR
		 */
		if (tp->cur_tx != dirty_tx) {
			void __iomem *ioaddr = tp->mmio_addr;

			RTL_W8(TxPoll, NPQ);
		}
	}
}
6179
6180static inline int rtl8169_fragmented_frame(u32 status)
6181{
6182	return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
6183}
6184
6185static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
6186{
6187	u32 status = opts1 & RxProtoMask;
6188
6189	if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
6190	    ((status == RxProtoUDP) && !(opts1 & UDPFail)))
6191		skb->ip_summed = CHECKSUM_UNNECESSARY;
6192	else
6193		skb_checksum_none_assert(skb);
6194}
6195
/*
 * Copy a received packet out of the DMA buffer into a fresh skb so the
 * original buffer can be handed straight back to the chip.  The
 * sync_for_cpu / sync_for_device pair brackets the CPU access to the
 * still-mapped streaming DMA region.  Returns NULL on allocation failure.
 */
static struct sk_buff *rtl8169_try_rx_copy(void *data,
					   struct rtl8169_private *tp,
					   int pkt_size,
					   dma_addr_t addr)
{
	struct sk_buff *skb;
	struct device *d = &tp->pci_dev->dev;

	data = rtl8169_align(data);
	dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
	prefetch(data);
	skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
	if (skb)
		memcpy(skb->data, data, pkt_size);
	dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);

	return skb;
}
6214
/*
 * Rx processing (NAPI context): walk the ring from tp->cur_rx, copy each
 * completed frame into a new skb, hand it to the stack, and recycle the
 * descriptor.  Processes at most min(budget, NUM_RX_DESC) descriptors and
 * returns the number consumed.
 */
static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget)
{
	unsigned int cur_rx, rx_left;
	unsigned int count;

	cur_rx = tp->cur_rx;

	for (rx_left = min(budget, NUM_RX_DESC); rx_left > 0; rx_left--, cur_rx++) {
		unsigned int entry = cur_rx % NUM_RX_DESC;
		struct RxDesc *desc = tp->RxDescArray + entry;
		u32 status;

		rmb();
		status = le32_to_cpu(desc->opts1) & tp->opts1_mask;

		/* Chip still owns this descriptor: nothing more to reap. */
		if (status & DescOwn)
			break;
		if (unlikely(status & RxRES)) {
			netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
				   status);
			dev->stats.rx_errors++;
			if (status & (RxRWT | RxRUNT))
				dev->stats.rx_length_errors++;
			if (status & RxCRC)
				dev->stats.rx_crc_errors++;
			/* FIFO overflow needs a full chip reset to recover. */
			if (status & RxFOVF) {
				rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
				dev->stats.rx_fifo_errors++;
			}
			/* With NETIF_F_RXALL, still deliver runt/CRC frames. */
			if ((status & (RxRUNT | RxCRC)) &&
			    !(status & (RxRWT | RxFOVF)) &&
			    (dev->features & NETIF_F_RXALL))
				goto process_pkt;
		} else {
			struct sk_buff *skb;
			dma_addr_t addr;
			int pkt_size;

process_pkt:
			addr = le64_to_cpu(desc->addr);
			/* Strip the 4-byte FCS unless the user asked for it. */
			if (likely(!(dev->features & NETIF_F_RXFCS)))
				pkt_size = (status & 0x00003fff) - 4;
			else
				pkt_size = status & 0x00003fff;

			/*
			 * The driver does not support incoming fragmented
			 * frames. They are seen as a symptom of over-mtu
			 * sized frames.
			 */
			if (unlikely(rtl8169_fragmented_frame(status))) {
				dev->stats.rx_dropped++;
				dev->stats.rx_length_errors++;
				goto release_descriptor;
			}

			skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
						  tp, pkt_size, addr);
			if (!skb) {
				dev->stats.rx_dropped++;
				goto release_descriptor;
			}

			rtl8169_rx_csum(skb, status);
			skb_put(skb, pkt_size);
			skb->protocol = eth_type_trans(skb, dev);

			rtl8169_rx_vlan_tag(desc, skb);

			napi_gro_receive(&tp->napi, skb);

			u64_stats_update_begin(&tp->rx_stats.syncp);
			tp->rx_stats.packets++;
			tp->rx_stats.bytes += pkt_size;
			u64_stats_update_end(&tp->rx_stats.syncp);
		}
release_descriptor:
		desc->opts2 = 0;
		/* Clear opts2 before handing ownership back to the chip. */
		wmb();
		rtl8169_mark_to_asic(desc, rx_buf_sz);
	}

	count = cur_rx - tp->cur_rx;
	tp->cur_rx = cur_rx;

	return count;
}
6302
/*
 * Hard IRQ handler: all real work is deferred to NAPI.  0xffff means the
 * register read hit a removed/powered-down device and is ignored.
 */
static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct rtl8169_private *tp = netdev_priv(dev);
	int handled = 0;
	u16 status;

	status = rtl_get_events(tp);
	if (status && status != 0xffff) {
		status &= RTL_EVENT_NAPI | tp->event_slow;
		if (status) {
			handled = 1;

			/* Mask further interrupts until the poll runs. */
			rtl_irq_disable(tp);
			napi_schedule(&tp->napi);
		}
	}
	return IRQ_RETVAL(handled);
}
6322
6323/*
6324 * Workqueue context.
6325 */
/*
 * Workqueue context.
 * Service the slow events (Rx FIFO overflow, PCI error, link change)
 * that rtl8169_poll() deferred, then re-enable the full interrupt mask.
 */
static void rtl_slow_event_work(struct rtl8169_private *tp)
{
	struct net_device *dev = tp->dev;
	u16 status;

	status = rtl_get_events(tp) & tp->event_slow;
	rtl_ack_events(tp, status);

	if (unlikely(status & RxFIFOOver)) {
		switch (tp->mac_version) {
		/* Work around for rx fifo overflow */
		case RTL_GIGA_MAC_VER_11:
			netif_stop_queue(dev);
			/* XXX - Hack alert. See rtl_task(). */
			set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
			/* fall through */
		default:
			break;
		}
	}

	if (unlikely(status & SYSErr))
		rtl8169_pcierr_interrupt(dev);

	if (status & LinkChg)
		__rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);

	rtl_irq_enable_all(tp);
}
6354
/*
 * Single workqueue entry point: runs each pending sub-task (slow events,
 * reset, PHY work) under the work lock, in the fixed order of rtl_work[].
 */
static void rtl_task(struct work_struct *work)
{
	static const struct {
		int bitnr;
		void (*action)(struct rtl8169_private *);
	} rtl_work[] = {
		/* XXX - keep rtl_slow_event_work() as first element. */
		{ RTL_FLAG_TASK_SLOW_PENDING,	rtl_slow_event_work },
		{ RTL_FLAG_TASK_RESET_PENDING,	rtl_reset_work },
		{ RTL_FLAG_TASK_PHY_PENDING,	rtl_phy_work }
	};
	struct rtl8169_private *tp =
		container_of(work, struct rtl8169_private, wk.work);
	struct net_device *dev = tp->dev;
	int i;

	rtl_lock_work(tp);

	/* Bail out if the device was closed/suspended in the meantime. */
	if (!netif_running(dev) ||
	    !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
		goto out_unlock;

	for (i = 0; i < ARRAY_SIZE(rtl_work); i++) {
		bool pending;

		pending = test_and_clear_bit(rtl_work[i].bitnr, tp->wk.flags);
		if (pending)
			rtl_work[i].action(tp);
	}

out_unlock:
	rtl_unlock_work(tp);
}
6388
/*
 * NAPI poll: ack and process fast Rx/Tx events inline, defer slow events
 * to the workqueue (leaving them masked until rtl_slow_event_work() runs),
 * and re-enable interrupts once the budget is not exhausted.
 */
static int rtl8169_poll(struct napi_struct *napi, int budget)
{
	struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
	struct net_device *dev = tp->dev;
	u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
	int work_done= 0;
	u16 status;

	status = rtl_get_events(tp);
	/* Slow events stay pending; the workqueue acks them itself. */
	rtl_ack_events(tp, status & ~tp->event_slow);

	if (status & RTL_EVENT_NAPI_RX)
		work_done = rtl_rx(dev, tp, (u32) budget);

	if (status & RTL_EVENT_NAPI_TX)
		rtl_tx(dev, tp);

	if (status & tp->event_slow) {
		enable_mask &= ~tp->event_slow;

		rtl_schedule_task(tp, RTL_FLAG_TASK_SLOW_PENDING);
	}

	if (work_done < budget) {
		napi_complete(napi);

		rtl_irq_enable(tp, enable_mask);
		mmiowb();
	}

	return work_done;
}
6421
/* Fold the chip's 24-bit RxMissed counter into dev stats and clear it.
 * Only the original 8169 chips (<= VER_06) expose this register. */
static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	if (tp->mac_version > RTL_GIGA_MAC_VER_06)
		return;

	dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
	RTL_W32(RxMissed, 0);
}
6432
/* Stop the device: quiesce timers/NAPI/queue, reset the chip, drain both
 * rings and power the PLL down.  Caller holds the work lock. */
static void rtl8169_down(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	del_timer_sync(&tp->timer);

	napi_disable(&tp->napi);
	netif_stop_queue(dev);

	rtl8169_hw_reset(tp);
	/*
	 * At this point device interrupts can not be enabled in any function,
	 * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task)
	 * and napi is disabled (rtl8169_poll).
	 */
	rtl8169_rx_missed(dev, ioaddr);

	/* Give a racing hard_start_xmit a few cycles to complete. */
	synchronize_sched();

	rtl8169_tx_clear(tp);

	rtl8169_rx_clear(tp);

	rtl_pll_power_down(tp);
}
6460
/* ndo_stop handler: mirror of rtl_open() — take the device down, cancel
 * deferred work, release the IRQ and free the descriptor rings. */
static int rtl8169_close(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	pm_runtime_get_sync(&pdev->dev);

	/* Update counters before going down */
	rtl8169_update_counters(dev);

	rtl_lock_work(tp);
	/* Prevent rtl_task() from doing anything once we start tearing down. */
	clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);

	rtl8169_down(dev);
	rtl_unlock_work(tp);

	cancel_work_sync(&tp->wk.work);

	free_irq(pdev->irq, dev);

	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
			  tp->RxPhyAddr);
	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
			  tp->TxPhyAddr);
	tp->TxDescArray = NULL;
	tp->RxDescArray = NULL;

	pm_runtime_put_sync(&pdev->dev);

	return 0;
}
6492
6493#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: drive the interrupt handler by hand (no IRQs available). */
static void rtl8169_netpoll(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_interrupt(tp->pci_dev->irq, dev);
}
6500#endif
6501
/*
 * ndo_open handler: allocate the Tx/Rx descriptor rings, fill the Rx
 * ring, load firmware, grab the IRQ and start the hardware.  Errors
 * unwind through the goto ladder in reverse acquisition order.
 */
static int rtl_open(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	int retval = -ENOMEM;

	pm_runtime_get_sync(&pdev->dev);

	/*
	 * Rx and Tx descriptors needs 256 bytes alignment.
	 * dma_alloc_coherent provides more.
	 */
	tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
					     &tp->TxPhyAddr, GFP_KERNEL);
	if (!tp->TxDescArray)
		goto err_pm_runtime_put;

	tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
					     &tp->RxPhyAddr, GFP_KERNEL);
	if (!tp->RxDescArray)
		goto err_free_tx_0;

	retval = rtl8169_init_ring(dev);
	if (retval < 0)
		goto err_free_rx_1;

	INIT_WORK(&tp->wk.work, rtl_task);

	smp_mb();

	rtl_request_firmware(tp);

	retval = request_irq(pdev->irq, rtl8169_interrupt,
			     (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
			     dev->name, dev);
	if (retval < 0)
		goto err_release_fw_2;

	rtl_lock_work(tp);

	set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);

	napi_enable(&tp->napi);

	rtl8169_init_phy(dev, tp);

	__rtl8169_set_features(dev, dev->features);

	rtl_pll_power_up(tp);

	rtl_hw_start(dev);

	netif_start_queue(dev);

	rtl_unlock_work(tp);

	tp->saved_wolopts = 0;
	pm_runtime_put_noidle(&pdev->dev);

	rtl8169_check_link_status(dev, tp, ioaddr);
out:
	return retval;

err_release_fw_2:
	rtl_release_firmware(tp);
	rtl8169_rx_clear(tp);
err_free_rx_1:
	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
			  tp->RxPhyAddr);
	tp->RxDescArray = NULL;
err_free_tx_0:
	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
			  tp->TxPhyAddr);
	tp->TxDescArray = NULL;
err_pm_runtime_put:
	pm_runtime_put_noidle(&pdev->dev);
	goto out;
}
6581
/*
 * ndo_get_stats64 handler: combine the u64_stats-protected software
 * Rx/Tx counters with the plain dev->stats error counters.  The seqcount
 * retry loops give a consistent 64-bit snapshot on 32-bit hosts.
 */
static struct rtnl_link_stats64 *
rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int start;

	/* Fold the hardware RxMissed counter in while the chip is up. */
	if (netif_running(dev))
		rtl8169_rx_missed(dev, ioaddr);

	do {
		start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp);
		stats->rx_packets = tp->rx_stats.packets;
		stats->rx_bytes	= tp->rx_stats.bytes;
	} while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start));


	do {
		start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp);
		stats->tx_packets = tp->tx_stats.packets;
		stats->tx_bytes	= tp->tx_stats.bytes;
	} while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start));

	stats->rx_dropped	= dev->stats.rx_dropped;
	stats->tx_dropped	= dev->stats.tx_dropped;
	stats->rx_length_errors = dev->stats.rx_length_errors;
	stats->rx_errors	= dev->stats.rx_errors;
	stats->rx_crc_errors	= dev->stats.rx_crc_errors;
	stats->rx_fifo_errors	= dev->stats.rx_fifo_errors;
	stats->rx_missed_errors = dev->stats.rx_missed_errors;

	return stats;
}
6615
/*
 * Common teardown used by system suspend, runtime suspend and shutdown:
 * detach the interface, stop the tx queue, quiesce NAPI and the deferred
 * work task, then power the PLL down.  No-op if the interface is down.
 */
static void rtl8169_net_suspend(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	netif_device_detach(dev);
	netif_stop_queue(dev);

	rtl_lock_work(tp);
	napi_disable(&tp->napi);
	/* Keep the work task from scheduling anything while suspended. */
	clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
	rtl_unlock_work(tp);

	rtl_pll_power_down(tp);
}
6633
6634#ifdef CONFIG_PM
6635
/*
 * System suspend (also freeze/poweroff) callback: quiesce the network
 * interface.  Always succeeds.
 */
static int rtl8169_suspend(struct device *device)
{
	struct net_device *ndev = pci_get_drvdata(to_pci_dev(device));

	rtl8169_net_suspend(ndev);

	return 0;
}
6645
/*
 * Common resume path for system and runtime resume: reattach the
 * interface, power the PLL up, re-enable NAPI and the deferred work
 * task, then schedule a chip reset to restart the datapath.
 */
static void __rtl8169_resume(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	netif_device_attach(dev);

	rtl_pll_power_up(tp);

	rtl_lock_work(tp);
	napi_enable(&tp->napi);
	set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
	rtl_unlock_work(tp);

	/* Defer the actual hardware restart to the reset work item. */
	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
6661
/*
 * System resume (also thaw/restore) callback: reprogram the PHY and, if
 * the interface was up when we suspended, restart the datapath.
 */
static int rtl8169_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_init_phy(dev, tp);

	if (netif_running(dev))
		__rtl8169_resume(dev);

	return 0;
}
6675
/*
 * Runtime suspend callback: save the user's Wake-on-LAN configuration,
 * arm every wake source so the sleeping device can still wake us, then
 * quiesce the interface.  Always succeeds.
 */
static int rtl8169_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	/* TxDescArray is non-NULL only while the interface is open;
	 * nothing to do otherwise.
	 */
	if (!tp->TxDescArray)
		return 0;

	rtl_lock_work(tp);
	/* Remember the configured WoL options; runtime_resume restores them. */
	tp->saved_wolopts = __rtl8169_get_wol(tp);
	__rtl8169_set_wol(tp, WAKE_ANY);
	rtl_unlock_work(tp);

	rtl8169_net_suspend(dev);

	return 0;
}
6694
/*
 * Runtime resume callback: restore the Wake-on-LAN options saved by
 * rtl8169_runtime_suspend(), reprogram the PHY and restart the
 * datapath.  Always succeeds.
 */
static int rtl8169_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	/* Interface was not open when we runtime-suspended: nothing to do. */
	if (!tp->TxDescArray)
		return 0;

	rtl_lock_work(tp);
	__rtl8169_set_wol(tp, tp->saved_wolopts);
	tp->saved_wolopts = 0;
	rtl_unlock_work(tp);

	rtl8169_init_phy(dev, tp);

	__rtl8169_resume(dev);

	return 0;
}
6715
6716static int rtl8169_runtime_idle(struct device *device)
6717{
6718	struct pci_dev *pdev = to_pci_dev(device);
6719	struct net_device *dev = pci_get_drvdata(pdev);
6720	struct rtl8169_private *tp = netdev_priv(dev);
6721
6722	return tp->TxDescArray ? -EBUSY : 0;
6723}
6724
/*
 * Power-management callbacks: the same pair handles every system-sleep
 * transition (suspend/freeze/poweroff and resume/thaw/restore), with
 * dedicated handlers for runtime PM.
 */
static const struct dev_pm_ops rtl8169_pm_ops = {
	.suspend		= rtl8169_suspend,
	.resume			= rtl8169_resume,
	.freeze			= rtl8169_suspend,
	.thaw			= rtl8169_resume,
	.poweroff		= rtl8169_suspend,
	.restore		= rtl8169_resume,
	.runtime_suspend	= rtl8169_runtime_suspend,
	.runtime_resume		= rtl8169_runtime_resume,
	.runtime_idle		= rtl8169_runtime_idle,
};
6736
6737#define RTL8169_PM_OPS	(&rtl8169_pm_ops)
6738
6739#else /* !CONFIG_PM */
6740
6741#define RTL8169_PM_OPS	NULL
6742
6743#endif /* !CONFIG_PM */
6744
/*
 * Shutdown quirk for 8168b-class chips (MAC versions 11/12/17): leave
 * the receiver running (with bus mastering disabled) so Wake-on-LAN
 * still works after power-off.  Other chips are untouched.
 */
static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* WoL fails with 8168b when the receiver is disabled. */
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		pci_clear_master(tp->pci_dev);

		/* Keep only the receiver enabled. */
		RTL_W8(ChipCmd, CmdRxEnb);
		/* PCI commit */
		RTL_R8(ChipCmd);
		break;
	default:
		break;
	}
}
6764
/*
 * PCI .shutdown handler: quiesce the interface, restore the factory MAC
 * address, reset the chip and - when the machine is powering off - arm
 * wake-up (if WoL is configured) and drop the device into D3hot.
 */
static void rtl_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);
	struct device *d = &pdev->dev;

	/* Keep the device runtime-resumed while we touch its registers. */
	pm_runtime_get_sync(d);

	rtl8169_net_suspend(dev);

	/* Restore original MAC address */
	rtl_rar_set(tp, dev->perm_addr);

	rtl8169_hw_reset(tp);

	if (system_state == SYSTEM_POWER_OFF) {
		if (__rtl8169_get_wol(tp) & WAKE_ANY) {
			rtl_wol_suspend_quirk(tp);
			rtl_wol_shutdown_quirk(tp);
		}

		pci_wake_from_d3(pdev, true);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	pm_runtime_put_noidle(d);
}
6792
/*
 * PCI .remove handler: undo rtl_init_one() - stop the extra on-chip
 * firmware on the MAC versions that run it, unregister the netdev and
 * release firmware, MSI and board resources.
 */
static void rtl_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	/* Mirrors the rtl8168_driver_start() call made at probe time for
	 * these same three MAC versions.
	 */
	if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_31) {
		rtl8168_driver_stop(tp);
	}

	netif_napi_del(&tp->napi);

	unregister_netdev(dev);

	rtl_release_firmware(tp);

	/* Balance the pm_runtime_put_noidle() done at the end of probe. */
	if (pci_dev_run_wake(pdev))
		pm_runtime_get_noresume(&pdev->dev);

	/* restore original MAC address */
	rtl_rar_set(tp, dev->perm_addr);

	rtl_disable_msi(pdev, tp);
	rtl8169_release_board(pdev, dev, tp->mmio_addr);
}
6819
/* Netdevice operations: datapath entry points and control hooks. */
static const struct net_device_ops rtl_netdev_ops = {
	.ndo_open		= rtl_open,
	.ndo_stop		= rtl8169_close,
	.ndo_get_stats64	= rtl8169_get_stats64,
	.ndo_start_xmit		= rtl8169_start_xmit,
	.ndo_tx_timeout		= rtl8169_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= rtl8169_change_mtu,
	.ndo_fix_features	= rtl8169_fix_features,
	.ndo_set_features	= rtl8169_set_features,
	.ndo_set_mac_address	= rtl_set_mac_address,
	.ndo_do_ioctl		= rtl8169_ioctl,
	.ndo_set_rx_mode	= rtl_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= rtl8169_netpoll,
#endif

};
6838
/*
 * Per-family probe configuration, indexed by the RTL_CFG_* value stored
 * in the PCI device table: which hw_start routine to use, which BAR
 * holds the registers, rx buffer alignment, the "slow event" interrupt
 * mask, optional features and the fallback MAC version when chip
 * identification fails.
 */
static const struct rtl_cfg_info {
	void (*hw_start)(struct net_device *);
	unsigned int region;
	unsigned int align;
	u16 event_slow;
	unsigned features;
	u8 default_ver;
} rtl_cfg_infos [] = {
	/* 8169/8110 (PCI). */
	[RTL_CFG_0] = {
		.hw_start	= rtl_hw_start_8169,
		.region		= 1,
		.align		= 0,
		.event_slow	= SYSErr | LinkChg | RxOverflow | RxFIFOOver,
		.features	= RTL_FEATURE_GMII,
		.default_ver	= RTL_GIGA_MAC_VER_01,
	},
	/* 8168/8111 (PCI-E gigabit). */
	[RTL_CFG_1] = {
		.hw_start	= rtl_hw_start_8168,
		.region		= 2,
		.align		= 8,
		.event_slow	= SYSErr | LinkChg | RxOverflow,
		.features	= RTL_FEATURE_GMII | RTL_FEATURE_MSI,
		.default_ver	= RTL_GIGA_MAC_VER_11,
	},
	/* 8101/8102 (PCI-E fast ethernet). */
	[RTL_CFG_2] = {
		.hw_start	= rtl_hw_start_8101,
		.region		= 2,
		.align		= 8,
		.event_slow	= SYSErr | LinkChg | RxOverflow | RxFIFOOver |
				  PCSTimeout,
		.features	= RTL_FEATURE_MSI,
		.default_ver	= RTL_GIGA_MAC_VER_13,
	}
};
6873
/*
 * Try to enable MSI when the configuration allows it; fall back to
 * legacy INTx otherwise.  On old chips (<= RTL_GIGA_MAC_VER_06) the
 * MSIEnable bit in Config2 is updated to mirror the result.  Returns
 * RTL_FEATURE_MSI when MSI is in use, 0 otherwise.
 */
/* Cfg9346_Unlock assumed. */
static unsigned rtl_try_msi(struct rtl8169_private *tp,
			    const struct rtl_cfg_info *cfg)
{
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned msi = 0;
	u8 cfg2;

	cfg2 = RTL_R8(Config2) & ~MSIEnable;
	if (cfg->features & RTL_FEATURE_MSI) {
		if (pci_enable_msi(tp->pci_dev)) {
			netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
		} else {
			cfg2 |= MSIEnable;
			msi = RTL_FEATURE_MSI;
		}
	}
	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
		RTL_W8(Config2, cfg2);
	return msi;
}
6895
/* Poll condition: true once the MCU reports its link list ready. */
DECLARE_RTL_COND(rtl_link_list_ready_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R8(MCU) & LINK_LIST_RDY;
}
6902
/* Poll condition: true once both the RX and TX paths report empty. */
DECLARE_RTL_COND(rtl_rxtx_empty_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return (RTL_R8(MCU) & RXTX_EMPTY) == RXTX_EMPTY;
}
6909
/*
 * One-time init for 8168g-family MACs: gate incoming packets, wait for
 * the TX/RX paths to drain, disable TX/RX and clear the NOW_IS_OOB flag,
 * then toggle bits 14/15 of MAC OCP register 0xe8de, waiting for the
 * MCU link list to become ready after each step.  Each wait bails out
 * silently on timeout (~4.2ms: 100us x 42), leaving the sequence
 * incomplete.
 */
static void rtl_hw_init_8168g(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	u32 data;

	tp->ocp_base = OCP_STD_PHY_BASE;

	/* Stop new receive DMA while we reconfigure. */
	RTL_W32(MISC, RTL_R32(MISC) | RXDV_GATED_EN);

	if (!rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42))
		return;

	if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
		return;

	RTL_W8(ChipCmd, RTL_R8(ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
	msleep(1);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);

	/* Clear bit 14 of OCP 0xe8de, then wait for link list ready. */
	data = r8168_mac_ocp_read(tp, 0xe8de);
	data &= ~(1 << 14);
	r8168_mac_ocp_write(tp, 0xe8de, data);

	if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
		return;

	/* Set bit 15 of OCP 0xe8de, then wait for link list ready again. */
	data = r8168_mac_ocp_read(tp, 0xe8de);
	data |= (1 << 15);
	r8168_mac_ocp_write(tp, 0xe8de, data);

	if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
		return;
}
6943
 
 
 
 
 
 
/*
 * Chip-specific one-time hardware init dispatch, called early in probe
 * before the first reset.  Only the 8168g family (MAC versions 40-44)
 * needs extra work; everything else is a no-op.
 */
static void rtl_hw_initialize(struct rtl8169_private *tp)
{
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
	case RTL_GIGA_MAC_VER_42:
	case RTL_GIGA_MAC_VER_43:
	case RTL_GIGA_MAC_VER_44:
		rtl_hw_init_8168g(tp);
		break;

	default:
		break;
	}
}
6959
/*
 * PCI .probe handler.  Allocates the netdev, enables and maps the PCI
 * device, identifies the chip, performs the one-time hardware init and
 * reset, reads the MAC address, configures features/ops/timers and
 * finally registers the netdev.  Errors unwind through the labelled
 * cleanup chain at the bottom in strict reverse order of acquisition.
 */
static int
rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
	const unsigned int region = cfg->region;
	struct rtl8169_private *tp;
	struct mii_if_info *mii;
	struct net_device *dev;
	void __iomem *ioaddr;
	int chipset, i;
	int rc;

	if (netif_msg_drv(&debug)) {
		printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
		       MODULENAME, RTL8169_VERSION);
	}

	dev = alloc_etherdev(sizeof (*tp));
	if (!dev) {
		rc = -ENOMEM;
		goto out;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev->netdev_ops = &rtl_netdev_ops;
	tp = netdev_priv(dev);
	tp->dev = dev;
	tp->pci_dev = pdev;
	tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);

	/* Generic MII glue used by the ethtool/ioctl paths. */
	mii = &tp->mii;
	mii->dev = dev;
	mii->mdio_read = rtl_mdio_read;
	mii->mdio_write = rtl_mdio_write;
	mii->phy_id_mask = 0x1f;
	mii->reg_num_mask = 0x1f;
	mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);

	/* disable ASPM completely as that cause random device stop working
	 * problems as well as full system hangs for some PCIe devices users */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
				     PCIE_LINK_STATE_CLKPM);

	/* enable device (incl. PCI PM wakeup and hotplug setup) */
	rc = pci_enable_device(pdev);
	if (rc < 0) {
		netif_err(tp, probe, dev, "enable failure\n");
		goto err_out_free_dev_1;
	}

	/* MWI is a nice-to-have; its absence is not fatal. */
	if (pci_set_mwi(pdev) < 0)
		netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");

	/* make sure PCI base addr 1 is MMIO */
	if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
		netif_err(tp, probe, dev,
			  "region #%d not an MMIO resource, aborting\n",
			  region);
		rc = -ENODEV;
		goto err_out_mwi_2;
	}

	/* check for weird/broken PCI region reporting */
	if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
		netif_err(tp, probe, dev,
			  "Invalid PCI region size(s), aborting\n");
		rc = -ENODEV;
		goto err_out_mwi_2;
	}

	rc = pci_request_regions(pdev, MODULENAME);
	if (rc < 0) {
		netif_err(tp, probe, dev, "could not request regions\n");
		goto err_out_mwi_2;
	}

	tp->cp_cmd = RxChkSum;

	/* Prefer 64-bit DMA only when the arch has wide dma_addr_t and the
	 * user asked for DAC via the use_dac module parameter.
	 */
	if ((sizeof(dma_addr_t) > 4) &&
	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
		tp->cp_cmd |= PCIDAC;
		dev->features |= NETIF_F_HIGHDMA;
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc < 0) {
			netif_err(tp, probe, dev, "DMA configuration failed\n");
			goto err_out_free_res_3;
		}
	}

	/* ioremap MMIO region */
	ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
	if (!ioaddr) {
		netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
		rc = -EIO;
		goto err_out_free_res_3;
	}
	tp->mmio_addr = ioaddr;

	if (!pci_is_pcie(pdev))
		netif_info(tp, probe, dev, "not PCI Express\n");

	/* Identify chip attached to board */
	rtl8169_get_mac_version(tp, dev, cfg->default_ver);

	rtl_init_rxcfg(tp);

	rtl_irq_disable(tp);

	rtl_hw_initialize(tp);

	rtl_hw_reset(tp);

	/* Clear any interrupt state left over from the reset. */
	rtl_ack_events(tp, 0xffff);

	pci_set_master(pdev);

	/*
	 * Pretend we are using VLANs; This bypasses a nasty bug where
	 * Interrupts stop flowing on high load on 8110SCd controllers.
	 */
	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
		tp->cp_cmd |= RxVlan;

	/* Select the per-chip-version implementation of each ops family. */
	rtl_init_mdio_ops(tp);
	rtl_init_pll_power_ops(tp);
	rtl_init_jumbo_ops(tp);
	rtl_init_csi_ops(tp);

	rtl8169_print_mac_version(tp);

	chipset = tp->mac_version;
	tp->txd_version = rtl_chip_infos[chipset].txd_version;

	/* Probe the Wake-on-LAN capability bits with the config registers
	 * unlocked, and set up MSI while unlocked too (rtl_try_msi()
	 * documents that requirement).
	 */
	RTL_W8(Cfg9346, Cfg9346_Unlock);
	RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
	RTL_W8(Config5, RTL_R8(Config5) & (BWF | MWF | UWF | LanWake | PMEStatus));
	if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
		tp->features |= RTL_FEATURE_WOL;
	if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
		tp->features |= RTL_FEATURE_WOL;
	tp->features |= rtl_try_msi(tp, cfg);
	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* TBI (fiber) and xMII (copper) attachments use different helpers. */
	if (rtl_tbi_enabled(tp)) {
		tp->set_speed = rtl8169_set_speed_tbi;
		tp->get_settings = rtl8169_gset_tbi;
		tp->phy_reset_enable = rtl8169_tbi_reset_enable;
		tp->phy_reset_pending = rtl8169_tbi_reset_pending;
		tp->link_ok = rtl8169_tbi_link_ok;
		tp->do_ioctl = rtl_tbi_ioctl;
	} else {
		tp->set_speed = rtl8169_set_speed_xmii;
		tp->get_settings = rtl8169_gset_xmii;
		tp->phy_reset_enable = rtl8169_xmii_reset_enable;
		tp->phy_reset_pending = rtl8169_xmii_reset_pending;
		tp->link_ok = rtl8169_xmii_link_ok;
		tp->do_ioctl = rtl_xmii_ioctl;
	}

	mutex_init(&tp->wk.mutex);
	u64_stats_init(&tp->rx_stats.syncp);
	u64_stats_init(&tp->tx_stats.syncp);

	/* Get MAC address */
	for (i = 0; i < ETH_ALEN; i++)
		dev->dev_addr[i] = RTL_R8(MAC0 + i);

	SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
	dev->watchdog_timeo = RTL8169_TX_TIMEOUT;

	netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);

	/* don't enable SG, IP_CSUM and TSO by default - it might not work
	 * properly for all devices */
	dev->features |= NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;

	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
		NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX |
		NETIF_F_HW_VLAN_CTAG_RX;
	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
		NETIF_F_HIGHDMA;

	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
		/* 8110SCd requires hardware Rx VLAN - disallow toggling */
		dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;

	dev->hw_features |= NETIF_F_RXALL;
	dev->hw_features |= NETIF_F_RXFCS;

	tp->hw_start = cfg->hw_start;
	tp->event_slow = cfg->event_slow;

	/* VER_01 does not report the Rx overflow bits in opts1. */
	tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
		~(RxBOVF | RxFOVF) : ~0;

	init_timer(&tp->timer);
	tp->timer.data = (unsigned long) dev;
	tp->timer.function = rtl8169_phy_timer;

	tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;

	rc = register_netdev(dev);
	if (rc < 0)
		goto err_out_msi_4;

	pci_set_drvdata(pdev, dev);

	netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n",
		   rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr,
		   (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq);
	if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) {
		netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, "
			   "tx checksumming: %s]\n",
			   rtl_chip_infos[chipset].jumbo_max,
			   rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko");
	}

	/* rtl_remove_one() stops this again for the same three versions. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_31) {
		rtl8168_driver_start(tp);
	}

	device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);

	if (pci_dev_run_wake(pdev))
		pm_runtime_put_noidle(&pdev->dev);

	netif_carrier_off(dev);

out:
	return rc;

	/* Error unwind: reverse order of the acquisitions above. */
err_out_msi_4:
	netif_napi_del(&tp->napi);
	rtl_disable_msi(pdev, tp);
	iounmap(ioaddr);
err_out_free_res_3:
	pci_release_regions(pdev);
err_out_mwi_2:
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
err_out_free_dev_1:
	free_netdev(dev);
	goto out;
}
7208
/* PCI driver glue; module_pci_driver() generates module init/exit. */
static struct pci_driver rtl8169_pci_driver = {
	.name		= MODULENAME,
	.id_table	= rtl8169_pci_tbl,
	.probe		= rtl_init_one,
	.remove		= rtl_remove_one,
	.shutdown	= rtl_shutdown,
	.driver.pm	= RTL8169_PM_OPS,
};

module_pci_driver(rtl8169_pci_driver);
/* ==== v4.6 copy of r8169.c follows (source-browser version marker) ==== */
   1/*
   2 * r8169.c: RealTek 8169/8168/8101 ethernet driver.
   3 *
   4 * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
   5 * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
   6 * Copyright (c) a lot of people too. Please respect their work.
   7 *
   8 * See MAINTAINERS file for support contact information.
   9 */
  10
  11#include <linux/module.h>
  12#include <linux/moduleparam.h>
  13#include <linux/pci.h>
  14#include <linux/netdevice.h>
  15#include <linux/etherdevice.h>
  16#include <linux/delay.h>
  17#include <linux/ethtool.h>
  18#include <linux/mii.h>
  19#include <linux/if_vlan.h>
  20#include <linux/crc32.h>
  21#include <linux/in.h>
  22#include <linux/ip.h>
  23#include <linux/tcp.h>
  24#include <linux/interrupt.h>
  25#include <linux/dma-mapping.h>
  26#include <linux/pm_runtime.h>
  27#include <linux/firmware.h>
  28#include <linux/pci-aspm.h>
  29#include <linux/prefetch.h>
  30#include <linux/ipv6.h>
  31#include <net/ip6_checksum.h>
  32
  33#include <asm/io.h>
  34#include <asm/irq.h>
  35
  36#define RTL8169_VERSION "2.3LK-NAPI"
  37#define MODULENAME "r8169"
  38#define PFX MODULENAME ": "
  39
  40#define FIRMWARE_8168D_1	"rtl_nic/rtl8168d-1.fw"
  41#define FIRMWARE_8168D_2	"rtl_nic/rtl8168d-2.fw"
  42#define FIRMWARE_8168E_1	"rtl_nic/rtl8168e-1.fw"
  43#define FIRMWARE_8168E_2	"rtl_nic/rtl8168e-2.fw"
  44#define FIRMWARE_8168E_3	"rtl_nic/rtl8168e-3.fw"
  45#define FIRMWARE_8168F_1	"rtl_nic/rtl8168f-1.fw"
  46#define FIRMWARE_8168F_2	"rtl_nic/rtl8168f-2.fw"
  47#define FIRMWARE_8105E_1	"rtl_nic/rtl8105e-1.fw"
  48#define FIRMWARE_8402_1		"rtl_nic/rtl8402-1.fw"
  49#define FIRMWARE_8411_1		"rtl_nic/rtl8411-1.fw"
  50#define FIRMWARE_8411_2		"rtl_nic/rtl8411-2.fw"
  51#define FIRMWARE_8106E_1	"rtl_nic/rtl8106e-1.fw"
  52#define FIRMWARE_8106E_2	"rtl_nic/rtl8106e-2.fw"
  53#define FIRMWARE_8168G_2	"rtl_nic/rtl8168g-2.fw"
  54#define FIRMWARE_8168G_3	"rtl_nic/rtl8168g-3.fw"
  55#define FIRMWARE_8168H_1	"rtl_nic/rtl8168h-1.fw"
  56#define FIRMWARE_8168H_2	"rtl_nic/rtl8168h-2.fw"
  57#define FIRMWARE_8107E_1	"rtl_nic/rtl8107e-1.fw"
  58#define FIRMWARE_8107E_2	"rtl_nic/rtl8107e-2.fw"
  59
/* Debug-only helpers: compiled to no-ops unless RTL8169_DEBUG is set. */
#ifdef RTL8169_DEBUG
#define assert(expr) \
	if (!(expr)) {					\
		printk( "Assertion failed! %s,%s,%s,line=%d\n",	\
		#expr,__FILE__,__func__,__LINE__);		\
	}
#define dprintk(fmt, args...) \
	do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
#else
#define assert(expr) do {} while (0)
#define dprintk(fmt, args...)	do {} while (0)
#endif /* RTL8169_DEBUG */
  72
/* Default netif_msg categories enabled when debug.msg_enable is -1. */
#define R8169_MSG_DEFAULT \
	(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)

/* Number of free slots in the tx ring (producer/consumer distance). */
#define TX_SLOTS_AVAIL(tp) \
	(tp->dirty_tx + NUM_TX_DESC - tp->cur_tx)

/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
#define TX_FRAGS_READY_FOR(tp,nr_frags) \
	(TX_SLOTS_AVAIL(tp) >= (nr_frags + 1))

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
   The RTL chips use a 64 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 32;

#define MAX_READ_REQUEST_SHIFT	12
#define TX_DMA_BURST	7	/* Maximum PCI burst, '7' is unlimited */
#define InterFrameGap	0x03	/* 3 means InterFrameGap = the shortest one */

/* Register window size and descriptor ring geometry. */
#define R8169_REGS_SIZE		256
#define R8169_NAPI_WEIGHT	64
#define NUM_TX_DESC	64	/* Number of Tx descriptor registers */
#define NUM_RX_DESC	256U	/* Number of Rx descriptor registers */
#define R8169_TX_RING_BYTES	(NUM_TX_DESC * sizeof(struct TxDesc))
#define R8169_RX_RING_BYTES	(NUM_RX_DESC * sizeof(struct RxDesc))

#define RTL8169_TX_TIMEOUT	(6*HZ)
#define RTL8169_PHY_TIMEOUT	(10*HZ)

/* write/read MMIO register - all expect a local `ioaddr` in scope. */
#define RTL_W8(reg, val8)	writeb ((val8), ioaddr + (reg))
#define RTL_W16(reg, val16)	writew ((val16), ioaddr + (reg))
#define RTL_W32(reg, val32)	writel ((val32), ioaddr + (reg))
#define RTL_R8(reg)		readb (ioaddr + (reg))
#define RTL_R16(reg)		readw (ioaddr + (reg))
#define RTL_R32(reg)		readl (ioaddr + (reg))
 108
/*
 * Driver-internal chip revision identifiers; used to index
 * rtl_chip_infos[] and to select per-revision behaviour throughout the
 * driver.  The values must stay dense starting at 0.
 */
enum mac_version {
	RTL_GIGA_MAC_VER_01 = 0,
	RTL_GIGA_MAC_VER_02,
	RTL_GIGA_MAC_VER_03,
	RTL_GIGA_MAC_VER_04,
	RTL_GIGA_MAC_VER_05,
	RTL_GIGA_MAC_VER_06,
	RTL_GIGA_MAC_VER_07,
	RTL_GIGA_MAC_VER_08,
	RTL_GIGA_MAC_VER_09,
	RTL_GIGA_MAC_VER_10,
	RTL_GIGA_MAC_VER_11,
	RTL_GIGA_MAC_VER_12,
	RTL_GIGA_MAC_VER_13,
	RTL_GIGA_MAC_VER_14,
	RTL_GIGA_MAC_VER_15,
	RTL_GIGA_MAC_VER_16,
	RTL_GIGA_MAC_VER_17,
	RTL_GIGA_MAC_VER_18,
	RTL_GIGA_MAC_VER_19,
	RTL_GIGA_MAC_VER_20,
	RTL_GIGA_MAC_VER_21,
	RTL_GIGA_MAC_VER_22,
	RTL_GIGA_MAC_VER_23,
	RTL_GIGA_MAC_VER_24,
	RTL_GIGA_MAC_VER_25,
	RTL_GIGA_MAC_VER_26,
	RTL_GIGA_MAC_VER_27,
	RTL_GIGA_MAC_VER_28,
	RTL_GIGA_MAC_VER_29,
	RTL_GIGA_MAC_VER_30,
	RTL_GIGA_MAC_VER_31,
	RTL_GIGA_MAC_VER_32,
	RTL_GIGA_MAC_VER_33,
	RTL_GIGA_MAC_VER_34,
	RTL_GIGA_MAC_VER_35,
	RTL_GIGA_MAC_VER_36,
	RTL_GIGA_MAC_VER_37,
	RTL_GIGA_MAC_VER_38,
	RTL_GIGA_MAC_VER_39,
	RTL_GIGA_MAC_VER_40,
	RTL_GIGA_MAC_VER_41,
	RTL_GIGA_MAC_VER_42,
	RTL_GIGA_MAC_VER_43,
	RTL_GIGA_MAC_VER_44,
	RTL_GIGA_MAC_VER_45,
	RTL_GIGA_MAC_VER_46,
	RTL_GIGA_MAC_VER_47,
	RTL_GIGA_MAC_VER_48,
	RTL_GIGA_MAC_VER_49,
	RTL_GIGA_MAC_VER_50,
	RTL_GIGA_MAC_VER_51,
	/* Sentinel: chip could not be identified. */
	RTL_GIGA_MAC_NONE   = 0xff,
};
 163
/* Tx descriptor layout generation used by a given chip revision. */
enum rtl_tx_desc_version {
	RTL_TD_0	= 0,
	RTL_TD_1	= 1,
};

/* Supported jumbo frame payload sizes (minus Ethernet header overhead). */
#define JUMBO_1K	ETH_DATA_LEN
#define JUMBO_4K	(4*1024 - ETH_HLEN - 2)
#define JUMBO_6K	(6*1024 - ETH_HLEN - 2)
#define JUMBO_7K	(7*1024 - ETH_HLEN - 2)
#define JUMBO_9K	(9*1024 - ETH_HLEN - 2)
 174
/*
 * Per-MAC-version chip description, indexed by enum mac_version:
 * marketing name, tx descriptor layout, optional firmware patch file,
 * maximum jumbo frame size and whether tx checksumming works with
 * jumbo frames.  _R is a local initializer shorthand, #undef'd below.
 */
#define _R(NAME,TD,FW,SZ,B) {	\
	.name = NAME,		\
	.txd_version = TD,	\
	.fw_name = FW,		\
	.jumbo_max = SZ,	\
	.jumbo_tx_csum = B	\
}

static const struct {
	const char *name;
	enum rtl_tx_desc_version txd_version;
	const char *fw_name;
	u16 jumbo_max;
	bool jumbo_tx_csum;
} rtl_chip_infos[] = {
	/* PCI devices. */
	[RTL_GIGA_MAC_VER_01] =
		_R("RTL8169",		RTL_TD_0, NULL, JUMBO_7K, true),
	[RTL_GIGA_MAC_VER_02] =
		_R("RTL8169s",		RTL_TD_0, NULL, JUMBO_7K, true),
	[RTL_GIGA_MAC_VER_03] =
		_R("RTL8110s",		RTL_TD_0, NULL, JUMBO_7K, true),
	[RTL_GIGA_MAC_VER_04] =
		_R("RTL8169sb/8110sb",	RTL_TD_0, NULL, JUMBO_7K, true),
	[RTL_GIGA_MAC_VER_05] =
		_R("RTL8169sc/8110sc",	RTL_TD_0, NULL, JUMBO_7K, true),
	[RTL_GIGA_MAC_VER_06] =
		_R("RTL8169sc/8110sc",	RTL_TD_0, NULL, JUMBO_7K, true),
	/* PCI-E devices. */
	[RTL_GIGA_MAC_VER_07] =
		_R("RTL8102e",		RTL_TD_1, NULL, JUMBO_1K, true),
	[RTL_GIGA_MAC_VER_08] =
		_R("RTL8102e",		RTL_TD_1, NULL, JUMBO_1K, true),
	[RTL_GIGA_MAC_VER_09] =
		_R("RTL8102e",		RTL_TD_1, NULL, JUMBO_1K, true),
	[RTL_GIGA_MAC_VER_10] =
		_R("RTL8101e",		RTL_TD_0, NULL, JUMBO_1K, true),
	[RTL_GIGA_MAC_VER_11] =
		_R("RTL8168b/8111b",	RTL_TD_0, NULL, JUMBO_4K, false),
	[RTL_GIGA_MAC_VER_12] =
		_R("RTL8168b/8111b",	RTL_TD_0, NULL, JUMBO_4K, false),
	[RTL_GIGA_MAC_VER_13] =
		_R("RTL8101e",		RTL_TD_0, NULL, JUMBO_1K, true),
	[RTL_GIGA_MAC_VER_14] =
		_R("RTL8100e",		RTL_TD_0, NULL, JUMBO_1K, true),
	[RTL_GIGA_MAC_VER_15] =
		_R("RTL8100e",		RTL_TD_0, NULL, JUMBO_1K, true),
	[RTL_GIGA_MAC_VER_16] =
		_R("RTL8101e",		RTL_TD_0, NULL, JUMBO_1K, true),
	[RTL_GIGA_MAC_VER_17] =
		_R("RTL8168b/8111b",	RTL_TD_0, NULL, JUMBO_4K, false),
	[RTL_GIGA_MAC_VER_18] =
		_R("RTL8168cp/8111cp",	RTL_TD_1, NULL, JUMBO_6K, false),
	[RTL_GIGA_MAC_VER_19] =
		_R("RTL8168c/8111c",	RTL_TD_1, NULL, JUMBO_6K, false),
	[RTL_GIGA_MAC_VER_20] =
		_R("RTL8168c/8111c",	RTL_TD_1, NULL, JUMBO_6K, false),
	[RTL_GIGA_MAC_VER_21] =
		_R("RTL8168c/8111c",	RTL_TD_1, NULL, JUMBO_6K, false),
	[RTL_GIGA_MAC_VER_22] =
		_R("RTL8168c/8111c",	RTL_TD_1, NULL, JUMBO_6K, false),
	[RTL_GIGA_MAC_VER_23] =
		_R("RTL8168cp/8111cp",	RTL_TD_1, NULL, JUMBO_6K, false),
	[RTL_GIGA_MAC_VER_24] =
		_R("RTL8168cp/8111cp",	RTL_TD_1, NULL, JUMBO_6K, false),
	[RTL_GIGA_MAC_VER_25] =
		_R("RTL8168d/8111d",	RTL_TD_1, FIRMWARE_8168D_1,
							JUMBO_9K, false),
	[RTL_GIGA_MAC_VER_26] =
		_R("RTL8168d/8111d",	RTL_TD_1, FIRMWARE_8168D_2,
							JUMBO_9K, false),
	[RTL_GIGA_MAC_VER_27] =
		_R("RTL8168dp/8111dp",	RTL_TD_1, NULL, JUMBO_9K, false),
	[RTL_GIGA_MAC_VER_28] =
		_R("RTL8168dp/8111dp",	RTL_TD_1, NULL, JUMBO_9K, false),
	[RTL_GIGA_MAC_VER_29] =
		_R("RTL8105e",		RTL_TD_1, FIRMWARE_8105E_1,
							JUMBO_1K, true),
	[RTL_GIGA_MAC_VER_30] =
		_R("RTL8105e",		RTL_TD_1, FIRMWARE_8105E_1,
							JUMBO_1K, true),
	[RTL_GIGA_MAC_VER_31] =
		_R("RTL8168dp/8111dp",	RTL_TD_1, NULL, JUMBO_9K, false),
	[RTL_GIGA_MAC_VER_32] =
		_R("RTL8168e/8111e",	RTL_TD_1, FIRMWARE_8168E_1,
							JUMBO_9K, false),
	[RTL_GIGA_MAC_VER_33] =
		_R("RTL8168e/8111e",	RTL_TD_1, FIRMWARE_8168E_2,
							JUMBO_9K, false),
	[RTL_GIGA_MAC_VER_34] =
		_R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3,
							JUMBO_9K, false),
	[RTL_GIGA_MAC_VER_35] =
		_R("RTL8168f/8111f",	RTL_TD_1, FIRMWARE_8168F_1,
							JUMBO_9K, false),
	[RTL_GIGA_MAC_VER_36] =
		_R("RTL8168f/8111f",	RTL_TD_1, FIRMWARE_8168F_2,
							JUMBO_9K, false),
	[RTL_GIGA_MAC_VER_37] =
		_R("RTL8402",		RTL_TD_1, FIRMWARE_8402_1,
							JUMBO_1K, true),
	[RTL_GIGA_MAC_VER_38] =
		_R("RTL8411",		RTL_TD_1, FIRMWARE_8411_1,
							JUMBO_9K, false),
	[RTL_GIGA_MAC_VER_39] =
		_R("RTL8106e",		RTL_TD_1, FIRMWARE_8106E_1,
							JUMBO_1K, true),
	[RTL_GIGA_MAC_VER_40] =
		_R("RTL8168g/8111g",	RTL_TD_1, FIRMWARE_8168G_2,
							JUMBO_9K, false),
	[RTL_GIGA_MAC_VER_41] =
		_R("RTL8168g/8111g",	RTL_TD_1, NULL, JUMBO_9K, false),
	[RTL_GIGA_MAC_VER_42] =
		_R("RTL8168g/8111g",	RTL_TD_1, FIRMWARE_8168G_3,
							JUMBO_9K, false),
	[RTL_GIGA_MAC_VER_43] =
		_R("RTL8106e",		RTL_TD_1, FIRMWARE_8106E_2,
							JUMBO_1K, true),
	[RTL_GIGA_MAC_VER_44] =
		_R("RTL8411",		RTL_TD_1, FIRMWARE_8411_2,
							JUMBO_9K, false),
	[RTL_GIGA_MAC_VER_45] =
		_R("RTL8168h/8111h",	RTL_TD_1, FIRMWARE_8168H_1,
							JUMBO_9K, false),
	[RTL_GIGA_MAC_VER_46] =
		_R("RTL8168h/8111h",	RTL_TD_1, FIRMWARE_8168H_2,
							JUMBO_9K, false),
	[RTL_GIGA_MAC_VER_47] =
		_R("RTL8107e",		RTL_TD_1, FIRMWARE_8107E_1,
							JUMBO_1K, false),
	[RTL_GIGA_MAC_VER_48] =
		_R("RTL8107e",		RTL_TD_1, FIRMWARE_8107E_2,
							JUMBO_1K, false),
	[RTL_GIGA_MAC_VER_49] =
		_R("RTL8168ep/8111ep",	RTL_TD_1, NULL,
							JUMBO_9K, false),
	[RTL_GIGA_MAC_VER_50] =
		_R("RTL8168ep/8111ep",	RTL_TD_1, NULL,
							JUMBO_9K, false),
	[RTL_GIGA_MAC_VER_51] =
		_R("RTL8168ep/8111ep",	RTL_TD_1, NULL,
							JUMBO_9K, false),
};
#undef _R
 319
/* Index into rtl_cfg_infos[], stored as driver_data in the PCI table. */
enum cfg_version {
	RTL_CFG_0 = 0x00,
	RTL_CFG_1,
	RTL_CFG_2
};
 325
/*
 * Supported PCI IDs; driver_data selects the rtl_cfg_infos[] entry.
 * Some entries match on subsystem IDs too (D-Link 0x4300 rebrands and
 * a Corega/early 8168 board) to pick the right configuration.
 */
static const struct pci_device_id rtl8169_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8129), 0, 0, RTL_CFG_0 },
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8136), 0, 0, RTL_CFG_2 },
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8167), 0, 0, RTL_CFG_0 },
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8168), 0, 0, RTL_CFG_1 },
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8169), 0, 0, RTL_CFG_0 },
	{ PCI_VENDOR_ID_DLINK,			0x4300,
		PCI_VENDOR_ID_DLINK, 0x4b10,		 0, 0, RTL_CFG_1 },
	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK,	0x4300), 0, 0, RTL_CFG_0 },
	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK,	0x4302), 0, 0, RTL_CFG_0 },
	{ PCI_DEVICE(PCI_VENDOR_ID_AT,		0xc107), 0, 0, RTL_CFG_0 },
	{ PCI_DEVICE(0x16ec,			0x0116), 0, 0, RTL_CFG_0 },
	{ PCI_VENDOR_ID_LINKSYS,		0x1032,
		PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
	{ 0x0001,				0x8168,
		PCI_ANY_ID, 0x2410, 0, 0, RTL_CFG_2 },
	{0,},
};

MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
 346
/* Rx buffer size in bytes. */
static int rx_buf_sz = 16383;
/* Module parameter: opt in to 64-bit DMA (PCI dual-address cycles). */
static int use_dac;
/* Module parameter: netif message-level bitmap (-1 = use defaults). */
static struct {
	u32 msg_enable;
} debug = { -1 };
 352
/* MMIO register offsets common to the whole 8169/8168/8101 family. */
enum rtl_registers {
	MAC0		= 0,	/* Ethernet hardware address. */
	MAC4		= 4,
	MAR0		= 8,	/* Multicast filter. */
	CounterAddrLow		= 0x10,
	CounterAddrHigh		= 0x14,
	TxDescStartAddrLow	= 0x20,
	TxDescStartAddrHigh	= 0x24,
	TxHDescStartAddrLow	= 0x28,
	TxHDescStartAddrHigh	= 0x2c,
	FLASH		= 0x30,
	ERSR		= 0x36,
	ChipCmd		= 0x37,
	TxPoll		= 0x38,
	IntrMask	= 0x3c,
	IntrStatus	= 0x3e,

	TxConfig	= 0x40,
#define	TXCFG_AUTO_FIFO			(1 << 7)	/* 8111e-vl */
#define	TXCFG_EMPTY			(1 << 11)	/* 8111e-vl */

	RxConfig	= 0x44,
#define	RX128_INT_EN			(1 << 15)	/* 8111c and later */
#define	RX_MULTI_EN			(1 << 14)	/* 8111c only */
#define	RXCFG_FIFO_SHIFT		13
					/* No threshold before first PCI xfer */
#define	RX_FIFO_THRESH			(7 << RXCFG_FIFO_SHIFT)
#define	RX_EARLY_OFF			(1 << 11)
#define	RXCFG_DMA_SHIFT			8
					/* Unlimited maximum PCI burst. */
#define	RX_DMA_BURST			(7 << RXCFG_DMA_SHIFT)

	RxMissed	= 0x4c,
	Cfg9346		= 0x50,	/* EEPROM / config register lock. */
	Config0		= 0x51,
	Config1		= 0x52,
	Config2		= 0x53,
#define PME_SIGNAL			(1 << 5)	/* 8168c and later */

	Config3		= 0x54,
	Config4		= 0x55,
	Config5		= 0x56,
	MultiIntr	= 0x5c,
	PHYAR		= 0x60,
	PHYstatus	= 0x6c,
	RxMaxSize	= 0xda,
	CPlusCmd	= 0xe0,
	IntrMitigate	= 0xe2,
	RxDescAddrLow	= 0xe4,
	RxDescAddrHigh	= 0xe8,
	EarlyTxThres	= 0xec,	/* 8169. Unit of 32 bytes. */

#define NoEarlyTx	0x3f	/* Max value : no early transmit. */

	MaxTxPacketSize	= 0xec,	/* 8101/8168. Unit of 128 bytes. */

#define TxPacketMax	(8064 >> 7)
#define EarlySize	0x27

	FuncEvent	= 0xf0,
	FuncEventMask	= 0xf4,
	FuncPresetState	= 0xf8,
	IBCR0           = 0xf8,
	IBCR2           = 0xf9,
	IBIMR0          = 0xfa,
	IBISR0          = 0xfb,
	FuncForceEvent	= 0xfc,
};
 421
/* Registers specific to the original 8169/8110 (TBI fiber interface). */
enum rtl8110_registers {
	TBICSR			= 0x64,
	TBI_ANAR		= 0x68,
	TBI_LPAR		= 0x6a,
};
 427
/* Registers shared by the 8168 and 8101 families (CSI, EPHY, eFuse, ...). */
enum rtl8168_8101_registers {
	CSIDR			= 0x64,
	CSIAR			= 0x68,
#define	CSIAR_FLAG			0x80000000
#define	CSIAR_WRITE_CMD			0x80000000
#define	CSIAR_BYTE_ENABLE		0x0f
#define	CSIAR_BYTE_ENABLE_SHIFT		12
#define	CSIAR_ADDR_MASK			0x0fff
#define CSIAR_FUNC_CARD			0x00000000
#define CSIAR_FUNC_SDIO			0x00010000
#define CSIAR_FUNC_NIC			0x00020000
#define CSIAR_FUNC_NIC2			0x00010000
	PMCH			= 0x6f,
	EPHYAR			= 0x80,
#define	EPHYAR_FLAG			0x80000000
#define	EPHYAR_WRITE_CMD		0x80000000
#define	EPHYAR_REG_MASK			0x1f
#define	EPHYAR_REG_SHIFT		16
#define	EPHYAR_DATA_MASK		0xffff
	DLLPR			= 0xd0,
#define	PFM_EN				(1 << 6)
#define	TX_10M_PS_EN			(1 << 7)
	DBG_REG			= 0xd1,
#define	FIX_NAK_1			(1 << 4)
#define	FIX_NAK_2			(1 << 3)
	TWSI			= 0xd2,
	MCU			= 0xd3,
#define	NOW_IS_OOB			(1 << 7)
#define	TX_EMPTY			(1 << 5)
#define	RX_EMPTY			(1 << 4)
#define	RXTX_EMPTY			(TX_EMPTY | RX_EMPTY)
#define	EN_NDP				(1 << 3)
#define	EN_OOB_RESET			(1 << 2)
#define	LINK_LIST_RDY			(1 << 1)
	EFUSEAR			= 0xdc,
#define	EFUSEAR_FLAG			0x80000000
#define	EFUSEAR_WRITE_CMD		0x80000000
#define	EFUSEAR_READ_CMD		0x00000000
#define	EFUSEAR_REG_MASK		0x03ff
#define	EFUSEAR_REG_SHIFT		8
#define	EFUSEAR_DATA_MASK		0xff
	MISC_1			= 0xf2,
#define	PFM_D3COLD_EN			(1 << 6)
};
 472
/* Registers found only on the 8168 family (ERI, GPHY OCP, MISC, ...). */
enum rtl8168_registers {
	LED_FREQ		= 0x1a,
	EEE_LED			= 0x1b,
	ERIDR			= 0x70,
	ERIAR			= 0x74,
#define ERIAR_FLAG			0x80000000
#define ERIAR_WRITE_CMD			0x80000000
#define ERIAR_READ_CMD			0x00000000
#define ERIAR_ADDR_BYTE_ALIGN		4
#define ERIAR_TYPE_SHIFT		16
#define ERIAR_EXGMAC			(0x00 << ERIAR_TYPE_SHIFT)
#define ERIAR_MSIX			(0x01 << ERIAR_TYPE_SHIFT)
#define ERIAR_ASF			(0x02 << ERIAR_TYPE_SHIFT)
#define ERIAR_OOB			(0x02 << ERIAR_TYPE_SHIFT)
#define ERIAR_MASK_SHIFT		12
#define ERIAR_MASK_0001			(0x1 << ERIAR_MASK_SHIFT)
#define ERIAR_MASK_0011			(0x3 << ERIAR_MASK_SHIFT)
#define ERIAR_MASK_0100			(0x4 << ERIAR_MASK_SHIFT)
#define ERIAR_MASK_0101			(0x5 << ERIAR_MASK_SHIFT)
#define ERIAR_MASK_1111			(0xf << ERIAR_MASK_SHIFT)
	EPHY_RXER_NUM		= 0x7c,
	OCPDR			= 0xb0,	/* OCP GPHY access */
#define OCPDR_WRITE_CMD			0x80000000
#define OCPDR_READ_CMD			0x00000000
#define OCPDR_REG_MASK			0x7f
#define OCPDR_GPHY_REG_SHIFT		16
#define OCPDR_DATA_MASK			0xffff
	OCPAR			= 0xb4,
#define OCPAR_FLAG			0x80000000
#define OCPAR_GPHY_WRITE_CMD		0x8000f060
#define OCPAR_GPHY_READ_CMD		0x0000f060
	GPHY_OCP		= 0xb8,
	RDSAR1			= 0xd0,	/* 8168c only. Undocumented on 8168dp */
	MISC			= 0xf0,	/* 8168e only. */
#define TXPLA_RST			(1 << 29)
#define DISABLE_LAN_EN			(1 << 23) /* Enable GPIO pin */
#define PWM_EN				(1 << 22)
#define RXDV_GATED_EN			(1 << 19)
#define EARLY_TALLY_EN			(1 << 16)
};
 513
/* Bit definitions for the registers declared above, grouped by register. */
enum rtl_register_content {
	/* InterruptStatusBits */
	SYSErr		= 0x8000,
	PCSTimeout	= 0x4000,
	SWInt		= 0x0100,
	TxDescUnavail	= 0x0080,
	RxFIFOOver	= 0x0040,
	LinkChg		= 0x0020,
	RxOverflow	= 0x0010,
	TxErr		= 0x0008,
	TxOK		= 0x0004,
	RxErr		= 0x0002,
	RxOK		= 0x0001,

	/* RxStatusDesc */
	RxBOVF	= (1 << 24),
	RxFOVF	= (1 << 23),
	RxRWT	= (1 << 22),
	RxRES	= (1 << 21),
	RxRUNT	= (1 << 20),
	RxCRC	= (1 << 19),

	/* ChipCmdBits */
	StopReq		= 0x80,
	CmdReset	= 0x10,
	CmdRxEnb	= 0x08,
	CmdTxEnb	= 0x04,
	RxBufEmpty	= 0x01,

	/* TXPoll register p.5 */
	HPQ		= 0x80,		/* Poll cmd on the high prio queue */
	NPQ		= 0x40,		/* Poll cmd on the low prio queue */
	FSWInt		= 0x01,		/* Forced software interrupt */

	/* Cfg9346Bits */
	Cfg9346_Lock	= 0x00,
	Cfg9346_Unlock	= 0xc0,

	/* rx_mode_bits */
	AcceptErr	= 0x20,
	AcceptRunt	= 0x10,
	AcceptBroadcast	= 0x08,
	AcceptMulticast	= 0x04,
	AcceptMyPhys	= 0x02,
	AcceptAllPhys	= 0x01,
#define RX_CONFIG_ACCEPT_MASK		0x3f

	/* TxConfigBits */
	TxInterFrameGapShift = 24,
	TxDMAShift = 8,	/* DMA burst value (0-7) is shift this many bits */

	/* Config1 register p.24 */
	LEDS1		= (1 << 7),
	LEDS0		= (1 << 6),
	Speed_down	= (1 << 4),
	MEMMAP		= (1 << 3),
	IOMAP		= (1 << 2),
	VPD		= (1 << 1),
	PMEnable	= (1 << 0),	/* Power Management Enable */

	/* Config2 register p. 25 */
	ClkReqEn	= (1 << 7),	/* Clock Request Enable */
	MSIEnable	= (1 << 5),	/* 8169 only. Reserved in the 8168. */
	PCI_Clock_66MHz = 0x01,
	PCI_Clock_33MHz = 0x00,

	/* Config3 register p.25 */
	MagicPacket	= (1 << 5),	/* Wake up when receives a Magic Packet */
	LinkUp		= (1 << 4),	/* Wake up when the cable connection is re-established */
	Jumbo_En0	= (1 << 2),	/* 8168 only. Reserved in the 8168b */
	Rdy_to_L23	= (1 << 1),	/* L23 Enable */
	Beacon_en	= (1 << 0),	/* 8168 only. Reserved in the 8168b */

	/* Config4 register */
	Jumbo_En1	= (1 << 1),	/* 8168 only. Reserved in the 8168b */

	/* Config5 register p.27 */
	BWF		= (1 << 6),	/* Accept Broadcast wakeup frame */
	MWF		= (1 << 5),	/* Accept Multicast wakeup frame */
	UWF		= (1 << 4),	/* Accept Unicast wakeup frame */
	Spi_en		= (1 << 3),
	LanWake		= (1 << 1),	/* LanWake enable/disable */
	PMEStatus	= (1 << 0),	/* PME status can be reset by PCI RST# */
	ASPM_en		= (1 << 0),	/* ASPM enable */

	/* TBICSR p.28 */
	TBIReset	= 0x80000000,
	TBILoopback	= 0x40000000,
	TBINwEnable	= 0x20000000,
	TBINwRestart	= 0x10000000,
	TBILinkOk	= 0x02000000,
	TBINwComplete	= 0x01000000,

	/* CPlusCmd p.31 */
	EnableBist	= (1 << 15),	// 8168 8101
	Mac_dbgo_oe	= (1 << 14),	// 8168 8101
	Normal_mode	= (1 << 13),	// unused
	Force_half_dup	= (1 << 12),	// 8168 8101
	Force_rxflow_en	= (1 << 11),	// 8168 8101
	Force_txflow_en	= (1 << 10),	// 8168 8101
	Cxpl_dbg_sel	= (1 << 9),	// 8168 8101
	ASF		= (1 << 8),	// 8168 8101
	PktCntrDisable	= (1 << 7),	// 8168 8101
	Mac_dbgo_sel	= 0x001c,	// 8168
	RxVlan		= (1 << 6),
	RxChkSum	= (1 << 5),
	PCIDAC		= (1 << 4),
	PCIMulRW	= (1 << 3),
	INTT_0		= 0x0000,	// 8168
	INTT_1		= 0x0001,	// 8168
	INTT_2		= 0x0002,	// 8168
	INTT_3		= 0x0003,	// 8168

	/* rtl8169_PHYstatus */
	TBI_Enable	= 0x80,
	TxFlowCtrl	= 0x40,
	RxFlowCtrl	= 0x20,
	_1000bpsF	= 0x10,
	_100bps		= 0x08,
	_10bps		= 0x04,
	LinkStatus	= 0x02,
	FullDup		= 0x01,

	/* _TBICSRBit */
	TBILinkOK	= 0x02000000,

	/* ResetCounterCommand */
	CounterReset	= 0x1,

	/* DumpCounterCommand */
	CounterDump	= 0x8,

	/* magic enable v2 */
	MagicPacket_v2	= (1 << 16),	/* Wake up when receives a Magic Packet */
};
 649
/* Bits common to the first doubleword of both Tx and Rx descriptors. */
enum rtl_desc_bit {
	/* First doubleword. */
	DescOwn		= (1 << 31), /* Descriptor is owned by NIC */
	RingEnd		= (1 << 30), /* End of descriptor ring */
	FirstFrag	= (1 << 29), /* First segment of a packet */
	LastFrag	= (1 << 28), /* Final segment of a packet */
};
 657
 658/* Generic case. */
/* Generic case. Tx descriptor bits valid on all chip generations. */
enum rtl_tx_desc_bit {
	/* First doubleword. */
	TD_LSO		= (1 << 27),		/* Large Send Offload */
#define TD_MSS_MAX			0x07ffu	/* MSS value */

	/* Second doubleword. */
	TxVlanTag	= (1 << 17),		/* Add VLAN tag */
};
 667
 668/* 8169, 8168b and 810x except 8102e. */
/* 8169, 8168b and 810x except 8102e: checksum bits live in opts1. */
enum rtl_tx_desc_bit_0 {
	/* First doubleword. */
#define TD0_MSS_SHIFT			16	/* MSS position (11 bits) */
	TD0_TCP_CS	= (1 << 16),		/* Calculate TCP/IP checksum */
	TD0_UDP_CS	= (1 << 17),		/* Calculate UDP/IP checksum */
	TD0_IP_CS	= (1 << 18),		/* Calculate IP checksum */
};
 676
 677/* 8102e, 8168c and beyond. */
/* 8102e, 8168c and beyond: checksum bits moved to opts2. */
enum rtl_tx_desc_bit_1 {
	/* First doubleword. */
	TD1_GTSENV4	= (1 << 26),		/* Giant Send for IPv4 */
	TD1_GTSENV6	= (1 << 25),		/* Giant Send for IPv6 */
#define GTTCPHO_SHIFT			18
#define GTTCPHO_MAX			0x7fU

	/* Second doubleword. */
#define TCPHO_SHIFT			18
#define TCPHO_MAX			0x3ffU
#define TD1_MSS_SHIFT			18	/* MSS position (11 bits) */
	TD1_IPv6_CS	= (1 << 28),		/* Calculate IPv6 checksum */
	TD1_IPv4_CS	= (1 << 29),		/* Calculate IPv4 checksum */
	TD1_TCP_CS	= (1 << 30),		/* Calculate TCP/IP checksum */
	TD1_UDP_CS	= (1 << 31),		/* Calculate UDP/IP checksum */
};
 694
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Rx descriptor status bits.  Note: IPFail (opts1) and RxVlanTag (opts2)
 * share the same bit number because they live in different doublewords.
 */
enum rtl_rx_desc_bit {
	/* Rx private */
	PID1		= (1 << 18), /* Protocol ID bit 1/2 */
	PID0		= (1 << 17), /* Protocol ID bit 2/2 */

#define RxProtoUDP	(PID1)
#define RxProtoTCP	(PID0)
#define RxProtoIP	(PID1 | PID0)
#define RxProtoMask	RxProtoIP

	IPFail		= (1 << 16), /* IP checksum failed */
	UDPFail		= (1 << 15), /* UDP/IP checksum failed */
	TCPFail		= (1 << 14), /* TCP/IP checksum failed */
	RxVlanTag	= (1 << 16), /* VLAN tag available */
};

#define RsvdMask	0x3fffc000
 712
/* Hardware Tx descriptor layout (16 bytes, little-endian as seen by NIC). */
struct TxDesc {
	__le32 opts1;	/* DescOwn/frag flags, length, checksum control */
	__le32 opts2;	/* VLAN tag and per-generation checksum bits */
	__le64 addr;	/* DMA address of the buffer */
};
 718
/* Hardware Rx descriptor layout (16 bytes, little-endian as seen by NIC). */
struct RxDesc {
	__le32 opts1;	/* DescOwn, status and packet length */
	__le32 opts2;	/* VLAN tag */
	__le64 addr;	/* DMA address of the buffer */
};
 724
/* Per-Tx-descriptor bookkeeping kept by the driver (not seen by hardware). */
struct ring_info {
	struct sk_buff	*skb;
	u32		len;
	/* Pad to pointer size so the array layout is stable on 64-bit. */
	u8		__pad[sizeof(void *) - sizeof(u32)];
};
 730
/* Driver feature flags stored in rtl8169_private::features. */
enum features {
	RTL_FEATURE_WOL		= (1 << 0),
	RTL_FEATURE_MSI		= (1 << 1),
	RTL_FEATURE_GMII	= (1 << 2),
};
 736
/*
 * Hardware statistics dump area.  Field order/sizes match the chip's DMA
 * counter dump format exactly - do not reorder.  ("tx_underun" spelling is
 * kept as-is; it is referenced elsewhere in the driver.)
 */
struct rtl8169_counters {
	__le64	tx_packets;
	__le64	rx_packets;
	__le64	tx_errors;
	__le32	rx_errors;
	__le16	rx_missed;
	__le16	align_errors;
	__le32	tx_one_collision;
	__le32	tx_multi_collision;
	__le64	rx_unicast;
	__le64	rx_broadcast;
	__le32	rx_multicast;
	__le16	tx_aborted;
	__le16	tx_underun;
};
 752
/* Snapshot of selected HW counters, used to report deltas across resets. */
struct rtl8169_tc_offsets {
	bool	inited;			/* true once a baseline was taken */
	__le64	tx_errors;
	__le32	tx_multi_collision;
	__le16	tx_aborted;
};
 759
/* Bit indices into rtl8169_private::wk.flags (deferred work requests). */
enum rtl_flag {
	RTL_FLAG_TASK_ENABLED,
	RTL_FLAG_TASK_SLOW_PENDING,
	RTL_FLAG_TASK_RESET_PENDING,
	RTL_FLAG_TASK_PHY_PENDING,
	RTL_FLAG_MAX
};
 767
/* Per-direction packet/byte counters protected by a u64_stats sync. */
struct rtl8169_stats {
	u64			packets;
	u64			bytes;
	struct u64_stats_sync	syncp;
};
 773
/* Per-adapter driver state, embedded in the net_device's private area. */
struct rtl8169_private {
	void __iomem *mmio_addr;	/* memory map physical address */
	struct pci_dev *pci_dev;
	struct net_device *dev;
	struct napi_struct napi;
	u32 msg_enable;
	u16 txd_version;
	u16 mac_version;
	u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
	u32 cur_tx; /* Index into the Tx descriptor buffer of next Tx pkt. */
	u32 dirty_tx;
	struct rtl8169_stats rx_stats;
	struct rtl8169_stats tx_stats;
	struct TxDesc *TxDescArray;	/* 256-aligned Tx descriptor ring */
	struct RxDesc *RxDescArray;	/* 256-aligned Rx descriptor ring */
	dma_addr_t TxPhyAddr;
	dma_addr_t RxPhyAddr;
	void *Rx_databuff[NUM_RX_DESC];	/* Rx data buffers */
	struct ring_info tx_skb[NUM_TX_DESC];	/* Tx data buffers */
	struct timer_list timer;
	u16 cp_cmd;

	u16 event_slow;		/* non-NAPI interrupt events to watch */

	/* Per-chip-generation MDIO (PHY register) access methods. */
	struct mdio_ops {
		void (*write)(struct rtl8169_private *, int, int);
		int (*read)(struct rtl8169_private *, int);
	} mdio_ops;

	/* Per-chip-generation PHY power up/down methods. */
	struct pll_power_ops {
		void (*down)(struct rtl8169_private *);
		void (*up)(struct rtl8169_private *);
	} pll_power_ops;

	/* Per-chip-generation jumbo frame enable/disable methods. */
	struct jumbo_ops {
		void (*enable)(struct rtl8169_private *);
		void (*disable)(struct rtl8169_private *);
	} jumbo_ops;

	/* Per-chip-generation CSI (config space indirect) access methods. */
	struct csi_ops {
		void (*write)(struct rtl8169_private *, int, int);
		u32 (*read)(struct rtl8169_private *, int);
	} csi_ops;

	int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
	int (*get_settings)(struct net_device *, struct ethtool_cmd *);
	void (*phy_reset_enable)(struct rtl8169_private *tp);
	void (*hw_start)(struct net_device *);
	unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
	unsigned int (*link_ok)(void __iomem *);
	int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
	bool (*tso_csum)(struct rtl8169_private *, struct sk_buff *, u32 *);

	/* Deferred work state: pending-task flags plus serializing mutex. */
	struct {
		DECLARE_BITMAP(flags, RTL_FLAG_MAX);
		struct mutex mutex;
		struct work_struct work;
	} wk;

	unsigned features;	/* RTL_FEATURE_* bits */

	struct mii_if_info mii;
	dma_addr_t counters_phys_addr;
	struct rtl8169_counters *counters;
	struct rtl8169_tc_offsets tc_offset;
	u32 saved_wolopts;
	u32 opts1_mask;

	/* Loaded PHY firmware blob and its parsed action script. */
	struct rtl_fw {
		const struct firmware *fw;

#define RTL_VER_SIZE		32

		char version[RTL_VER_SIZE];

		struct rtl_fw_phy_action {
			__le32 *code;
			size_t size;
		} phy_action;
	} *rtl_fw;
#define RTL_FIRMWARE_UNKNOWN	ERR_PTR(-EAGAIN)

	u32 ocp_base;		/* current OCP page base for r8168g mdio ops */
};
 858
/* Module metadata, parameters and firmware blobs that may be requested. */
MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
module_param(use_dac, int, 0);
MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
MODULE_LICENSE("GPL");
MODULE_VERSION(RTL8169_VERSION);
MODULE_FIRMWARE(FIRMWARE_8168D_1);
MODULE_FIRMWARE(FIRMWARE_8168D_2);
MODULE_FIRMWARE(FIRMWARE_8168E_1);
MODULE_FIRMWARE(FIRMWARE_8168E_2);
MODULE_FIRMWARE(FIRMWARE_8168E_3);
MODULE_FIRMWARE(FIRMWARE_8105E_1);
MODULE_FIRMWARE(FIRMWARE_8168F_1);
MODULE_FIRMWARE(FIRMWARE_8168F_2);
MODULE_FIRMWARE(FIRMWARE_8402_1);
MODULE_FIRMWARE(FIRMWARE_8411_1);
MODULE_FIRMWARE(FIRMWARE_8411_2);
MODULE_FIRMWARE(FIRMWARE_8106E_1);
MODULE_FIRMWARE(FIRMWARE_8106E_2);
MODULE_FIRMWARE(FIRMWARE_8168G_2);
MODULE_FIRMWARE(FIRMWARE_8168G_3);
MODULE_FIRMWARE(FIRMWARE_8168H_1);
MODULE_FIRMWARE(FIRMWARE_8168H_2);
MODULE_FIRMWARE(FIRMWARE_8107E_1);
MODULE_FIRMWARE(FIRMWARE_8107E_2);
 886
/* Serialize deferred work vs. ethtool/ioctl access to device state. */
static void rtl_lock_work(struct rtl8169_private *tp)
{
	mutex_lock(&tp->wk.mutex);
}
 891
/* Counterpart of rtl_lock_work(). */
static void rtl_unlock_work(struct rtl8169_private *tp)
{
	mutex_unlock(&tp->wk.mutex);
}
 896
/* Set the PCIe max read request size field in Device Control to @force. */
static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
{
	pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
					   PCI_EXP_DEVCTL_READRQ, force);
}
 902
/* A named polling condition: predicate plus a message for timeout logs. */
struct rtl_cond {
	bool (*check)(struct rtl8169_private *);
	const char *msg;
};
 907
/* Wrapper so udelay() can be passed as a function pointer to rtl_loop_wait(). */
static void rtl_udelay(unsigned int d)
{
	udelay(d);
}
 912
 913static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c,
 914			  void (*delay)(unsigned int), unsigned int d, int n,
 915			  bool high)
 916{
 917	int i;
 918
 919	for (i = 0; i < n; i++) {
 920		delay(d);
 921		if (c->check(tp) == high)
 922			return true;
 923	}
 924	netif_err(tp, drv, tp->dev, "%s == %d (loop: %d, delay: %d).\n",
 925		  c->msg, !high, n, d);
 926	return false;
 927}
 928
/* Busy-wait (udelay) until condition @c becomes true, or @n tries elapse. */
static bool rtl_udelay_loop_wait_high(struct rtl8169_private *tp,
				      const struct rtl_cond *c,
				      unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, rtl_udelay, d, n, true);
}
 935
/* Busy-wait (udelay) until condition @c becomes false, or @n tries elapse. */
static bool rtl_udelay_loop_wait_low(struct rtl8169_private *tp,
				     const struct rtl_cond *c,
				     unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, rtl_udelay, d, n, false);
}
 942
/* Sleep-wait (msleep) until condition @c becomes true, or @n tries elapse. */
static bool rtl_msleep_loop_wait_high(struct rtl8169_private *tp,
				      const struct rtl_cond *c,
				      unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, msleep, d, n, true);
}
 949
/* Sleep-wait (msleep) until condition @c becomes false, or @n tries elapse. */
static bool rtl_msleep_loop_wait_low(struct rtl8169_private *tp,
				     const struct rtl_cond *c,
				     unsigned int d, int n)
{
	return rtl_loop_wait(tp, c, msleep, d, n, false);
}
 956
/*
 * Declare a rtl_cond named @name and open the definition of its check
 * function; the macro invocation is followed by the function body.
 */
#define DECLARE_RTL_COND(name)				\
static bool name ## _check(struct rtl8169_private *);	\
							\
static const struct rtl_cond name = {			\
	.check	= name ## _check,			\
	.msg	= #name					\
};							\
							\
static bool name ## _check(struct rtl8169_private *tp)
 966
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 967static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg)
 968{
 969	if (reg & 0xffff0001) {
 970		netif_err(tp, drv, tp->dev, "Invalid ocp reg %x!\n", reg);
 971		return true;
 972	}
 973	return false;
 974}
 975
/* GPHY OCP busy flag: set while a PHY OCP transaction is in flight. */
DECLARE_RTL_COND(rtl_ocp_gphy_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(GPHY_OCP) & OCPAR_FLAG;
}
 982
/* Write @data to GPHY OCP register @reg and wait for completion. */
static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_ocp_reg_failure(tp, reg))
		return;

	/* reg is even, so reg << 15 places it in bits 16..30 of GPHY_OCP. */
	RTL_W32(GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);

	rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
}
 994
/* Read GPHY OCP register @reg; returns 0xffff on timeout or bad address. */
static u16 r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_ocp_reg_failure(tp, reg))
		return 0;

	RTL_W32(GPHY_OCP, reg << 15);

	return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
		(RTL_R32(GPHY_OCP) & 0xffff) : ~0;
}
1007
/* Write @data to MAC OCP register @reg (posted; no completion poll). */
static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_ocp_reg_failure(tp, reg))
		return;

	RTL_W32(OCPDR, OCPAR_FLAG | (reg << 15) | data);
}
1017
/* Read MAC OCP register @reg; returns 0 on a bad address. */
static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_ocp_reg_failure(tp, reg))
		return 0;

	RTL_W32(OCPDR, reg << 15);

	/* Result is truncated to the low 16 data bits by the return type. */
	return RTL_R32(OCPDR);
}
1029
#define OCP_STD_PHY_BASE	0xa400

/*
 * MDIO write for 8168g+: reg 0x1f selects the OCP page (0 maps to the
 * standard PHY page); other registers are translated to OCP addresses.
 */
static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
	if (reg == 0x1f) {
		tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE;
		return;
	}

	/* Non-standard pages address registers relative to 0x10. */
	if (tp->ocp_base != OCP_STD_PHY_BASE)
		reg -= 0x10;

	r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value);
}
1044
/* MDIO read counterpart of r8168g_mdio_write() using the current OCP page. */
static int r8168g_mdio_read(struct rtl8169_private *tp, int reg)
{
	if (tp->ocp_base != OCP_STD_PHY_BASE)
		reg -= 0x10;

	return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2);
}
1052
/* MAC MCU write: reg 0x1f selects the OCP page, otherwise write @value. */
static void mac_mcu_write(struct rtl8169_private *tp, int reg, int value)
{
	if (reg == 0x1f) {
		tp->ocp_base = value << 4;
		return;
	}

	r8168_mac_ocp_write(tp, tp->ocp_base + reg, value);
}
1062
/* MAC MCU read relative to the currently selected OCP page. */
static int mac_mcu_read(struct rtl8169_private *tp, int reg)
{
	return r8168_mac_ocp_read(tp, tp->ocp_base + reg);
}
1067
/* PHYAR flag bit: set when a PHY register access has completed. */
DECLARE_RTL_COND(rtl_phyar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(PHYAR) & 0x80000000;
}
1074
/* Classic PHYAR-based MDIO write used by 8169 and most 8168 chips. */
static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff));

	rtl_udelay_loop_wait_low(tp, &rtl_phyar_cond, 25, 20);
	/*
	 * According to hardware specs a 20us delay is required after write
	 * complete indication, but before sending next command.
	 */
	udelay(20);
}
1088
/* Classic PHYAR-based MDIO read; returns ~0 (all ones) on timeout. */
static int r8169_mdio_read(struct rtl8169_private *tp, int reg)
{
	void __iomem *ioaddr = tp->mmio_addr;
	int value;

	RTL_W32(PHYAR, 0x0 | (reg & 0x1f) << 16);

	value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ?
		RTL_R32(PHYAR) & 0xffff : ~0;

	/*
	 * According to hardware specs a 20us delay is required after read
	 * complete indication, but before sending next command.
	 */
	udelay(20);

	return value;
}
1107
/* OCPAR busy flag: set while an OCP transaction is in flight. */
DECLARE_RTL_COND(rtl_ocpar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(OCPAR) & OCPAR_FLAG;
}
1114
/* Issue an OCP-mediated GPHY MDIO command (8168dp variant 1). */
static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
	RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD);
	RTL_W32(EPHY_RXER_NUM, 0);

	rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
}
1125
/* MDIO write for 8168dp variant 1, built on r8168dp_1_mdio_access(). */
static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
	r8168dp_1_mdio_access(tp, reg,
			      OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK));
}
1131
/* MDIO read for 8168dp variant 1; returns ~0 on timeout. */
static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD);

	mdelay(1);
	RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD);
	RTL_W32(EPHY_RXER_NUM, 0);

	return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
		RTL_R32(OCPDR) & OCPDR_DATA_MASK : ~0;
}
1145
#define R8168DP_1_MDIO_ACCESS_BIT	0x00020000

/* Grant the host access to the MDIO interface (8168dp variant 2). */
static void r8168dp_2_mdio_start(void __iomem *ioaddr)
{
	RTL_W32(0xd0, RTL_R32(0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT);
}
1152
/* Return MDIO interface ownership to the firmware (8168dp variant 2). */
static void r8168dp_2_mdio_stop(void __iomem *ioaddr)
{
	RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
}
1157
/* MDIO write for 8168dp variant 2: classic write bracketed by start/stop. */
static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r8168dp_2_mdio_start(ioaddr);

	r8169_mdio_write(tp, reg, value);

	r8168dp_2_mdio_stop(ioaddr);
}
1168
/* MDIO read for 8168dp variant 2: classic read bracketed by start/stop. */
static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
{
	void __iomem *ioaddr = tp->mmio_addr;
	int value;

	r8168dp_2_mdio_start(ioaddr);

	value = r8169_mdio_read(tp, reg);

	r8168dp_2_mdio_stop(ioaddr);

	return value;
}
1182
/* Write a PHY register via the chip-specific mdio_ops. */
static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val)
{
	tp->mdio_ops.write(tp, location, val);
}
1187
/* Read a PHY register via the chip-specific mdio_ops. */
static int rtl_readphy(struct rtl8169_private *tp, int location)
{
	return tp->mdio_ops.read(tp, location);
}
1192
/* OR @value into PHY register @reg_addr (read-modify-write). */
static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
{
	rtl_writephy(tp, reg_addr, rtl_readphy(tp, reg_addr) | value);
}
1197
/* Read-modify-write a PHY register: clear the bits in @m, then set @p. */
static void rtl_w0w1_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
{
	rtl_writephy(tp, reg_addr, (rtl_readphy(tp, reg_addr) & ~m) | p);
}
1205
/* mii_if_info-compatible MDIO write callback (phy_id is ignored). */
static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
			   int val)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_writephy(tp, location, val);
}
1213
/* mii_if_info-compatible MDIO read callback (phy_id is ignored). */
static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	return rtl_readphy(tp, location);
}
1220
/* EPHYAR flag: set when an EPHY register access has completed. */
DECLARE_RTL_COND(rtl_ephyar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(EPHYAR) & EPHYAR_FLAG;
}
1227
/* Write @value to EPHY register @reg_addr and wait for completion. */
static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
		(reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);

	rtl_udelay_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100);

	udelay(10);
}
1239
/* Read EPHY register @reg_addr; returns 0xffff on timeout. */
static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);

	return rtl_udelay_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ?
		RTL_R32(EPHYAR) & EPHYAR_DATA_MASK : ~0;
}
1249
/* ERIAR flag: set when an ERI register access has completed. */
DECLARE_RTL_COND(rtl_eriar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(ERIAR) & ERIAR_FLAG;
}
1256
/*
 * Write @val to ERI register @addr (dword-aligned) of space @type, with
 * @mask selecting the byte lanes, then wait for completion.
 */
static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
			  u32 val, int type)
{
	void __iomem *ioaddr = tp->mmio_addr;

	BUG_ON((addr & 3) || (mask == 0));
	RTL_W32(ERIDR, val);
	RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr);

	rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
}
1268
/* Read a full ERI dword at @addr from space @type; ~0 on timeout. */
static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);

	return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
		RTL_R32(ERIDR) : ~0;
}
1278
1279static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p,
1280			 u32 m, int type)
1281{
1282	u32 val;
1283
1284	val = rtl_eri_read(tp, addr, type);
1285	rtl_eri_write(tp, addr, mask, (val & ~m) | p, type);
1286}
1287
/* OCP read via OCPAR/OCPDR (8168dp family); ~0 on timeout. */
static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
	return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ?
		RTL_R32(OCPDR) : ~0;
}
1296
/* OCP read via the ERI OOB space (8168ep family); @mask is unused here. */
static u32 r8168ep_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
{
	return rtl_eri_read(tp, reg, ERIAR_OOB);
}
1301
/* Dispatch an OCP read to the per-generation implementation. */
static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
{
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		return r8168dp_ocp_read(tp, mask, reg);
	case RTL_GIGA_MAC_VER_49:
	case RTL_GIGA_MAC_VER_50:
	case RTL_GIGA_MAC_VER_51:
		return r8168ep_ocp_read(tp, mask, reg);
	default:
		/* Callers only exist for DASH-capable chips. */
		BUG();
		return ~0;
	}
}
1318
/* OCP write via OCPAR/OCPDR (8168dp family) and wait for completion. */
static void r8168dp_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg,
			      u32 data)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(OCPDR, data);
	RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
	rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20);
}
1328
/* OCP write via the ERI OOB space (8168ep family). */
static void r8168ep_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg,
			      u32 data)
{
	rtl_eri_write(tp, reg, ((u32)mask & 0x0f) << ERIAR_MASK_SHIFT,
		      data, ERIAR_OOB);
}
1335
/* Dispatch an OCP write to the per-generation implementation. */
static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
{
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		r8168dp_ocp_write(tp, mask, reg, data);
		break;
	case RTL_GIGA_MAC_VER_49:
	case RTL_GIGA_MAC_VER_50:
	case RTL_GIGA_MAC_VER_51:
		r8168ep_ocp_write(tp, mask, reg, data);
		break;
	default:
		/* Callers only exist for DASH-capable chips. */
		BUG();
		break;
	}
}
1354
/* Post command @cmd to the out-of-band (DASH) management firmware. */
static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
{
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_0001, cmd, ERIAR_EXGMAC);

	/* Ring the OOB doorbell. */
	ocp_write(tp, 0x1, 0x30, 0x00000001);
}

/* OOB firmware commands understood by rtl8168_oob_notify(). */
#define OOB_CMD_RESET		0x00
#define OOB_CMD_DRIVER_START	0x05
#define OOB_CMD_DRIVER_STOP	0x06
1365
1366static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
1367{
1368	return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
1369}
1370
/* DASH firmware "driver started" acknowledgement bit (8168dp). */
DECLARE_RTL_COND(rtl_ocp_read_cond)
{
	u16 reg;

	reg = rtl8168_get_ocp_reg(tp);

	return ocp_read(tp, 0x0f, reg) & 0x00000800;
}
1379
/* DASH firmware "driver started" acknowledgement bit (8168ep). */
DECLARE_RTL_COND(rtl_ep_ocp_read_cond)
{
	return ocp_read(tp, 0x0f, 0x124) & 0x00000001;
}
1384
/* CMAC Tx-in-progress status bit used while stopping the CMAC (8168ep). */
DECLARE_RTL_COND(rtl_ocp_tx_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R8(IBISR0) & 0x02;
}
1391
/* Quiesce the 8168ep CMAC: stop Tx, drain, ack status, then disable. */
static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(IBCR2, RTL_R8(IBCR2) & ~0x01);
	rtl_msleep_loop_wait_low(tp, &rtl_ocp_tx_cond, 50, 2000);
	RTL_W8(IBISR0, RTL_R8(IBISR0) | 0x20);
	RTL_W8(IBCR0, RTL_R8(IBCR0) & ~0x01);
}
1401
/* Tell 8168dp DASH firmware the driver is taking over; wait for the ack. */
static void rtl8168dp_driver_start(struct rtl8169_private *tp)
{
	rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
	rtl_msleep_loop_wait_high(tp, &rtl_ocp_read_cond, 10, 10);
}
1407
/* Tell 8168ep DASH firmware the driver is taking over; wait for the ack. */
static void rtl8168ep_driver_start(struct rtl8169_private *tp)
{
	ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START);
	ocp_write(tp, 0x01, 0x30, ocp_read(tp, 0x01, 0x30) | 0x01);
	rtl_msleep_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10, 10);
}
1414
/* Dispatch the DASH driver-start handshake to the right chip variant. */
static void rtl8168_driver_start(struct rtl8169_private *tp)
{
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		rtl8168dp_driver_start(tp);
		break;
	case RTL_GIGA_MAC_VER_49:
	case RTL_GIGA_MAC_VER_50:
	case RTL_GIGA_MAC_VER_51:
		rtl8168ep_driver_start(tp);
		break;
	default:
		/* Only called for DASH-capable chips. */
		BUG();
		break;
	}
}
1433
/* Tell 8168dp DASH firmware the driver is going away; wait for the ack. */
static void rtl8168dp_driver_stop(struct rtl8169_private *tp)
{
	rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);
	rtl_msleep_loop_wait_low(tp, &rtl_ocp_read_cond, 10, 10);
}
1439
/* Stop the CMAC, then tell 8168ep DASH firmware the driver is going away. */
static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
{
	rtl8168ep_stop_cmac(tp);
	ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP);
	ocp_write(tp, 0x01, 0x30, ocp_read(tp, 0x01, 0x30) | 0x01);
	rtl_msleep_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10, 10);
}
1447
/* Dispatch the DASH driver-stop handshake to the right chip variant. */
static void rtl8168_driver_stop(struct rtl8169_private *tp)
{
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		rtl8168dp_driver_stop(tp);
		break;
	case RTL_GIGA_MAC_VER_49:
	case RTL_GIGA_MAC_VER_50:
	case RTL_GIGA_MAC_VER_51:
		rtl8168ep_driver_stop(tp);
		break;
	default:
		/* Only called for DASH-capable chips. */
		BUG();
		break;
	}
}
1466
1467static int r8168dp_check_dash(struct rtl8169_private *tp)
1468{
1469	u16 reg = rtl8168_get_ocp_reg(tp);
1470
1471	return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0;
1472}
1473
/* Returns 1 when 8168ep DASH management firmware is present, else 0. */
static int r8168ep_check_dash(struct rtl8169_private *tp)
{
	return !!(ocp_read(tp, 0x0f, 0x128) & 0x00000001);
}
1478
/* Returns 1 when DASH firmware is present on this chip, 0 otherwise. */
static int r8168_check_dash(struct rtl8169_private *tp)
{
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		return r8168dp_check_dash(tp);
	case RTL_GIGA_MAC_VER_49:
	case RTL_GIGA_MAC_VER_50:
	case RTL_GIGA_MAC_VER_51:
		return r8168ep_check_dash(tp);
	default:
		return 0;
	}
}
1494
/* One entry of a batched EXGMAC (ERI) register write table. */
struct exgmac_reg {
	u16 addr;
	u16 mask;
	u32 val;
};
1500
1501static void rtl_write_exgmac_batch(struct rtl8169_private *tp,
1502				   const struct exgmac_reg *r, int len)
1503{
1504	while (len-- > 0) {
1505		rtl_eri_write(tp, r->addr, r->mask, r->val, ERIAR_EXGMAC);
1506		r++;
1507	}
1508}
1509
/* EFUSEAR flag: set when an eFuse read has completed. */
DECLARE_RTL_COND(rtl_efusear_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(EFUSEAR) & EFUSEAR_FLAG;
}
1516
/* Read one eFuse byte at @reg_addr (8168d); returns 0xff on timeout. */
static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);

	return rtl_udelay_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ?
		RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK : ~0;
}
1526
/* Read the raw interrupt status register. */
static u16 rtl_get_events(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R16(IntrStatus);
}
1533
/*
 * Acknowledge the interrupt events in @bits by writing them back to
 * IntrStatus.  The mmiowb() orders the MMIO write on weakly-ordered
 * platforms before the caller drops any lock.
 */
static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W16(IntrStatus, bits);
	mmiowb();
}
1541
/* Mask all chip interrupt sources. */
static void rtl_irq_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W16(IntrMask, 0);
	mmiowb();
}
1549
/* Unmask exactly the interrupt sources in @bits. */
static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W16(IntrMask, bits);
}
1556
/* Interrupt events serviced by the NAPI poll handler. */
#define RTL_EVENT_NAPI_RX	(RxOK | RxErr)
#define RTL_EVENT_NAPI_TX	(TxOK | TxErr)
#define RTL_EVENT_NAPI		(RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX)
1560
/* Unmask the NAPI events plus the chip-specific slow-path events. */
static void rtl_irq_enable_all(struct rtl8169_private *tp)
{
	rtl_irq_enable(tp, RTL_EVENT_NAPI | tp->event_slow);
}
1565
/*
 * Mask all interrupts, then acknowledge any that were pending.  The
 * trailing ChipCmd read flushes the posted MMIO writes before return.
 */
static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_irq_disable(tp);
	rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow);
	RTL_R8(ChipCmd);
}
1574
/* Non-zero while a TBI (fiber) reset is still in progress. */
static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(TBICSR) & TBIReset;
}
1581
/* Non-zero while the copper PHY still reports BMCR_RESET set. */
static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp)
{
	return rtl_readphy(tp, MII_BMCR) & BMCR_RESET;
}
1586
/* Non-zero when the TBI (fiber) link is up. */
static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
{
	return RTL_R32(TBICSR) & TBILinkOk;
}
1591
/* Non-zero when the copper PHY status register reports link up. */
static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
{
	return RTL_R8(PHYstatus) & LinkStatus;
}
1596
/* Kick off a TBI (fiber) reset by setting TBIReset in TBICSR. */
static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
}
1603
1604static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp)
1605{
1606	unsigned int val;
1607
1608	val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET;
1609	rtl_writephy(tp, MII_BMCR, val & 0xffff);
1610}
1611
/*
 * Chip-specific ERI register fixups applied whenever the link state
 * changes, keyed on the negotiated speed read from PHYstatus.  The
 * register addresses and values are opaque vendor magic; do not try to
 * "simplify" them.  No-op when the interface is down or on chips not
 * listed here.
 */
static void rtl_link_chg_patch(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct net_device *dev = tp->dev;

	if (!netif_running(dev))
		return;

	if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_38) {
		if (RTL_R8(PHYstatus) & _1000bpsF) {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
				      ERIAR_EXGMAC);
		} else if (RTL_R8(PHYstatus) & _100bps) {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
				      ERIAR_EXGMAC);
		} else {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
				      ERIAR_EXGMAC);
		}
		/* Reset packet filter */
		rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01,
			     ERIAR_EXGMAC);
		rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
			     ERIAR_EXGMAC);
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_36) {
		if (RTL_R8(PHYstatus) & _1000bpsF) {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
				      ERIAR_EXGMAC);
		} else {
			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
				      ERIAR_EXGMAC);
		}
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
		if (RTL_R8(PHYstatus) & _10bps) {
			rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02,
				      ERIAR_EXGMAC);
			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060,
				      ERIAR_EXGMAC);
		} else {
			rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000,
				      ERIAR_EXGMAC);
		}
	}
}
1668
/*
 * Sync the netdev carrier state with the hardware link indication.
 * When @pm is true, link-up cancels any scheduled runtime suspend and
 * link-down schedules one after a 5 second grace period.
 *
 * NOTE(review): only the "link up" message is ratelimited; "link down"
 * is not — confirm whether that asymmetry is intentional.
 */
static void __rtl8169_check_link_status(struct net_device *dev,
					struct rtl8169_private *tp,
					void __iomem *ioaddr, bool pm)
{
	if (tp->link_ok(ioaddr)) {
		rtl_link_chg_patch(tp);
		/* This is to cancel a scheduled suspend if there's one. */
		if (pm)
			pm_request_resume(&tp->pci_dev->dev);
		netif_carrier_on(dev);
		if (net_ratelimit())
			netif_info(tp, ifup, dev, "link up\n");
	} else {
		netif_carrier_off(dev);
		netif_info(tp, ifdown, dev, "link down\n");
		if (pm)
			pm_schedule_suspend(&tp->pci_dev->dev, 5000);
	}
}
1688
/* Carrier sync without touching runtime power management. */
static void rtl8169_check_link_status(struct net_device *dev,
				      struct rtl8169_private *tp,
				      void __iomem *ioaddr)
{
	__rtl8169_check_link_status(dev, tp, ioaddr, false);
}
1695
/* All Wake-on-LAN trigger types this driver can expose via ethtool. */
#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
1697
/*
 * Translate the chip's current Config1/3/5 (and, on newer chips, ERI)
 * wake settings into ethtool WAKE_* flags.  Returns 0 immediately if
 * PME generation is disabled, since no wake source can fire then.
 */
static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	u8 options;
	u32 wolopts = 0;

	options = RTL_R8(Config1);
	if (!(options & PMEnable))
		return 0;

	options = RTL_R8(Config3);
	if (options & LinkUp)
		wolopts |= WAKE_PHY;
	switch (tp->mac_version) {
	/* These chips report magic-packet wake via ERI 0xdc, not Config3. */
	case RTL_GIGA_MAC_VER_34:
	case RTL_GIGA_MAC_VER_35:
	case RTL_GIGA_MAC_VER_36:
	case RTL_GIGA_MAC_VER_37:
	case RTL_GIGA_MAC_VER_38:
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
	case RTL_GIGA_MAC_VER_42:
	case RTL_GIGA_MAC_VER_43:
	case RTL_GIGA_MAC_VER_44:
	case RTL_GIGA_MAC_VER_45:
	case RTL_GIGA_MAC_VER_46:
	case RTL_GIGA_MAC_VER_47:
	case RTL_GIGA_MAC_VER_48:
	case RTL_GIGA_MAC_VER_49:
	case RTL_GIGA_MAC_VER_50:
	case RTL_GIGA_MAC_VER_51:
		if (rtl_eri_read(tp, 0xdc, ERIAR_EXGMAC) & MagicPacket_v2)
			wolopts |= WAKE_MAGIC;
		break;
	default:
		if (options & MagicPacket)
			wolopts |= WAKE_MAGIC;
		break;
	}

	options = RTL_R8(Config5);
	if (options & UWF)
		wolopts |= WAKE_UCAST;
	if (options & BWF)
		wolopts |= WAKE_BCAST;
	if (options & MWF)
		wolopts |= WAKE_MCAST;

	return wolopts;
}
1748
/* ethtool .get_wol: report supported and currently enabled wake flags. */
static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_lock_work(tp);

	wol->supported = WAKE_ANY;
	wol->wolopts = __rtl8169_get_wol(tp);

	rtl_unlock_work(tp);
}
1760
1761static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
1762{
1763	void __iomem *ioaddr = tp->mmio_addr;
1764	unsigned int i, tmp;
1765	static const struct {
1766		u32 opt;
1767		u16 reg;
1768		u8  mask;
1769	} cfg[] = {
1770		{ WAKE_PHY,   Config3, LinkUp },
 
1771		{ WAKE_UCAST, Config5, UWF },
1772		{ WAKE_BCAST, Config5, BWF },
1773		{ WAKE_MCAST, Config5, MWF },
1774		{ WAKE_ANY,   Config5, LanWake },
1775		{ WAKE_MAGIC, Config3, MagicPacket }
1776	};
1777	u8 options;
1778
1779	RTL_W8(Cfg9346, Cfg9346_Unlock);
1780
1781	switch (tp->mac_version) {
1782	case RTL_GIGA_MAC_VER_34:
1783	case RTL_GIGA_MAC_VER_35:
1784	case RTL_GIGA_MAC_VER_36:
1785	case RTL_GIGA_MAC_VER_37:
1786	case RTL_GIGA_MAC_VER_38:
1787	case RTL_GIGA_MAC_VER_40:
1788	case RTL_GIGA_MAC_VER_41:
1789	case RTL_GIGA_MAC_VER_42:
1790	case RTL_GIGA_MAC_VER_43:
1791	case RTL_GIGA_MAC_VER_44:
1792	case RTL_GIGA_MAC_VER_45:
1793	case RTL_GIGA_MAC_VER_46:
1794	case RTL_GIGA_MAC_VER_47:
1795	case RTL_GIGA_MAC_VER_48:
1796	case RTL_GIGA_MAC_VER_49:
1797	case RTL_GIGA_MAC_VER_50:
1798	case RTL_GIGA_MAC_VER_51:
1799		tmp = ARRAY_SIZE(cfg) - 1;
1800		if (wolopts & WAKE_MAGIC)
1801			rtl_w0w1_eri(tp,
1802				     0x0dc,
1803				     ERIAR_MASK_0100,
1804				     MagicPacket_v2,
1805				     0x0000,
1806				     ERIAR_EXGMAC);
1807		else
1808			rtl_w0w1_eri(tp,
1809				     0x0dc,
1810				     ERIAR_MASK_0100,
1811				     0x0000,
1812				     MagicPacket_v2,
1813				     ERIAR_EXGMAC);
1814		break;
1815	default:
1816		tmp = ARRAY_SIZE(cfg);
1817		break;
1818	}
1819
1820	for (i = 0; i < tmp; i++) {
1821		options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
1822		if (wolopts & cfg[i].opt)
1823			options |= cfg[i].mask;
1824		RTL_W8(cfg[i].reg, options);
1825	}
1826
1827	switch (tp->mac_version) {
1828	case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17:
1829		options = RTL_R8(Config1) & ~PMEnable;
1830		if (wolopts)
1831			options |= PMEnable;
1832		RTL_W8(Config1, options);
1833		break;
1834	default:
1835		options = RTL_R8(Config2) & ~PME_SIGNAL;
1836		if (wolopts)
1837			options |= PME_SIGNAL;
1838		RTL_W8(Config2, options);
1839		break;
1840	}
1841
1842	RTL_W8(Cfg9346, Cfg9346_Lock);
1843}
1844
1845static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1846{
1847	struct rtl8169_private *tp = netdev_priv(dev);
1848
1849	rtl_lock_work(tp);
1850
1851	if (wol->wolopts)
1852		tp->features |= RTL_FEATURE_WOL;
1853	else
1854		tp->features &= ~RTL_FEATURE_WOL;
1855	__rtl8169_set_wol(tp, wol->wolopts);
1856
1857	rtl_unlock_work(tp);
1858
1859	device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
1860
1861	return 0;
1862}
1863
/* Firmware file name for this chip's mac_version (may be NULL). */
static const char *rtl_lookup_firmware_name(struct rtl8169_private *tp)
{
	return rtl_chip_infos[tp->mac_version].fw_name;
}
1868
/*
 * ethtool .get_drvinfo: driver name/version, PCI bus id and, when a
 * PHY firmware image was successfully loaded, its version string.
 */
static void rtl8169_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct rtl_fw *rtl_fw = tp->rtl_fw;

	strlcpy(info->driver, MODULENAME, sizeof(info->driver));
	strlcpy(info->version, RTL8169_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
	BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
	/* tp->rtl_fw may hold an ERR_PTR when firmware loading failed. */
	if (!IS_ERR_OR_NULL(rtl_fw))
		strlcpy(info->fw_version, rtl_fw->version,
			sizeof(info->fw_version));
}
1883
/* ethtool .get_regs_len: size of the register dump in bytes. */
static int rtl8169_get_regs_len(struct net_device *dev)
{
	return R8169_REGS_SIZE;
}
1888
1889static int rtl8169_set_speed_tbi(struct net_device *dev,
1890				 u8 autoneg, u16 speed, u8 duplex, u32 ignored)
1891{
1892	struct rtl8169_private *tp = netdev_priv(dev);
1893	void __iomem *ioaddr = tp->mmio_addr;
1894	int ret = 0;
1895	u32 reg;
1896
1897	reg = RTL_R32(TBICSR);
1898	if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
1899	    (duplex == DUPLEX_FULL)) {
1900		RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart));
1901	} else if (autoneg == AUTONEG_ENABLE)
1902		RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
1903	else {
1904		netif_warn(tp, link, dev,
1905			   "incorrect speed setting refused in TBI mode\n");
1906		ret = -EOPNOTSUPP;
1907	}
1908
1909	return ret;
1910}
1911
/*
 * Apply a speed/duplex/autoneg request to the copper PHY via MII
 * registers.  With autoneg enabled, @adv selects the advertised modes
 * (pause is always advertised); with autoneg disabled only 10 or 100
 * Mbps can be forced.  Returns 0 on success, -EINVAL on an invalid
 * combination (including 1000Mbps requests on Fast-Ethernet-only PHYs).
 */
static int rtl8169_set_speed_xmii(struct net_device *dev,
				  u8 autoneg, u16 speed, u8 duplex, u32 adv)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int giga_ctrl, bmcr;
	int rc = -EINVAL;

	/* Select PHY page 0 before touching the standard MII registers. */
	rtl_writephy(tp, 0x1f, 0x0000);

	if (autoneg == AUTONEG_ENABLE) {
		int auto_nego;

		auto_nego = rtl_readphy(tp, MII_ADVERTISE);
		auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
				ADVERTISE_100HALF | ADVERTISE_100FULL);

		if (adv & ADVERTISED_10baseT_Half)
			auto_nego |= ADVERTISE_10HALF;
		if (adv & ADVERTISED_10baseT_Full)
			auto_nego |= ADVERTISE_10FULL;
		if (adv & ADVERTISED_100baseT_Half)
			auto_nego |= ADVERTISE_100HALF;
		if (adv & ADVERTISED_100baseT_Full)
			auto_nego |= ADVERTISE_100FULL;

		auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

		giga_ctrl = rtl_readphy(tp, MII_CTRL1000);
		giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);

		/* The 8100e/8101e/8102e do Fast Ethernet only. */
		if (tp->mii.supports_gmii) {
			if (adv & ADVERTISED_1000baseT_Half)
				giga_ctrl |= ADVERTISE_1000HALF;
			if (adv & ADVERTISED_1000baseT_Full)
				giga_ctrl |= ADVERTISE_1000FULL;
		} else if (adv & (ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full)) {
			netif_info(tp, link, dev,
				   "PHY does not support 1000Mbps\n");
			goto out;
		}

		bmcr = BMCR_ANENABLE | BMCR_ANRESTART;

		rtl_writephy(tp, MII_ADVERTISE, auto_nego);
		rtl_writephy(tp, MII_CTRL1000, giga_ctrl);
	} else {
		giga_ctrl = 0;

		if (speed == SPEED_10)
			bmcr = 0;
		else if (speed == SPEED_100)
			bmcr = BMCR_SPEED100;
		else
			goto out;

		if (duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}

	rtl_writephy(tp, MII_BMCR, bmcr);

	/* Extra vendor registers needed on these two chips for forced 100. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03) {
		if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) {
			rtl_writephy(tp, 0x17, 0x2138);
			rtl_writephy(tp, 0x0e, 0x0260);
		} else {
			rtl_writephy(tp, 0x17, 0x2108);
			rtl_writephy(tp, 0x0e, 0x0000);
		}
	}

	rc = 0;
out:
	return rc;
}
1990
/*
 * Dispatch a link setting change to the TBI or XMII handler.  On
 * non-PCIe chips a 1000baseT autoneg request additionally re-arms the
 * PHY timer as a workaround (see the timer handler elsewhere in this
 * file).
 */
static int rtl8169_set_speed(struct net_device *dev,
			     u8 autoneg, u16 speed, u8 duplex, u32 advertising)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int ret;

	ret = tp->set_speed(dev, autoneg, speed, duplex, advertising);
	if (ret < 0)
		goto out;

	if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) &&
	    (advertising & ADVERTISED_1000baseT_Full) &&
	    !pci_is_pcie(tp->pci_dev)) {
		mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
	}
out:
	return ret;
}
2009
/*
 * ethtool .set_settings: stop the PHY timer, then apply the new link
 * parameters under the rtl work lock.
 */
static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int ret;

	del_timer_sync(&tp->timer);

	rtl_lock_work(tp);
	ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
				cmd->duplex, cmd->advertising);
	rtl_unlock_work(tp);

	return ret;
}
2024
/*
 * .ndo_fix_features: mask out features the hardware cannot provide at
 * the current MTU — TSO is limited by TD_MSS_MAX, and some chips
 * cannot checksum jumbo frames on transmit.
 */
static netdev_features_t rtl8169_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	if (dev->mtu > TD_MSS_MAX)
		features &= ~NETIF_F_ALL_TSO;

	if (dev->mtu > JUMBO_1K &&
	    !rtl_chip_infos[tp->mac_version].jumbo_tx_csum)
		features &= ~NETIF_F_IP_CSUM;

	return features;
}
2039
2040static void __rtl8169_set_features(struct net_device *dev,
2041				   netdev_features_t features)
2042{
2043	struct rtl8169_private *tp = netdev_priv(dev);
 
2044	void __iomem *ioaddr = tp->mmio_addr;
2045	u32 rx_config;
2046
2047	rx_config = RTL_R32(RxConfig);
2048	if (features & NETIF_F_RXALL)
2049		rx_config |= (AcceptErr | AcceptRunt);
2050	else
2051		rx_config &= ~(AcceptErr | AcceptRunt);
2052
2053	RTL_W32(RxConfig, rx_config);
 
 
 
 
2054
2055	if (features & NETIF_F_RXCSUM)
2056		tp->cp_cmd |= RxChkSum;
2057	else
2058		tp->cp_cmd &= ~RxChkSum;
2059
2060	if (features & NETIF_F_HW_VLAN_CTAG_RX)
2061		tp->cp_cmd |= RxVlan;
2062	else
2063		tp->cp_cmd &= ~RxVlan;
2064
2065	tp->cp_cmd |= RTL_R16(CPlusCmd) & ~(RxVlan | RxChkSum);
2066
2067	RTL_W16(CPlusCmd, tp->cp_cmd);
2068	RTL_R16(CPlusCmd);
2069}
2070
2071static int rtl8169_set_features(struct net_device *dev,
2072				netdev_features_t features)
2073{
2074	struct rtl8169_private *tp = netdev_priv(dev);
2075
2076	features &= NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
2077
2078	rtl_lock_work(tp);
2079	if (features ^ dev->features)
2080		__rtl8169_set_features(dev, features);
2081	rtl_unlock_work(tp);
2082
2083	return 0;
2084}
2085
2086
2087static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb)
2088{
2089	return (skb_vlan_tag_present(skb)) ?
2090		TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00;
2091}
2092
/*
 * Propagate a hardware-extracted VLAN tag from the rx descriptor's
 * opts2 word into the skb (tag is stored byte-swapped by the chip).
 */
static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
{
	u32 opts2 = le32_to_cpu(desc->opts2);

	if (opts2 & RxVlanTag)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));
}
2100
/*
 * .get_settings for TBI (fiber) mode: the link is always reported as
 * 1000/full; only the autoneg state is read back from TBICSR.
 */
static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 status;

	cmd->supported =
		SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
	cmd->port = PORT_FIBRE;
	cmd->transceiver = XCVR_INTERNAL;

	status = RTL_R32(TBICSR);
	cmd->advertising = (status & TBINwEnable) ?  ADVERTISED_Autoneg : 0;
	cmd->autoneg = !!(status & TBINwEnable);

	ethtool_cmd_speed_set(cmd, SPEED_1000);
	cmd->duplex = DUPLEX_FULL; /* Always set */

	return 0;
}
2121
/* .get_settings for copper: defer to the generic MII helper. */
static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	return mii_ethtool_gset(&tp->mii, cmd);
}
2128
/* ethtool .get_settings: dispatch to the TBI/XMII getter under lock. */
static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int rc;

	rtl_lock_work(tp);
	rc = tp->get_settings(dev, cmd);
	rtl_unlock_work(tp);

	return rc;
}
2140
/*
 * ethtool .get_regs: copy the first R8169_REGS_SIZE bytes of MMIO
 * register space into @p, 32 bits at a time, under the work lock.
 */
static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			     void *p)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	u32 __iomem *data = tp->mmio_addr;
	u32 *dw = p;
	int i;

	rtl_lock_work(tp);
	for (i = 0; i < R8169_REGS_SIZE; i += 4)
		memcpy_fromio(dw++, data++, 4);
	rtl_unlock_work(tp);
}
2154
/* ethtool .get_msglevel: current netif message-enable bitmap. */
static u32 rtl8169_get_msglevel(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	return tp->msg_enable;
}
2161
/* ethtool .set_msglevel: replace the netif message-enable bitmap. */
static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	tp->msg_enable = value;
}
2168
/*
 * Statistic names for "ethtool -S"; the order must match the data[]
 * indices filled in by rtl8169_get_ethtool_stats().
 */
static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
	"tx_packets",
	"rx_packets",
	"tx_errors",
	"rx_errors",
	"rx_missed",
	"align_errors",
	"tx_single_collisions",
	"tx_multi_collisions",
	"unicast",
	"broadcast",
	"multicast",
	"tx_aborted",
	"tx_underrun",
};
2184
2185static int rtl8169_get_sset_count(struct net_device *dev, int sset)
2186{
2187	switch (sset) {
2188	case ETH_SS_STATS:
2189		return ARRAY_SIZE(rtl8169_gstrings);
2190	default:
2191		return -EOPNOTSUPP;
2192	}
2193}
2194
/* Poll condition: a tally-counter reset or dump is still in flight. */
DECLARE_RTL_COND(rtl_counters_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(CounterAddrLow) & (CounterReset | CounterDump);
}
2201
2202static bool rtl8169_do_counters(struct net_device *dev, u32 counter_cmd)
2203{
2204	struct rtl8169_private *tp = netdev_priv(dev);
2205	void __iomem *ioaddr = tp->mmio_addr;
2206	dma_addr_t paddr = tp->counters_phys_addr;
 
 
2207	u32 cmd;
2208	bool ret;
2209
2210	RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
2211	cmd = (u64)paddr & DMA_BIT_MASK(32);
2212	RTL_W32(CounterAddrLow, cmd);
2213	RTL_W32(CounterAddrLow, cmd | counter_cmd);
2214
2215	ret = rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
2216
2217	RTL_W32(CounterAddrLow, 0);
2218	RTL_W32(CounterAddrHigh, 0);
2219
2220	return ret;
2221}
2222
/*
 * Reset the hardware tally counters.  Returns true on success (or on
 * chips that lack the feature, where it is a successful no-op).
 */
static bool rtl8169_reset_counters(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	/*
	 * Versions prior to RTL_GIGA_MAC_VER_19 don't support resetting the
	 * tally counters.
	 */
	if (tp->mac_version < RTL_GIGA_MAC_VER_19)
		return true;

	return rtl8169_do_counters(dev, CounterReset);
}
2236
/*
 * Dump the hardware tally counters into tp->counters.  Returns true on
 * success, or trivially when the receiver is off (a dump would hang on
 * some chips, and the cached values are still valid).
 */
static bool rtl8169_update_counters(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	/*
	 * Some chips are unable to dump tally counters when the receiver
	 * is disabled.
	 */
	if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
		return true;

	return rtl8169_do_counters(dev, CounterDump);
}
 
2251
/*
 * Snapshot the hardware tally counters once at first open so
 * @get_stats64 can report deltas.  Returns true if either the reset or
 * the dump succeeded; false propagates total failure to the caller.
 */
static bool rtl8169_init_counter_offsets(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct rtl8169_counters *counters = tp->counters;
	bool ret = false;

	/*
	 * rtl8169_init_counter_offsets is called from rtl_open.  On chip
	 * versions prior to RTL_GIGA_MAC_VER_19 the tally counters are only
	 * reset by a power cycle, while the counter values collected by the
	 * driver are reset at every driver unload/load cycle.
	 *
	 * To make sure the HW values returned by @get_stats64 match the SW
	 * values, we collect the initial values at first open(*) and use them
	 * as offsets to normalize the values returned by @get_stats64.
	 *
	 * (*) We can't call rtl8169_init_counter_offsets from rtl_init_one
	 * for the reason stated in rtl8169_update_counters; CmdRxEnb is only
	 * set at open time by rtl_hw_start.
	 */

	if (tp->tc_offset.inited)
		return true;

	/* If both, reset and update fail, propagate to caller. */
	if (rtl8169_reset_counters(dev))
		ret = true;

	if (rtl8169_update_counters(dev))
		ret = true;

	tp->tc_offset.tx_errors = counters->tx_errors;
	tp->tc_offset.tx_multi_collision = counters->tx_multi_collision;
	tp->tc_offset.tx_aborted = counters->tx_aborted;
	tp->tc_offset.inited = true;

	return ret;
}
2290
/*
 * ethtool .get_ethtool_stats: refresh the hardware tally dump and copy
 * it out.  Index order must match rtl8169_gstrings[].  Note the
 * "tx_underun" spelling matches the struct field, not a typo here.
 */
static void rtl8169_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct rtl8169_counters *counters = tp->counters;

	ASSERT_RTNL();

	rtl8169_update_counters(dev);

	data[0] = le64_to_cpu(counters->tx_packets);
	data[1] = le64_to_cpu(counters->rx_packets);
	data[2] = le64_to_cpu(counters->tx_errors);
	data[3] = le32_to_cpu(counters->rx_errors);
	data[4] = le16_to_cpu(counters->rx_missed);
	data[5] = le16_to_cpu(counters->align_errors);
	data[6] = le32_to_cpu(counters->tx_one_collision);
	data[7] = le32_to_cpu(counters->tx_multi_collision);
	data[8] = le64_to_cpu(counters->rx_unicast);
	data[9] = le64_to_cpu(counters->rx_broadcast);
	data[10] = le32_to_cpu(counters->rx_multicast);
	data[11] = le16_to_cpu(counters->tx_aborted);
	data[12] = le16_to_cpu(counters->tx_underun);
}
2315
2316static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2317{
2318	switch(stringset) {
2319	case ETH_SS_STATS:
2320		memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
2321		break;
2322	}
2323}
2324
/* ethtool operations exposed by this driver. */
static const struct ethtool_ops rtl8169_ethtool_ops = {
	.get_drvinfo		= rtl8169_get_drvinfo,
	.get_regs_len		= rtl8169_get_regs_len,
	.get_link		= ethtool_op_get_link,
	.get_settings		= rtl8169_get_settings,
	.set_settings		= rtl8169_set_settings,
	.get_msglevel		= rtl8169_get_msglevel,
	.set_msglevel		= rtl8169_set_msglevel,
	.get_regs		= rtl8169_get_regs,
	.get_wol		= rtl8169_get_wol,
	.set_wol		= rtl8169_set_wol,
	.get_strings		= rtl8169_get_strings,
	.get_sset_count		= rtl8169_get_sset_count,
	.get_ethtool_stats	= rtl8169_get_ethtool_stats,
	.get_ts_info		= ethtool_op_get_ts_info,
};
2341
/*
 * Identify the chip variant from the TxConfig hardware version bits.
 * The table is scanned in order and the FIRST matching (mask, val)
 * entry wins, so more specific masks must precede broader family
 * catch-alls — do not reorder entries.  Unknown chips fall back to
 * @default_version; VER_42/45/46 are downgraded to their
 * Fast-Ethernet-only twins (VER_43/47/48) when the PHY lacks GMII.
 */
static void rtl8169_get_mac_version(struct rtl8169_private *tp,
				    struct net_device *dev, u8 default_version)
{
	void __iomem *ioaddr = tp->mmio_addr;
	/*
	 * The driver currently handles the 8168Bf and the 8168Be identically
	 * but they can be identified more specifically through the test below
	 * if needed:
	 *
	 * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
	 *
	 * Same thing for the 8101Eb and the 8101Ec:
	 *
	 * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
	 */
	static const struct rtl_mac_info {
		u32 mask;
		u32 val;
		int mac_version;
	} mac_info[] = {
		/* 8168EP family. */
		{ 0x7cf00000, 0x50200000,	RTL_GIGA_MAC_VER_51 },
		{ 0x7cf00000, 0x50100000,	RTL_GIGA_MAC_VER_50 },
		{ 0x7cf00000, 0x50000000,	RTL_GIGA_MAC_VER_49 },

		/* 8168H family. */
		{ 0x7cf00000, 0x54100000,	RTL_GIGA_MAC_VER_46 },
		{ 0x7cf00000, 0x54000000,	RTL_GIGA_MAC_VER_45 },

		/* 8168G family. */
		{ 0x7cf00000, 0x5c800000,	RTL_GIGA_MAC_VER_44 },
		{ 0x7cf00000, 0x50900000,	RTL_GIGA_MAC_VER_42 },
		{ 0x7cf00000, 0x4c100000,	RTL_GIGA_MAC_VER_41 },
		{ 0x7cf00000, 0x4c000000,	RTL_GIGA_MAC_VER_40 },

		/* 8168F family. */
		{ 0x7c800000, 0x48800000,	RTL_GIGA_MAC_VER_38 },
		{ 0x7cf00000, 0x48100000,	RTL_GIGA_MAC_VER_36 },
		{ 0x7cf00000, 0x48000000,	RTL_GIGA_MAC_VER_35 },

		/* 8168E family. */
		{ 0x7c800000, 0x2c800000,	RTL_GIGA_MAC_VER_34 },
		{ 0x7cf00000, 0x2c200000,	RTL_GIGA_MAC_VER_33 },
		{ 0x7cf00000, 0x2c100000,	RTL_GIGA_MAC_VER_32 },
		{ 0x7c800000, 0x2c000000,	RTL_GIGA_MAC_VER_33 },

		/* 8168D family. */
		{ 0x7cf00000, 0x28300000,	RTL_GIGA_MAC_VER_26 },
		{ 0x7cf00000, 0x28100000,	RTL_GIGA_MAC_VER_25 },
		{ 0x7c800000, 0x28000000,	RTL_GIGA_MAC_VER_26 },

		/* 8168DP family. */
		{ 0x7cf00000, 0x28800000,	RTL_GIGA_MAC_VER_27 },
		{ 0x7cf00000, 0x28a00000,	RTL_GIGA_MAC_VER_28 },
		{ 0x7cf00000, 0x28b00000,	RTL_GIGA_MAC_VER_31 },

		/* 8168C family. */
		{ 0x7cf00000, 0x3cb00000,	RTL_GIGA_MAC_VER_24 },
		{ 0x7cf00000, 0x3c900000,	RTL_GIGA_MAC_VER_23 },
		{ 0x7cf00000, 0x3c800000,	RTL_GIGA_MAC_VER_18 },
		{ 0x7c800000, 0x3c800000,	RTL_GIGA_MAC_VER_24 },
		{ 0x7cf00000, 0x3c000000,	RTL_GIGA_MAC_VER_19 },
		{ 0x7cf00000, 0x3c200000,	RTL_GIGA_MAC_VER_20 },
		{ 0x7cf00000, 0x3c300000,	RTL_GIGA_MAC_VER_21 },
		{ 0x7cf00000, 0x3c400000,	RTL_GIGA_MAC_VER_22 },
		{ 0x7c800000, 0x3c000000,	RTL_GIGA_MAC_VER_22 },

		/* 8168B family. */
		{ 0x7cf00000, 0x38000000,	RTL_GIGA_MAC_VER_12 },
		{ 0x7cf00000, 0x38500000,	RTL_GIGA_MAC_VER_17 },
		{ 0x7c800000, 0x38000000,	RTL_GIGA_MAC_VER_17 },
		{ 0x7c800000, 0x30000000,	RTL_GIGA_MAC_VER_11 },

		/* 8101 family. */
		{ 0x7cf00000, 0x44900000,	RTL_GIGA_MAC_VER_39 },
		{ 0x7c800000, 0x44800000,	RTL_GIGA_MAC_VER_39 },
		{ 0x7c800000, 0x44000000,	RTL_GIGA_MAC_VER_37 },
		{ 0x7cf00000, 0x40b00000,	RTL_GIGA_MAC_VER_30 },
		{ 0x7cf00000, 0x40a00000,	RTL_GIGA_MAC_VER_30 },
		{ 0x7cf00000, 0x40900000,	RTL_GIGA_MAC_VER_29 },
		{ 0x7c800000, 0x40800000,	RTL_GIGA_MAC_VER_30 },
		{ 0x7cf00000, 0x34a00000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7cf00000, 0x24a00000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7cf00000, 0x34900000,	RTL_GIGA_MAC_VER_08 },
		{ 0x7cf00000, 0x24900000,	RTL_GIGA_MAC_VER_08 },
		{ 0x7cf00000, 0x34800000,	RTL_GIGA_MAC_VER_07 },
		{ 0x7cf00000, 0x24800000,	RTL_GIGA_MAC_VER_07 },
		{ 0x7cf00000, 0x34000000,	RTL_GIGA_MAC_VER_13 },
		{ 0x7cf00000, 0x34300000,	RTL_GIGA_MAC_VER_10 },
		{ 0x7cf00000, 0x34200000,	RTL_GIGA_MAC_VER_16 },
		{ 0x7c800000, 0x34800000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7c800000, 0x24800000,	RTL_GIGA_MAC_VER_09 },
		{ 0x7c800000, 0x34000000,	RTL_GIGA_MAC_VER_16 },
		/* FIXME: where did these entries come from ? -- FR */
		{ 0xfc800000, 0x38800000,	RTL_GIGA_MAC_VER_15 },
		{ 0xfc800000, 0x30800000,	RTL_GIGA_MAC_VER_14 },

		/* 8110 family. */
		{ 0xfc800000, 0x98000000,	RTL_GIGA_MAC_VER_06 },
		{ 0xfc800000, 0x18000000,	RTL_GIGA_MAC_VER_05 },
		{ 0xfc800000, 0x10000000,	RTL_GIGA_MAC_VER_04 },
		{ 0xfc800000, 0x04000000,	RTL_GIGA_MAC_VER_03 },
		{ 0xfc800000, 0x00800000,	RTL_GIGA_MAC_VER_02 },
		{ 0xfc800000, 0x00000000,	RTL_GIGA_MAC_VER_01 },

		/* Catch-all */
		{ 0x00000000, 0x00000000,	RTL_GIGA_MAC_NONE   }
	};
	const struct rtl_mac_info *p = mac_info;
	u32 reg;

	/* The catch-all entry guarantees this scan terminates. */
	reg = RTL_R32(TxConfig);
	while ((reg & p->mask) != p->val)
		p++;
	tp->mac_version = p->mac_version;

	if (tp->mac_version == RTL_GIGA_MAC_NONE) {
		netif_notice(tp, probe, dev,
			     "unknown MAC, using family default\n");
		tp->mac_version = default_version;
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_42) {
		tp->mac_version = tp->mii.supports_gmii ?
				  RTL_GIGA_MAC_VER_42 :
				  RTL_GIGA_MAC_VER_43;
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_45) {
		tp->mac_version = tp->mii.supports_gmii ?
				  RTL_GIGA_MAC_VER_45 :
				  RTL_GIGA_MAC_VER_47;
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_46) {
		tp->mac_version = tp->mii.supports_gmii ?
				  RTL_GIGA_MAC_VER_46 :
				  RTL_GIGA_MAC_VER_48;
	}
}
2476
/* Debug helper: print the detected mac_version. */
static void rtl8169_print_mac_version(struct rtl8169_private *tp)
{
	dprintk("mac_version = 0x%02x\n", tp->mac_version);
}
2481
/* One PHY register write: register index and value to store. */
struct phy_reg {
	u16 reg;
	u16 val;
};
2486
2487static void rtl_writephy_batch(struct rtl8169_private *tp,
2488			       const struct phy_reg *regs, int len)
2489{
2490	while (len-- > 0) {
2491		rtl_writephy(tp, regs->reg, regs->val);
2492		regs++;
2493	}
2494}
2495
/*
 * PHY firmware action opcodes: the top nibble of each 32-bit
 * instruction in a loaded .fw image selects the operation (decoded in
 * rtl_fw_data_ok() and executed by rtl_phy_write_fw()).
 */
#define PHY_READ		0x00000000
#define PHY_DATA_OR		0x10000000
#define PHY_DATA_AND		0x20000000
#define PHY_BJMPN		0x30000000
#define PHY_MDIO_CHG		0x40000000
#define PHY_CLEAR_READCOUNT	0x70000000
#define PHY_WRITE		0x80000000
#define PHY_READCOUNT_EQ_SKIP	0x90000000
#define PHY_COMP_EQ_SKIPN	0xa0000000
#define PHY_COMP_NEQ_SKIPN	0xb0000000
#define PHY_WRITE_PREVIOUS	0xc0000000
#define PHY_SKIPN		0xd0000000
#define PHY_DELAY_MS		0xe0000000
2509
/*
 * On-disk header of the "new" firmware image format (magic == 0):
 * version string plus offset/length of the opcode stream, protected by
 * a whole-file additive checksum.
 */
struct fw_info {
	u32	magic;
	char	version[RTL_VER_SIZE];
	__le32	fw_start;
	__le32	fw_len;
	u8	chksum;
} __packed;
2517
/* Size in bytes of one firmware instruction word. */
#define FW_OPCODE_SIZE	sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code))
2519
/*
 * Validate a loaded firmware image and locate its opcode stream.
 * Two formats exist: the new one (magic == 0) carries a fw_info header
 * with version, start offset, length and a whole-file checksum that
 * must sum to zero; the legacy one is a bare opcode stream whose
 * version is taken from the firmware file name.  On success,
 * rtl_fw->phy_action points into fw->data and true is returned.
 */
static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
{
	const struct firmware *fw = rtl_fw->fw;
	struct fw_info *fw_info = (struct fw_info *)fw->data;
	struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
	char *version = rtl_fw->version;
	bool rc = false;

	if (fw->size < FW_OPCODE_SIZE)
		goto out;

	if (!fw_info->magic) {
		size_t i, size, start;
		u8 checksum = 0;

		if (fw->size < sizeof(*fw_info))
			goto out;

		/* Additive checksum of the whole file must be zero. */
		for (i = 0; i < fw->size; i++)
			checksum += fw->data[i];
		if (checksum != 0)
			goto out;

		start = le32_to_cpu(fw_info->fw_start);
		if (start > fw->size)
			goto out;

		/* fw_len is in opcodes, so bound it against the remainder. */
		size = le32_to_cpu(fw_info->fw_len);
		if (size > (fw->size - start) / FW_OPCODE_SIZE)
			goto out;

		memcpy(version, fw_info->version, RTL_VER_SIZE);

		pa->code = (__le32 *)(fw->data + start);
		pa->size = size;
	} else {
		if (fw->size % FW_OPCODE_SIZE)
			goto out;

		strlcpy(version, rtl_lookup_firmware_name(tp), RTL_VER_SIZE);

		pa->code = (__le32 *)fw->data;
		pa->size = fw->size / FW_OPCODE_SIZE;
	}
	version[RTL_VER_SIZE - 1] = 0;

	rc = true;
out:
	return rc;
}
2570
/*
 * Sanity-check every firmware instruction before execution: the opcode
 * (top nibble) must be known, and all jump/skip targets must stay
 * inside the opcode stream.  Returns false (and logs) on the first
 * invalid instruction.
 */
static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev,
			   struct rtl_fw_phy_action *pa)
{
	bool rc = false;
	size_t index;

	for (index = 0; index < pa->size; index++) {
		u32 action = le32_to_cpu(pa->code[index]);
		u32 regno = (action & 0x0fff0000) >> 16;

		switch(action & 0xf0000000) {
		/* Opcodes with no branch target need no range check. */
		case PHY_READ:
		case PHY_DATA_OR:
		case PHY_DATA_AND:
		case PHY_MDIO_CHG:
		case PHY_CLEAR_READCOUNT:
		case PHY_WRITE:
		case PHY_WRITE_PREVIOUS:
		case PHY_DELAY_MS:
			break;

		case PHY_BJMPN:
			/* Backward jump by @regno must not precede index 0. */
			if (regno > index) {
				netif_err(tp, ifup, tp->dev,
					  "Out of range of firmware\n");
				goto out;
			}
			break;
		case PHY_READCOUNT_EQ_SKIP:
			/* Skips two instructions forward. */
			if (index + 2 >= pa->size) {
				netif_err(tp, ifup, tp->dev,
					  "Out of range of firmware\n");
				goto out;
			}
			break;
		case PHY_COMP_EQ_SKIPN:
		case PHY_COMP_NEQ_SKIPN:
		case PHY_SKIPN:
			/* Skips @regno instructions forward. */
			if (index + 1 + regno >= pa->size) {
				netif_err(tp, ifup, tp->dev,
					  "Out of range of firmware\n");
				goto out;
			}
			break;

		default:
			netif_err(tp, ifup, tp->dev,
				  "Invalid action 0x%08x\n", action);
			goto out;
		}
	}
	rc = true;
out:
	return rc;
}
2626
2627static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2628{
2629	struct net_device *dev = tp->dev;
2630	int rc = -EINVAL;
2631
2632	if (!rtl_fw_format_ok(tp, rtl_fw)) {
2633		netif_err(tp, ifup, dev, "invalid firmware\n");
2634		goto out;
2635	}
2636
2637	if (rtl_fw_data_ok(tp, dev, &rtl_fw->phy_action))
2638		rc = 0;
2639out:
2640	return rc;
2641}
2642
/*
 * Execute the validated firmware opcode stream against the PHY.
 *
 * Tiny bytecode interpreter: each 32-bit op packs an opcode (top
 * nibble), a register/skip-count field (bits 27:16, "regno") and a
 * 16-bit data field.  State carried across ops: 'predata' (last value
 * read/modified), 'count' (reads since last PHY_CLEAR_READCOUNT) and
 * the current mdio ops (swappable via PHY_MDIO_CHG).  Bounds were
 * checked beforehand by rtl_fw_data_ok().  An all-zero op terminates
 * execution early.  The original mdio ops are restored on exit.
 */
static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
{
	struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
	struct mdio_ops org, *ops = &tp->mdio_ops;
	u32 predata, count;
	size_t index;

	predata = count = 0;
	/* Save the current mdio accessors so PHY_MDIO_CHG can restore them. */
	org.write = ops->write;
	org.read = ops->read;

	for (index = 0; index < pa->size; ) {
		u32 action = le32_to_cpu(pa->code[index]);
		u32 data = action & 0x0000ffff;
		u32 regno = (action & 0x0fff0000) >> 16;

		/* A zero opcode ends the program. */
		if (!action)
			break;

		switch(action & 0xf0000000) {
		case PHY_READ:
			predata = rtl_readphy(tp, regno);
			count++;
			index++;
			break;
		case PHY_DATA_OR:
			predata |= data;
			index++;
			break;
		case PHY_DATA_AND:
			predata &= data;
			index++;
			break;
		case PHY_BJMPN:
			/* Backward jump by 'regno' opcodes. */
			index -= regno;
			break;
		case PHY_MDIO_CHG:
			/* data == 0: original mdio ops; data == 1: MAC MCU ops. */
			if (data == 0) {
				ops->write = org.write;
				ops->read = org.read;
			} else if (data == 1) {
				ops->write = mac_mcu_write;
				ops->read = mac_mcu_read;
			}

			index++;
			break;
		case PHY_CLEAR_READCOUNT:
			count = 0;
			index++;
			break;
		case PHY_WRITE:
			rtl_writephy(tp, regno, data);
			index++;
			break;
		case PHY_READCOUNT_EQ_SKIP:
			/* Skip the next op once 'data' reads have been done. */
			index += (count == data) ? 2 : 1;
			break;
		case PHY_COMP_EQ_SKIPN:
			if (predata == data)
				index += regno;
			index++;
			break;
		case PHY_COMP_NEQ_SKIPN:
			if (predata != data)
				index += regno;
			index++;
			break;
		case PHY_WRITE_PREVIOUS:
			/* Write back the accumulated predata value. */
			rtl_writephy(tp, regno, predata);
			index++;
			break;
		case PHY_SKIPN:
			index += regno + 1;
			break;
		case PHY_DELAY_MS:
			mdelay(data);
			index++;
			break;

		default:
			/* Unreachable: rtl_fw_data_ok() rejected unknown ops. */
			BUG();
		}
	}

	/* Always leave the device with its original mdio accessors. */
	ops->write = org.write;
	ops->read = org.read;
}
2731
2732static void rtl_release_firmware(struct rtl8169_private *tp)
2733{
2734	if (!IS_ERR_OR_NULL(tp->rtl_fw)) {
2735		release_firmware(tp->rtl_fw->fw);
2736		kfree(tp->rtl_fw);
2737	}
2738	tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
2739}
2740
2741static void rtl_apply_firmware(struct rtl8169_private *tp)
2742{
2743	struct rtl_fw *rtl_fw = tp->rtl_fw;
2744
2745	/* TODO: release firmware once rtl_phy_write_fw signals failures. */
2746	if (!IS_ERR_OR_NULL(rtl_fw))
2747		rtl_phy_write_fw(tp, rtl_fw);
2748}
2749
2750static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
2751{
2752	if (rtl_readphy(tp, reg) != val)
2753		netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n");
2754	else
2755		rtl_apply_firmware(tp);
2756}
2757
/*
 * PHY setup for RTL8169S.  The table is an opaque vendor-provided
 * register/value sequence; writes to reg 0x1f appear to select the
 * register page for the following writes — do not reorder or alter
 * the values.
 */
static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x06, 0x006e },
		{ 0x08, 0x0708 },
		{ 0x15, 0x4000 },
		{ 0x18, 0x65c7 },

		{ 0x1f, 0x0001 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x0000 },

		{ 0x03, 0xff41 },
		{ 0x02, 0xdf60 },
		{ 0x01, 0x0140 },
		{ 0x00, 0x0077 },
		{ 0x04, 0x7800 },
		{ 0x04, 0x7000 },

		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf0f9 },
		{ 0x04, 0x9800 },
		{ 0x04, 0x9000 },

		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xa000 },

		{ 0x03, 0xff41 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x0140 },
		{ 0x00, 0x00bb },
		{ 0x04, 0xb800 },
		{ 0x04, 0xb000 },

		{ 0x03, 0xdf41 },
		{ 0x02, 0xdc60 },
		{ 0x01, 0x6340 },
		{ 0x00, 0x007d },
		{ 0x04, 0xd800 },
		{ 0x04, 0xd000 },

		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x100a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0xf000 },

		{ 0x1f, 0x0000 },
		{ 0x0b, 0x0000 },
		{ 0x00, 0x9200 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2824
/* PHY setup for RTL8169SB: single vendor-specified write on page 2. */
static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0002 },
		{ 0x01, 0x90d0 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2835
2836static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
2837{
2838	struct pci_dev *pdev = tp->pci_dev;
2839
2840	if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) ||
2841	    (pdev->subsystem_device != 0xe000))
2842		return;
2843
2844	rtl_writephy(tp, 0x1f, 0x0001);
2845	rtl_writephy(tp, 0x10, 0xf01b);
2846	rtl_writephy(tp, 0x1f, 0x0000);
2847}
2848
/*
 * PHY setup for RTL8169SC(d).  Opaque vendor register sequence (reg
 * 0x1f writes appear to select the register page), followed by the
 * Gigabyte board quirk.  Do not reorder or alter the values.
 */
static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x04, 0x0000 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x9000 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0xa000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x10, 0xf41b },
		{ 0x14, 0xfb54 },
		{ 0x18, 0xf5c7 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl8169scd_hw_phy_config_quirk(tp);
}
2895
/*
 * PHY setup for RTL8169SC(e).  Opaque vendor register sequence; do not
 * reorder or alter the values.
 */
static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x04, 0x0000 },
		{ 0x03, 0x00a1 },
		{ 0x02, 0x0008 },
		{ 0x01, 0x0120 },
		{ 0x00, 0x1000 },
		{ 0x04, 0x0800 },
		{ 0x04, 0x9000 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0xa000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0xff95 },
		{ 0x00, 0xba00 },
		{ 0x04, 0xa800 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x0b, 0x8480 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x18, 0x67c7 },
		{ 0x04, 0x2000 },
		{ 0x03, 0x002f },
		{ 0x02, 0x4360 },
		{ 0x01, 0x0109 },
		{ 0x00, 0x3022 },
		{ 0x04, 0x2800 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2948
/*
 * PHY setup for RTL8168B(b): set bit 0 of reg 0x16 on page 1, then
 * apply the vendor write sequence.
 */
static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x10, 0xf41b },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy(tp, 0x1f, 0x0001);
	rtl_patchphy(tp, 0x16, 1 << 0);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2961
/* PHY setup for RTL8168B(ef): single vendor write on page 1. */
static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x10, 0xf41b },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2972
/* PHY setup for RTL8168CP (variant 1): vendor write sequence. */
static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0000 },
		{ 0x1d, 0x0f00 },
		{ 0x1f, 0x0002 },
		{ 0x0c, 0x1ec8 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
2985
/*
 * PHY setup for RTL8168CP (variant 2): set bit 5 of regs 0x14 and 0x0d
 * on page 0, then apply the vendor write sequence.
 */
static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x1d, 0x3d98 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
3000
/*
 * PHY setup for RTL8168C (variant 1): vendor write sequence followed by
 * setting bit 5 of regs 0x14 and 0x0d.  Values are opaque; do not alter.
 */
static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x1f, 0x0002 },
		{ 0x00, 0x88d4 },
		{ 0x01, 0x82b1 },
		{ 0x03, 0x7002 },
		{ 0x08, 0x9e30 },
		{ 0x09, 0x01f0 },
		{ 0x0a, 0x5500 },
		{ 0x0c, 0x00c8 },
		{ 0x1f, 0x0003 },
		{ 0x12, 0xc096 },
		{ 0x16, 0x000a },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0000 },
		{ 0x09, 0x2000 },
		{ 0x09, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3029
/*
 * PHY setup for RTL8168C (variant 2): vendor write sequence, then set
 * bit 0 of reg 0x16 and bit 5 of regs 0x14 and 0x0d.
 */
static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x03, 0x802f },
		{ 0x02, 0x4f02 },
		{ 0x01, 0x0409 },
		{ 0x00, 0xf099 },
		{ 0x04, 0x9800 },
		{ 0x04, 0x9000 },
		{ 0x1d, 0x3d98 },
		{ 0x1f, 0x0002 },
		{ 0x0c, 0x7eb8 },
		{ 0x06, 0x0761 },
		{ 0x1f, 0x0003 },
		{ 0x16, 0x0f0a },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl_patchphy(tp, 0x16, 1 << 0);
	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3057
/*
 * PHY setup for RTL8168C (variant 3): vendor write sequence, then set
 * bit 0 of reg 0x16 and bit 5 of regs 0x14 and 0x0d.
 */
static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x12, 0x2300 },
		{ 0x1d, 0x3d98 },
		{ 0x1f, 0x0002 },
		{ 0x0c, 0x7eb8 },
		{ 0x06, 0x5461 },
		{ 0x1f, 0x0003 },
		{ 0x16, 0x0f0a },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl_patchphy(tp, 0x16, 1 << 0);
	rtl_patchphy(tp, 0x14, 1 << 5);
	rtl_patchphy(tp, 0x0d, 1 << 5);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3079
/* RTL8168C variant 4 uses the exact same PHY setup as variant 3. */
static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp)
{
	rtl8168c_3_hw_phy_config(tp);
}
3084
/*
 * PHY setup for RTL8168D (variant 1).  Vendor-specified sequences with
 * an efuse-dependent branch (efuse byte 0x01 == 0xb1 selects different
 * channel parameters), finished by conditionally loading PHY firmware.
 * Register values are opaque; do not reorder or alter them.
 */
static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init_0[] = {
		/* Channel Estimation */
		{ 0x1f, 0x0001 },
		{ 0x06, 0x4064 },
		{ 0x07, 0x2863 },
		{ 0x08, 0x059c },
		{ 0x09, 0x26b4 },
		{ 0x0a, 0x6a19 },
		{ 0x0b, 0xdcc8 },
		{ 0x10, 0xf06d },
		{ 0x14, 0x7f68 },
		{ 0x18, 0x7fd9 },
		{ 0x1c, 0xf0ff },
		{ 0x1d, 0x3d9c },
		{ 0x1f, 0x0003 },
		{ 0x12, 0xf49f },
		{ 0x13, 0x070b },
		{ 0x1a, 0x05ad },
		{ 0x14, 0x94c0 },

		/*
		 * Tx Error Issue
		 * Enhance line driver power
		 */
		{ 0x1f, 0x0002 },
		{ 0x06, 0x5561 },
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8332 },
		{ 0x06, 0x5561 },

		/*
		 * Can not link to 1Gbps with bad cable
		 * Decrease SNR threshold from 21.07dB to 19.04dB
		 */
		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0000 },
		{ 0x0d, 0xf880 }
	};

	rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));

	/*
	 * Rx Error Issue
	 * Fine Tune Switching regulator parameter
	 */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w0w1_phy(tp, 0x0b, 0x0010, 0x00ef);
	rtl_w0w1_phy(tp, 0x0c, 0xa200, 0x5d00);

	if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x669a },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x669a },
			{ 0x1f, 0x0002 }
		};
		int val;

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

		val = rtl_readphy(tp, 0x0d);

		/* Walk reg 0x0d's low byte up to 0x6c if not already there. */
		if ((val & 0x00ff) != 0x006c) {
			static const u32 set[] = {
				0x0065, 0x0066, 0x0067, 0x0068,
				0x0069, 0x006a, 0x006b, 0x006c
			};
			int i;

			rtl_writephy(tp, 0x1f, 0x0002);

			val &= 0xff00;
			for (i = 0; i < ARRAY_SIZE(set); i++)
				rtl_writephy(tp, 0x0d, val | set[i]);
		}
	} else {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x6662 },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x6662 }
		};

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
	}

	/* RSET couple improve */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_patchphy(tp, 0x0d, 0x0300);
	rtl_patchphy(tp, 0x0f, 0x0010);

	/* Fine tune PLL performance */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w0w1_phy(tp, 0x02, 0x0100, 0x0600);
	rtl_w0w1_phy(tp, 0x03, 0x0000, 0xe000);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x001b);

	/* Load firmware only if MII_EXPANSION reads back 0xbf00. */
	rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00);

	rtl_writephy(tp, 0x1f, 0x0000);
}
3195
/*
 * PHY setup for RTL8168D (variant 2).  Same structure as variant 1 but
 * without the Rx-error and RSET tweaks, with different efuse-branch
 * values and a different firmware readiness value (0xb300).
 */
static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init_0[] = {
		/* Channel Estimation */
		{ 0x1f, 0x0001 },
		{ 0x06, 0x4064 },
		{ 0x07, 0x2863 },
		{ 0x08, 0x059c },
		{ 0x09, 0x26b4 },
		{ 0x0a, 0x6a19 },
		{ 0x0b, 0xdcc8 },
		{ 0x10, 0xf06d },
		{ 0x14, 0x7f68 },
		{ 0x18, 0x7fd9 },
		{ 0x1c, 0xf0ff },
		{ 0x1d, 0x3d9c },
		{ 0x1f, 0x0003 },
		{ 0x12, 0xf49f },
		{ 0x13, 0x070b },
		{ 0x1a, 0x05ad },
		{ 0x14, 0x94c0 },

		/*
		 * Tx Error Issue
		 * Enhance line driver power
		 */
		{ 0x1f, 0x0002 },
		{ 0x06, 0x5561 },
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8332 },
		{ 0x06, 0x5561 },

		/*
		 * Can not link to 1Gbps with bad cable
		 * Decrease SNR threshold from 21.07dB to 19.04dB
		 */
		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0000 },
		{ 0x0d, 0xf880 }
	};

	rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));

	if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x669a },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x669a },

			{ 0x1f, 0x0002 }
		};
		int val;

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

		val = rtl_readphy(tp, 0x0d);
		/* Walk reg 0x0d's low byte up to 0x6c if not already there. */
		if ((val & 0x00ff) != 0x006c) {
			static const u32 set[] = {
				0x0065, 0x0066, 0x0067, 0x0068,
				0x0069, 0x006a, 0x006b, 0x006c
			};
			int i;

			rtl_writephy(tp, 0x1f, 0x0002);

			val &= 0xff00;
			for (i = 0; i < ARRAY_SIZE(set); i++)
				rtl_writephy(tp, 0x0d, val | set[i]);
		}
	} else {
		static const struct phy_reg phy_reg_init[] = {
			{ 0x1f, 0x0002 },
			{ 0x05, 0x2642 },
			{ 0x1f, 0x0005 },
			{ 0x05, 0x8330 },
			{ 0x06, 0x2642 }
		};

		rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
	}

	/* Fine tune PLL performance */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w0w1_phy(tp, 0x02, 0x0100, 0x0600);
	rtl_w0w1_phy(tp, 0x03, 0x0000, 0xe000);

	/* Switching regulator Slew rate */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_patchphy(tp, 0x0f, 0x0017);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x001b);

	/* Load firmware only if MII_EXPANSION reads back 0xb300. */
	rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300);

	rtl_writephy(tp, 0x1f, 0x0000);
}
3297
/*
 * PHY setup for RTL8168D (variant 3): one long vendor write sequence,
 * no firmware load.  Values are opaque; do not reorder or alter them.
 */
static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0002 },
		{ 0x10, 0x0008 },
		{ 0x0d, 0x006c },

		{ 0x1f, 0x0000 },
		{ 0x0d, 0xf880 },

		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0001 },
		{ 0x0b, 0xa4d8 },
		{ 0x09, 0x281c },
		{ 0x07, 0x2883 },
		{ 0x0a, 0x6b35 },
		{ 0x1d, 0x3da4 },
		{ 0x1c, 0xeffd },
		{ 0x14, 0x7f52 },
		{ 0x18, 0x7fc6 },
		{ 0x08, 0x0601 },
		{ 0x06, 0x4063 },
		{ 0x10, 0xf074 },
		{ 0x1f, 0x0003 },
		{ 0x13, 0x0789 },
		{ 0x12, 0xf4bd },
		{ 0x1a, 0x04fd },
		{ 0x14, 0x84b0 },
		{ 0x1f, 0x0000 },
		{ 0x00, 0x9200 },

		{ 0x1f, 0x0005 },
		{ 0x01, 0x0340 },
		{ 0x1f, 0x0001 },
		{ 0x04, 0x4000 },
		{ 0x03, 0x1d21 },
		{ 0x02, 0x0c32 },
		{ 0x01, 0x0200 },
		{ 0x00, 0x5554 },
		{ 0x04, 0x4800 },
		{ 0x04, 0x4000 },
		{ 0x04, 0xf000 },
		{ 0x03, 0xdf01 },
		{ 0x02, 0xdf20 },
		{ 0x01, 0x101a },
		{ 0x00, 0xa0ff },
		{ 0x04, 0xf800 },
		{ 0x04, 0xf000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0007 },
		{ 0x1e, 0x0023 },
		{ 0x16, 0x0000 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
3358
/*
 * PHY setup for RTL8168D (variant 4): short vendor sequence, then set
 * bit 5 of reg 0x0d.
 */
static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0001 },
		{ 0x17, 0x0cc0 },

		{ 0x1f, 0x0007 },
		{ 0x1e, 0x002d },
		{ 0x18, 0x0040 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
	rtl_patchphy(tp, 0x0d, 1 << 5);
}
3374
/*
 * PHY setup for RTL8168E (variant 1): loads PHY firmware first, then
 * applies vendor tuning sequences.  Register values are opaque; do not
 * reorder or alter them.
 */
static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Enable Delay cap */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b80 },
		{ 0x06, 0xc896 },
		{ 0x1f, 0x0000 },

		/* Channel estimation fine tune */
		{ 0x1f, 0x0001 },
		{ 0x0b, 0x6c20 },
		{ 0x07, 0x2872 },
		{ 0x1c, 0xefff },
		{ 0x1f, 0x0003 },
		{ 0x14, 0x6420 },
		{ 0x1f, 0x0000 },

		/* Update PFM & 10M TX idle timer */
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x002f },
		{ 0x15, 0x1919 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0007 },
		{ 0x1e, 0x00ac },
		{ 0x18, 0x0006 },
		{ 0x1f, 0x0000 }
	};

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* DCO enable for 10M IDLE Power */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0023);
	rtl_w0w1_phy(tp, 0x17, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* For impedance matching */
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_w0w1_phy(tp, 0x08, 0x8000, 0x7f00);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w0w1_phy(tp, 0x18, 0x0050, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);

	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* EEE setting (0x0d/0x0e accesses below use the MMD indirect regs) */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w0w1_phy(tp, 0x06, 0x0000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0020);
	rtl_w0w1_phy(tp, 0x15, 0x0000, 0x1100);
	rtl_writephy(tp, 0x1f, 0x0006);
	rtl_writephy(tp, 0x00, 0x5a00);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0007);
	rtl_writephy(tp, 0x0e, 0x003c);
	rtl_writephy(tp, 0x0d, 0x4007);
	rtl_writephy(tp, 0x0e, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0000);
}
3447
3448static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr)
3449{
3450	const u16 w[] = {
3451		addr[0] | (addr[1] << 8),
3452		addr[2] | (addr[3] << 8),
3453		addr[4] | (addr[5] << 8)
3454	};
3455	const struct exgmac_reg e[] = {
3456		{ .addr = 0xe0, ERIAR_MASK_1111, .val = w[0] | (w[1] << 16) },
3457		{ .addr = 0xe4, ERIAR_MASK_1111, .val = w[2] },
3458		{ .addr = 0xf0, ERIAR_MASK_1111, .val = w[0] << 16 },
3459		{ .addr = 0xf4, ERIAR_MASK_1111, .val = w[1] | (w[2] << 16) }
3460	};
3461
3462	rtl_write_exgmac_batch(tp, e, ARRAY_SIZE(e));
3463}
3464
/*
 * PHY setup for RTL8168E (variant 2): loads firmware, applies vendor
 * tuning, then re-feeds the MAC address to the GigaMAC registers as a
 * workaround for broken BIOSes.  Values are opaque; do not alter.
 */
static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Enable Delay cap */
		{ 0x1f, 0x0004 },
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x00ac },
		{ 0x18, 0x0006 },
		{ 0x1f, 0x0002 },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0000 },

		/* Channel estimation fine tune */
		{ 0x1f, 0x0003 },
		{ 0x09, 0xa20f },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0000 },

		/* Green Setting */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b5b },
		{ 0x06, 0x9222 },
		{ 0x05, 0x8b6d },
		{ 0x06, 0x8000 },
		{ 0x05, 0x8b76 },
		{ 0x06, 0x8000 },
		{ 0x1f, 0x0000 }
	};

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* For 4-corner performance improve */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b80);
	rtl_w0w1_phy(tp, 0x17, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w0w1_phy(tp, 0x18, 0x0010, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);

	/* improve 10M EEE waveform */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Improve 2-pair detection performance */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* EEE setting */
	rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, ERIAR_EXGMAC);
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w0w1_phy(tp, 0x06, 0x0000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0020);
	rtl_w0w1_phy(tp, 0x15, 0x0000, 0x0100);
	rtl_writephy(tp, 0x1f, 0x0002);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0007);
	rtl_writephy(tp, 0x0e, 0x003c);
	rtl_writephy(tp, 0x0d, 0x4007);
	rtl_writephy(tp, 0x0e, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0000);

	/* Green feature */
	rtl_writephy(tp, 0x1f, 0x0003);
	rtl_w0w1_phy(tp, 0x19, 0x0000, 0x0001);
	rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0400);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Broken BIOS workaround: feed GigaMAC registers with MAC address. */
	rtl_rar_exgmac_set(tp, tp->dev->dev_addr);
}
3551
/*
 * Common RTL8168F PHY tuning shared by the 8168F variants and the
 * RTL8411 setup routines below.
 */
static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
{
	/* For 4-corner performance improve */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b80);
	rtl_w0w1_phy(tp, 0x06, 0x0006, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x002d);
	rtl_w0w1_phy(tp, 0x18, 0x0010, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);

	/* Improve 10M EEE waveform */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b86);
	rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3573
/*
 * PHY setup for RTL8168F (variant 1): firmware load, vendor tuning
 * table, shared 8168F tuning, and a 2-pair detection tweak.
 */
static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Channel estimation fine tune */
		{ 0x1f, 0x0003 },
		{ 0x09, 0xa20f },
		{ 0x1f, 0x0000 },

		/* Modify green table for giga & fnet */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b55 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b5e },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b67 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b70 },
		{ 0x06, 0x0000 },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x0078 },
		{ 0x17, 0x0000 },
		{ 0x19, 0x00fb },
		{ 0x1f, 0x0000 },

		/* Modify green table for 10M */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b79 },
		{ 0x06, 0xaa00 },
		{ 0x1f, 0x0000 },

		/* Disable hiimpedance detection (RTCT) */
		{ 0x1f, 0x0003 },
		{ 0x01, 0x328a },
		{ 0x1f, 0x0000 }
	};

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl8168f_hw_phy_config(tp);

	/* Improve 2-pair detection performance */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3623
/* PHY setup for RTL8168F (variant 2): firmware plus the shared tuning. */
static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
{
	rtl_apply_firmware(tp);

	rtl8168f_hw_phy_config(tp);
}
3630
/*
 * PHY setup for RTL8411: firmware load, shared 8168F tuning, then the
 * vendor table and further green-table/EEE tweaks.  Register values
 * are opaque; do not reorder or alter them.
 */
static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		/* Channel estimation fine tune */
		{ 0x1f, 0x0003 },
		{ 0x09, 0xa20f },
		{ 0x1f, 0x0000 },

		/* Modify green table for giga & fnet */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b55 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b5e },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b67 },
		{ 0x06, 0x0000 },
		{ 0x05, 0x8b70 },
		{ 0x06, 0x0000 },
		{ 0x1f, 0x0000 },
		{ 0x1f, 0x0007 },
		{ 0x1e, 0x0078 },
		{ 0x17, 0x0000 },
		{ 0x19, 0x00aa },
		{ 0x1f, 0x0000 },

		/* Modify green table for 10M */
		{ 0x1f, 0x0005 },
		{ 0x05, 0x8b79 },
		{ 0x06, 0xaa00 },
		{ 0x1f, 0x0000 },

		/* Disable hiimpedance detection (RTCT) */
		{ 0x1f, 0x0003 },
		{ 0x01, 0x328a },
		{ 0x1f, 0x0000 }
	};


	rtl_apply_firmware(tp);

	rtl8168f_hw_phy_config(tp);

	/* Improve 2-pair detection performance */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	/* Modify green table for giga */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b54);
	rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0800);
	rtl_writephy(tp, 0x05, 0x8b5d);
	rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0800);
	rtl_writephy(tp, 0x05, 0x8a7c);
	rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x05, 0x8a7f);
	rtl_w0w1_phy(tp, 0x06, 0x0100, 0x0000);
	rtl_writephy(tp, 0x05, 0x8a82);
	rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x05, 0x8a85);
	rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x05, 0x8a88);
	rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* uc same-seed solution */
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w0w1_phy(tp, 0x06, 0x8000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* eee setting */
	rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC);
	rtl_writephy(tp, 0x1f, 0x0005);
	rtl_writephy(tp, 0x05, 0x8b85);
	rtl_w0w1_phy(tp, 0x06, 0x0000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x1f, 0x0007);
	rtl_writephy(tp, 0x1e, 0x0020);
	rtl_w0w1_phy(tp, 0x15, 0x0000, 0x0100);
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0007);
	rtl_writephy(tp, 0x0e, 0x003c);
	rtl_writephy(tp, 0x0d, 0x4007);
	rtl_writephy(tp, 0x0e, 0x0000);
	rtl_writephy(tp, 0x0d, 0x0000);

	/* Green feature */
	rtl_writephy(tp, 0x1f, 0x0003);
	rtl_w0w1_phy(tp, 0x19, 0x0000, 0x0001);
	rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0400);
	rtl_writephy(tp, 0x1f, 0x0000);
}
3727
/*
 * PHY setup for RTL8168G (variant 1): firmware, then feature bits that
 * depend on values read back from pages 0x0a46 et al.  Register values
 * are opaque; do not reorder or alter them.
 */
static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
{
	rtl_apply_firmware(tp);

	/* Mirror page-0x0a46 reg 0x10 bit 8 into page-0x0bcc reg 0x12 bit 15
	 * (inverted). */
	rtl_writephy(tp, 0x1f, 0x0a46);
	if (rtl_readphy(tp, 0x10) & 0x0100) {
		rtl_writephy(tp, 0x1f, 0x0bcc);
		rtl_w0w1_phy(tp, 0x12, 0x0000, 0x8000);
	} else {
		rtl_writephy(tp, 0x1f, 0x0bcc);
		rtl_w0w1_phy(tp, 0x12, 0x8000, 0x0000);
	}

	/* Mirror page-0x0a46 reg 0x13 bit 8 into page-0x0c41 reg 0x15 bit 1. */
	rtl_writephy(tp, 0x1f, 0x0a46);
	if (rtl_readphy(tp, 0x13) & 0x0100) {
		rtl_writephy(tp, 0x1f, 0x0c41);
		rtl_w0w1_phy(tp, 0x15, 0x0002, 0x0000);
	} else {
		rtl_writephy(tp, 0x1f, 0x0c41);
		rtl_w0w1_phy(tp, 0x15, 0x0000, 0x0002);
	}

	/* Enable PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0a44);
	rtl_w0w1_phy(tp, 0x11, 0x000c, 0x0000);

	rtl_writephy(tp, 0x1f, 0x0bcc);
	rtl_w0w1_phy(tp, 0x14, 0x0100, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0a44);
	rtl_w0w1_phy(tp, 0x11, 0x00c0, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x8084);
	rtl_w0w1_phy(tp, 0x14, 0x0000, 0x6000);
	rtl_w0w1_phy(tp, 0x10, 0x1003, 0x0000);

	/* EEE auto-fallback function */
	rtl_writephy(tp, 0x1f, 0x0a4b);
	rtl_w0w1_phy(tp, 0x11, 0x0004, 0x0000);

	/* Enable UC LPF tune function */
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x8012);
	rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);

	rtl_writephy(tp, 0x1f, 0x0c42);
	rtl_w0w1_phy(tp, 0x11, 0x4000, 0x2000);

	/* Improve SWR Efficiency */
	rtl_writephy(tp, 0x1f, 0x0bcd);
	rtl_writephy(tp, 0x14, 0x5065);
	rtl_writephy(tp, 0x14, 0xd065);
	rtl_writephy(tp, 0x1f, 0x0bc8);
	rtl_writephy(tp, 0x11, 0x5655);
	rtl_writephy(tp, 0x1f, 0x0bcd);
	rtl_writephy(tp, 0x14, 0x1065);
	rtl_writephy(tp, 0x14, 0x9065);
	rtl_writephy(tp, 0x14, 0x1065);

	/* Check ALDPS bit, disable it if enabled */
	rtl_writephy(tp, 0x1f, 0x0a43);
	if (rtl_readphy(tp, 0x10) & 0x0004)
		rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004);

	rtl_writephy(tp, 0x1f, 0x0000);
}
3793
/*
 * PHY setup for RTL8168G rev.2 and related chips (VER_42/43/44):
 * the entire PHY configuration is carried in the firmware patch.
 */
static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp)
{
	rtl_apply_firmware(tp);
}
3798
/*
 * PHY setup for RTL8168H rev.1 (VER_45/47): firmware patch plus
 * channel-estimation, R-tune/PGA-retune and ADC tweaks via vendor
 * paged PHY registers (page selected through register 0x1f).
 */
static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp)
{
	u16 dout_tapbin;
	u32 data;

	rtl_apply_firmware(tp);

	/* CHN EST parameters adjust - giga master */
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x809b);
	rtl_w0w1_phy(tp, 0x14, 0x8000, 0xf800);
	rtl_writephy(tp, 0x13, 0x80a2);
	rtl_w0w1_phy(tp, 0x14, 0x8000, 0xff00);
	rtl_writephy(tp, 0x13, 0x80a4);
	rtl_w0w1_phy(tp, 0x14, 0x8500, 0xff00);
	rtl_writephy(tp, 0x13, 0x809c);
	rtl_w0w1_phy(tp, 0x14, 0xbd00, 0xff00);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* CHN EST parameters adjust - giga slave */
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x80ad);
	rtl_w0w1_phy(tp, 0x14, 0x7000, 0xf800);
	rtl_writephy(tp, 0x13, 0x80b4);
	rtl_w0w1_phy(tp, 0x14, 0x5000, 0xff00);
	rtl_writephy(tp, 0x13, 0x80ac);
	rtl_w0w1_phy(tp, 0x14, 0x4000, 0xff00);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* CHN EST parameters adjust - fnet */
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x808e);
	rtl_w0w1_phy(tp, 0x14, 0x1200, 0xff00);
	rtl_writephy(tp, 0x13, 0x8090);
	rtl_w0w1_phy(tp, 0x14, 0xe500, 0xff00);
	rtl_writephy(tp, 0x13, 0x8092);
	rtl_w0w1_phy(tp, 0x14, 0x9f00, 0xff00);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* enable R-tune & PGA-retune function */
	dout_tapbin = 0;
	rtl_writephy(tp, 0x1f, 0x0a46);
	/* tapbin[3:2] <- page 0x0a46 reg 0x13 bits [1:0] */
	data = rtl_readphy(tp, 0x13);
	data &= 3;
	data <<= 2;
	dout_tapbin |= data;
	/* tapbin[1:0] <- page 0x0a46 reg 0x12 bits [15:14] */
	data = rtl_readphy(tp, 0x12);
	data &= 0xc000;
	data >>= 14;
	dout_tapbin |= data;
	/* XNOR with 0x08: invert every bit except bit 3 ... */
	dout_tapbin = ~(dout_tapbin^0x08);
	/* ... then place the 4-bit result in the top nibble. */
	dout_tapbin <<= 12;
	dout_tapbin &= 0xf000;
	rtl_writephy(tp, 0x1f, 0x0a43);
	/* Write the tapbin value into the top nibble of four
	 * consecutive extended registers 0x827a..0x827d. */
	rtl_writephy(tp, 0x13, 0x827a);
	rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000);
	rtl_writephy(tp, 0x13, 0x827b);
	rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000);
	rtl_writephy(tp, 0x13, 0x827c);
	rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000);
	rtl_writephy(tp, 0x13, 0x827d);
	rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000);

	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x0811);
	rtl_w0w1_phy(tp, 0x14, 0x0800, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0a42);
	rtl_w0w1_phy(tp, 0x16, 0x0002, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* enable GPHY 10M */
	rtl_writephy(tp, 0x1f, 0x0a44);
	rtl_w0w1_phy(tp, 0x11, 0x0800, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* SAR ADC performance */
	rtl_writephy(tp, 0x1f, 0x0bca);
	rtl_w0w1_phy(tp, 0x17, 0x4000, 0x3000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Clear bits [13:12] of extended registers 0x803f..0x806f
	 * (every 8th register). */
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x803f);
	rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
	rtl_writephy(tp, 0x13, 0x8047);
	rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
	rtl_writephy(tp, 0x13, 0x804f);
	rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
	rtl_writephy(tp, 0x13, 0x8057);
	rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
	rtl_writephy(tp, 0x13, 0x805f);
	rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
	rtl_writephy(tp, 0x13, 0x8067);
	rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
	rtl_writephy(tp, 0x13, 0x806f);
	rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* disable phy pfm mode */
	rtl_writephy(tp, 0x1f, 0x0a44);
	rtl_w0w1_phy(tp, 0x11, 0x0000, 0x0080);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Check ALDPS bit, disable it if enabled */
	rtl_writephy(tp, 0x1f, 0x0a43);
	if (rtl_readphy(tp, 0x10) & 0x0004)
		rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004);

	/* Restore page 0 before returning. */
	rtl_writephy(tp, 0x1f, 0x0000);
}
3908
/*
 * PHY setup for RTL8168H rev.2 (VER_46/48): firmware patch plus
 * IOFFSET calibration copy, TX LPF (rlen) adjustment and the usual
 * GPHY/ALDPS housekeeping.
 */
static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp)
{
	u16 ioffset_p3, ioffset_p2, ioffset_p1, ioffset_p0;
	u16 rlen;
	u32 data;

	rtl_apply_firmware(tp);

	/* CHIN EST parameter update */
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x808a);
	rtl_w0w1_phy(tp, 0x14, 0x000a, 0x003f);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* enable R-tune & PGA-retune function */
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x0811);
	rtl_w0w1_phy(tp, 0x14, 0x0800, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0a42);
	rtl_w0w1_phy(tp, 0x16, 0x0002, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* enable GPHY 10M */
	rtl_writephy(tp, 0x1f, 0x0a44);
	rtl_w0w1_phy(tp, 0x11, 0x0800, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Read the IOFFSET calibration word through MAC OCP regs
	 * 0xdd02/0xdd00 and repack it into four 4-bit fields p3..p0. */
	r8168_mac_ocp_write(tp, 0xdd02, 0x807d);
	data = r8168_mac_ocp_read(tp, 0xdd02);
	ioffset_p3 = ((data & 0x80)>>7);
	ioffset_p3 <<= 3;

	data = r8168_mac_ocp_read(tp, 0xdd00);
	ioffset_p3 |= ((data & (0xe000))>>13);
	ioffset_p2 = ((data & (0x1e00))>>9);
	ioffset_p1 = ((data & (0x01e0))>>5);
	ioffset_p0 = ((data & 0x0010)>>4);
	ioffset_p0 <<= 3;
	ioffset_p0 |= (data & (0x07));
	data = (ioffset_p3<<12)|(ioffset_p2<<8)|(ioffset_p1<<4)|(ioffset_p0);

	/* Copy the repacked word to the PHY unless every field reads as
	 * 0x0f (presumably "uncalibrated" - TODO confirm). */
	if ((ioffset_p3 != 0x0f) || (ioffset_p2 != 0x0f) ||
	    (ioffset_p1 != 0x0f) || (ioffset_p0 != 0x0f)) {
		rtl_writephy(tp, 0x1f, 0x0bcf);
		rtl_writephy(tp, 0x16, data);
		rtl_writephy(tp, 0x1f, 0x0000);
	}

	/* Modify rlen (TX LPF corner frequency) level:
	 * rlen = max(reg 0x16 low nibble - 3, 0), replicated into all
	 * four nibbles of reg 0x17. */
	rtl_writephy(tp, 0x1f, 0x0bcd);
	data = rtl_readphy(tp, 0x16);
	data &= 0x000f;
	rlen = 0;
	if (data > 3)
		rlen = data - 3;
	data = rlen | (rlen<<4) | (rlen<<8) | (rlen<<12);
	rtl_writephy(tp, 0x17, data);
	/* NOTE(review): page 0x0bcd is re-selected and immediately left
	 * again - looks redundant, but kept as-is. */
	rtl_writephy(tp, 0x1f, 0x0bcd);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* disable phy pfm mode */
	rtl_writephy(tp, 0x1f, 0x0a44);
	rtl_w0w1_phy(tp, 0x11, 0x0000, 0x0080);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Check ALDPS bit, disable it if enabled */
	rtl_writephy(tp, 0x1f, 0x0a43);
	if (rtl_readphy(tp, 0x10) & 0x0004)
		rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004);

	/* Restore page 0 before returning. */
	rtl_writephy(tp, 0x1f, 0x0000);
}
3981
/*
 * PHY setup for RTL8168EP rev.1 (VER_49).  Unlike most siblings this
 * variant does not load a PHY firmware patch; it only programs the
 * vendor paged registers below.
 */
static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp)
{
	/* Enable PHY auto speed down */
	rtl_writephy(tp, 0x1f, 0x0a44);
	rtl_w0w1_phy(tp, 0x11, 0x000c, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* patch 10M & ALDPS */
	rtl_writephy(tp, 0x1f, 0x0bcc);
	rtl_w0w1_phy(tp, 0x14, 0x0000, 0x0100);
	rtl_writephy(tp, 0x1f, 0x0a44);
	rtl_w0w1_phy(tp, 0x11, 0x00c0, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x8084);
	rtl_w0w1_phy(tp, 0x14, 0x0000, 0x6000);
	rtl_w0w1_phy(tp, 0x10, 0x1003, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Enable EEE auto-fallback function */
	rtl_writephy(tp, 0x1f, 0x0a4b);
	rtl_w0w1_phy(tp, 0x11, 0x0004, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Enable UC LPF tune function */
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x8012);
	rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* set rg_sel_sdm_rate */
	rtl_writephy(tp, 0x1f, 0x0c42);
	rtl_w0w1_phy(tp, 0x11, 0x4000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Check ALDPS bit, disable it if enabled */
	rtl_writephy(tp, 0x1f, 0x0a43);
	if (rtl_readphy(tp, 0x10) & 0x0004)
		rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004);

	/* Restore page 0 before returning. */
	rtl_writephy(tp, 0x1f, 0x0000);
}
4023
/*
 * PHY setup for RTL8168EP rev.2 (VER_50/51): 10M/ALDPS patch,
 * channel-estimation table and forced PWM mode, all via vendor
 * paged PHY registers.
 */
static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp)
{
	/* patch 10M & ALDPS */
	rtl_writephy(tp, 0x1f, 0x0bcc);
	rtl_w0w1_phy(tp, 0x14, 0x0000, 0x0100);
	rtl_writephy(tp, 0x1f, 0x0a44);
	rtl_w0w1_phy(tp, 0x11, 0x00c0, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x8084);
	rtl_w0w1_phy(tp, 0x14, 0x0000, 0x6000);
	rtl_w0w1_phy(tp, 0x10, 0x1003, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Enable UC LPF tune function */
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x8012);
	rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Set rg_sel_sdm_rate */
	rtl_writephy(tp, 0x1f, 0x0c42);
	rtl_w0w1_phy(tp, 0x11, 0x4000, 0x2000);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Channel estimation parameters.
	 * Each pair below forces the high byte of reg 0x14 to a new value
	 * while leaving the low byte untouched: the clear mask ~0xXXff is
	 * the complement of value|0x00ff (assumes rtl_w0w1_phy truncates
	 * its arguments to 16 bits - TODO confirm). */
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x80f3);
	rtl_w0w1_phy(tp, 0x14, 0x8b00, ~0x8bff);
	rtl_writephy(tp, 0x13, 0x80f0);
	rtl_w0w1_phy(tp, 0x14, 0x3a00, ~0x3aff);
	rtl_writephy(tp, 0x13, 0x80ef);
	rtl_w0w1_phy(tp, 0x14, 0x0500, ~0x05ff);
	rtl_writephy(tp, 0x13, 0x80f6);
	rtl_w0w1_phy(tp, 0x14, 0x6e00, ~0x6eff);
	rtl_writephy(tp, 0x13, 0x80ec);
	rtl_w0w1_phy(tp, 0x14, 0x6800, ~0x68ff);
	rtl_writephy(tp, 0x13, 0x80ed);
	rtl_w0w1_phy(tp, 0x14, 0x7c00, ~0x7cff);
	rtl_writephy(tp, 0x13, 0x80f2);
	rtl_w0w1_phy(tp, 0x14, 0xf400, ~0xf4ff);
	rtl_writephy(tp, 0x13, 0x80f4);
	rtl_w0w1_phy(tp, 0x14, 0x8500, ~0x85ff);
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x8110);
	rtl_w0w1_phy(tp, 0x14, 0xa800, ~0xa8ff);
	rtl_writephy(tp, 0x13, 0x810f);
	rtl_w0w1_phy(tp, 0x14, 0x1d00, ~0x1dff);
	rtl_writephy(tp, 0x13, 0x8111);
	rtl_w0w1_phy(tp, 0x14, 0xf500, ~0xf5ff);
	rtl_writephy(tp, 0x13, 0x8113);
	rtl_w0w1_phy(tp, 0x14, 0x6100, ~0x61ff);
	rtl_writephy(tp, 0x13, 0x8115);
	rtl_w0w1_phy(tp, 0x14, 0x9200, ~0x92ff);
	rtl_writephy(tp, 0x13, 0x810e);
	rtl_w0w1_phy(tp, 0x14, 0x0400, ~0x04ff);
	rtl_writephy(tp, 0x13, 0x810c);
	rtl_w0w1_phy(tp, 0x14, 0x7c00, ~0x7cff);
	rtl_writephy(tp, 0x13, 0x810b);
	rtl_w0w1_phy(tp, 0x14, 0x5a00, ~0x5aff);
	rtl_writephy(tp, 0x1f, 0x0a43);
	rtl_writephy(tp, 0x13, 0x80d1);
	rtl_w0w1_phy(tp, 0x14, 0xff00, ~0xffff);
	rtl_writephy(tp, 0x13, 0x80cd);
	rtl_w0w1_phy(tp, 0x14, 0x9e00, ~0x9eff);
	rtl_writephy(tp, 0x13, 0x80d3);
	rtl_w0w1_phy(tp, 0x14, 0x0e00, ~0x0eff);
	rtl_writephy(tp, 0x13, 0x80d5);
	rtl_w0w1_phy(tp, 0x14, 0xca00, ~0xcaff);
	rtl_writephy(tp, 0x13, 0x80d7);
	rtl_w0w1_phy(tp, 0x14, 0x8400, ~0x84ff);

	/* Force PWM-mode */
	rtl_writephy(tp, 0x1f, 0x0bcd);
	rtl_writephy(tp, 0x14, 0x5065);
	rtl_writephy(tp, 0x14, 0xd065);
	rtl_writephy(tp, 0x1f, 0x0bc8);
	rtl_writephy(tp, 0x12, 0x00ed);
	rtl_writephy(tp, 0x1f, 0x0bcd);
	rtl_writephy(tp, 0x14, 0x1065);
	rtl_writephy(tp, 0x14, 0x9065);
	rtl_writephy(tp, 0x14, 0x1065);
	rtl_writephy(tp, 0x1f, 0x0000);

	/* Check ALDPS bit, disable it if enabled */
	rtl_writephy(tp, 0x1f, 0x0a43);
	if (rtl_readphy(tp, 0x10) & 0x0004)
		rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004);

	/* Restore page 0 before returning. */
	rtl_writephy(tp, 0x1f, 0x0000);
}
4114
/*
 * PHY setup for RTL8102E family (VER_07/08/09): set three individual
 * PHY bits, then apply the fixed register table.
 */
static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0003 },
		{ 0x08, 0x441d },
		{ 0x01, 0x9100 },
		{ 0x1f, 0x0000 }
	};

	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_patchphy(tp, 0x11, 1 << 12);
	rtl_patchphy(tp, 0x19, 1 << 13);
	rtl_patchphy(tp, 0x10, 1 << 15);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
4131
/*
 * PHY setup for RTL8105E (VER_29/30): ALDPS must be switched off
 * before the firmware ram code is loaded; then a fixed register
 * table is applied.
 */
static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0005 },
		{ 0x1a, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0004 },
		{ 0x1c, 0x0000 },
		{ 0x1f, 0x0000 },

		{ 0x1f, 0x0001 },
		{ 0x15, 0x7701 },
		{ 0x1f, 0x0000 }
	};

	/* Disable ALDPS before ram code */
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x18, 0x0310);
	msleep(100);

	rtl_apply_firmware(tp);

	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
4157
/*
 * PHY setup for RTL8402 (VER_37): disable ALDPS, load firmware,
 * then program the EEE-related ERI and PHY registers.
 */
static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
{
	/* Disable ALDPS before setting firmware */
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x18, 0x0310);
	msleep(20);

	rtl_apply_firmware(tp);

	/* EEE setting */
	rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_writephy(tp, 0x1f, 0x0004);
	rtl_writephy(tp, 0x10, 0x401f);
	rtl_writephy(tp, 0x19, 0x7030);
	rtl_writephy(tp, 0x1f, 0x0000);
}
4174
/*
 * PHY setup for RTL8106E (VER_39): disable ALDPS, load firmware,
 * clear two ERI registers and apply a fixed PHY register table.
 */
static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
{
	static const struct phy_reg phy_reg_init[] = {
		{ 0x1f, 0x0004 },
		{ 0x10, 0xc07f },
		{ 0x19, 0x7030 },
		{ 0x1f, 0x0000 }
	};

	/* Disable ALDPS before ram code */
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, 0x18, 0x0310);
	msleep(100);

	rtl_apply_firmware(tp);

	rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));

	rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
}
4196
4197static void rtl_hw_phy_config(struct net_device *dev)
4198{
4199	struct rtl8169_private *tp = netdev_priv(dev);
4200
4201	rtl8169_print_mac_version(tp);
4202
4203	switch (tp->mac_version) {
4204	case RTL_GIGA_MAC_VER_01:
4205		break;
4206	case RTL_GIGA_MAC_VER_02:
4207	case RTL_GIGA_MAC_VER_03:
4208		rtl8169s_hw_phy_config(tp);
4209		break;
4210	case RTL_GIGA_MAC_VER_04:
4211		rtl8169sb_hw_phy_config(tp);
4212		break;
4213	case RTL_GIGA_MAC_VER_05:
4214		rtl8169scd_hw_phy_config(tp);
4215		break;
4216	case RTL_GIGA_MAC_VER_06:
4217		rtl8169sce_hw_phy_config(tp);
4218		break;
4219	case RTL_GIGA_MAC_VER_07:
4220	case RTL_GIGA_MAC_VER_08:
4221	case RTL_GIGA_MAC_VER_09:
4222		rtl8102e_hw_phy_config(tp);
4223		break;
4224	case RTL_GIGA_MAC_VER_11:
4225		rtl8168bb_hw_phy_config(tp);
4226		break;
4227	case RTL_GIGA_MAC_VER_12:
4228		rtl8168bef_hw_phy_config(tp);
4229		break;
4230	case RTL_GIGA_MAC_VER_17:
4231		rtl8168bef_hw_phy_config(tp);
4232		break;
4233	case RTL_GIGA_MAC_VER_18:
4234		rtl8168cp_1_hw_phy_config(tp);
4235		break;
4236	case RTL_GIGA_MAC_VER_19:
4237		rtl8168c_1_hw_phy_config(tp);
4238		break;
4239	case RTL_GIGA_MAC_VER_20:
4240		rtl8168c_2_hw_phy_config(tp);
4241		break;
4242	case RTL_GIGA_MAC_VER_21:
4243		rtl8168c_3_hw_phy_config(tp);
4244		break;
4245	case RTL_GIGA_MAC_VER_22:
4246		rtl8168c_4_hw_phy_config(tp);
4247		break;
4248	case RTL_GIGA_MAC_VER_23:
4249	case RTL_GIGA_MAC_VER_24:
4250		rtl8168cp_2_hw_phy_config(tp);
4251		break;
4252	case RTL_GIGA_MAC_VER_25:
4253		rtl8168d_1_hw_phy_config(tp);
4254		break;
4255	case RTL_GIGA_MAC_VER_26:
4256		rtl8168d_2_hw_phy_config(tp);
4257		break;
4258	case RTL_GIGA_MAC_VER_27:
4259		rtl8168d_3_hw_phy_config(tp);
4260		break;
4261	case RTL_GIGA_MAC_VER_28:
4262		rtl8168d_4_hw_phy_config(tp);
4263		break;
4264	case RTL_GIGA_MAC_VER_29:
4265	case RTL_GIGA_MAC_VER_30:
4266		rtl8105e_hw_phy_config(tp);
4267		break;
4268	case RTL_GIGA_MAC_VER_31:
4269		/* None. */
4270		break;
4271	case RTL_GIGA_MAC_VER_32:
4272	case RTL_GIGA_MAC_VER_33:
4273		rtl8168e_1_hw_phy_config(tp);
4274		break;
4275	case RTL_GIGA_MAC_VER_34:
4276		rtl8168e_2_hw_phy_config(tp);
4277		break;
4278	case RTL_GIGA_MAC_VER_35:
4279		rtl8168f_1_hw_phy_config(tp);
4280		break;
4281	case RTL_GIGA_MAC_VER_36:
4282		rtl8168f_2_hw_phy_config(tp);
4283		break;
4284
4285	case RTL_GIGA_MAC_VER_37:
4286		rtl8402_hw_phy_config(tp);
4287		break;
4288
4289	case RTL_GIGA_MAC_VER_38:
4290		rtl8411_hw_phy_config(tp);
4291		break;
4292
4293	case RTL_GIGA_MAC_VER_39:
4294		rtl8106e_hw_phy_config(tp);
4295		break;
4296
4297	case RTL_GIGA_MAC_VER_40:
4298		rtl8168g_1_hw_phy_config(tp);
4299		break;
4300	case RTL_GIGA_MAC_VER_42:
4301	case RTL_GIGA_MAC_VER_43:
4302	case RTL_GIGA_MAC_VER_44:
4303		rtl8168g_2_hw_phy_config(tp);
4304		break;
4305	case RTL_GIGA_MAC_VER_45:
4306	case RTL_GIGA_MAC_VER_47:
4307		rtl8168h_1_hw_phy_config(tp);
4308		break;
4309	case RTL_GIGA_MAC_VER_46:
4310	case RTL_GIGA_MAC_VER_48:
4311		rtl8168h_2_hw_phy_config(tp);
4312		break;
4313
4314	case RTL_GIGA_MAC_VER_49:
4315		rtl8168ep_1_hw_phy_config(tp);
4316		break;
4317	case RTL_GIGA_MAC_VER_50:
4318	case RTL_GIGA_MAC_VER_51:
4319		rtl8168ep_2_hw_phy_config(tp);
4320		break;
4321
4322	case RTL_GIGA_MAC_VER_41:
4323	default:
4324		break;
4325	}
4326}
4327
/*
 * Periodic PHY watchdog (runs from the driver's work item): while a
 * PHY reset is still pending, just re-arm the timer; once the reset
 * has completed, keep resetting the PHY until the link comes up.
 */
static void rtl_phy_work(struct rtl8169_private *tp)
{
	struct timer_list *timer = &tp->timer;
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long timeout = RTL8169_PHY_TIMEOUT;

	/* VER_01 never schedules this work (TBI-only chip). */
	assert(tp->mac_version > RTL_GIGA_MAC_VER_01);

	if (tp->phy_reset_pending(tp)) {
		/*
		 * A busy loop could burn quite a few cycles on nowadays CPU.
		 * Let's delay the execution of the timer for a few ticks.
		 */
		timeout = HZ/10;
		goto out_mod_timer;
	}

	/* Link is up: stop polling; the timer is not re-armed. */
	if (tp->link_ok(ioaddr))
		return;

	netif_dbg(tp, link, tp->dev, "PHY reset until link up\n");

	tp->phy_reset_enable(tp);

out_mod_timer:
	mod_timer(timer, jiffies + timeout);
}
4355
/*
 * Queue the driver's slow-path work item for @flag, unless that flag
 * is already pending (test_and_set_bit makes this race-free).
 */
static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
{
	if (!test_and_set_bit(flag, tp->wk.flags))
		schedule_work(&tp->wk.work);
}
4361
/*
 * Timer callback: defer the actual PHY work (rtl_phy_work) to process
 * context via the driver's work item.
 */
static void rtl8169_phy_timer(unsigned long __opaque)
{
	struct net_device *dev = (struct net_device *)__opaque;
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING);
}
4369
/*
 * Tear down everything acquired during board setup, in reverse order
 * of acquisition: MMIO mapping, PCI regions, MWI, the PCI device
 * itself, and finally the netdev.
 */
static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
				  void __iomem *ioaddr)
{
	iounmap(ioaddr);
	pci_release_regions(pdev);
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
}
4379
/* Poll condition: true while the chip-specific PHY reset is still pending. */
DECLARE_RTL_COND(rtl_phy_reset_cond)
{
	return tp->phy_reset_pending(tp);
}
4384
/*
 * Trigger a PHY reset and wait (1 ms polls, up to 100 iterations) for
 * it to complete.
 */
static void rtl8169_phy_reset(struct net_device *dev,
			      struct rtl8169_private *tp)
{
	tp->phy_reset_enable(tp);
	rtl_msleep_loop_wait_low(tp, &rtl_phy_reset_cond, 1, 100);
}
4391
4392static bool rtl_tbi_enabled(struct rtl8169_private *tp)
4393{
4394	void __iomem *ioaddr = tp->mmio_addr;
4395
4396	return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
4397	    (RTL_R8(PHYstatus) & TBI_Enable);
4398}
4399
/*
 * Full PHY bring-up: chip-specific register programming, PCI latency/
 * cache-line quirks for the early (<= VER_06) chips, a PHY reset, and
 * finally restart auto-negotiation advertising all supported speeds.
 */
static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_hw_phy_config(dev);

	if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
		dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
		RTL_W8(0x82, 0x01);
	}

	pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);

	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
		pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);

	if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
		dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
		RTL_W8(0x82, 0x01);
		dprintk("Set PHY Reg 0x0bh = 0x00h\n");
		rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0
	}

	rtl8169_phy_reset(dev, tp);

	/* Advertise 10/100 always, 1000 only on gigabit-capable PHYs. */
	rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
			  ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			  (tp->mii.supports_gmii ?
			   ADVERTISED_1000baseT_Half |
			   ADVERTISED_1000baseT_Full : 0));

	if (rtl_tbi_enabled(tp))
		netif_info(tp, link, dev, "TBI auto-negotiating\n");
}
4435
/*
 * Program the unicast MAC address filter registers.  MAC4 (the two
 * high bytes) is written before MAC0; each write is followed by a
 * read-back, presumably to flush the posted MMIO write before the
 * next one - TODO confirm.  Config register access is gated by the
 * Cfg9346 unlock/lock pair, under the driver work lock.
 */
static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_lock_work(tp);

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	RTL_W32(MAC4, addr[4] | addr[5] << 8);
	RTL_R32(MAC4);

	RTL_W32(MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24);
	RTL_R32(MAC0);

	/* VER_34 also keeps a copy of the address in EXGMAC registers. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_34)
		rtl_rar_exgmac_set(tp, addr);

	RTL_W8(Cfg9346, Cfg9346_Lock);

	rtl_unlock_work(tp);
}
4457
4458static int rtl_set_mac_address(struct net_device *dev, void *p)
4459{
4460	struct rtl8169_private *tp = netdev_priv(dev);
4461	struct sockaddr *addr = p;
4462
4463	if (!is_valid_ether_addr(addr->sa_data))
4464		return -EADDRNOTAVAIL;
4465
4466	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4467
4468	rtl_rar_set(tp, dev->dev_addr);
4469
4470	return 0;
4471}
4472
4473static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4474{
4475	struct rtl8169_private *tp = netdev_priv(dev);
4476	struct mii_ioctl_data *data = if_mii(ifr);
4477
4478	return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV;
4479}
4480
4481static int rtl_xmii_ioctl(struct rtl8169_private *tp,
4482			  struct mii_ioctl_data *data, int cmd)
4483{
4484	switch (cmd) {
4485	case SIOCGMIIPHY:
4486		data->phy_id = 32; /* Internal PHY */
4487		return 0;
4488
4489	case SIOCGMIIREG:
4490		data->val_out = rtl_readphy(tp, data->reg_num & 0x1f);
4491		return 0;
4492
4493	case SIOCSMIIREG:
4494		rtl_writephy(tp, data->reg_num & 0x1f, data->val_in);
4495		return 0;
4496	}
4497	return -EOPNOTSUPP;
4498}
4499
/* MII ioctls are not supported when the chip runs in TBI mode. */
static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd)
{
	return -EOPNOTSUPP;
}
4504
4505static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
4506{
4507	if (tp->features & RTL_FEATURE_MSI) {
4508		pci_disable_msi(pdev);
4509		tp->features &= ~RTL_FEATURE_MSI;
4510	}
4511}
4512
/*
 * Select the MDIO (PHY register) access routines for the detected MAC
 * version: the two RTL8168DP flavours and the 8168G-and-later chips
 * each need their own accessors; everything else uses the classic
 * 8169 ones.
 */
static void rtl_init_mdio_ops(struct rtl8169_private *tp)
{
	struct mdio_ops *ops = &tp->mdio_ops;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_27:
		ops->write	= r8168dp_1_mdio_write;
		ops->read	= r8168dp_1_mdio_read;
		break;
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		ops->write	= r8168dp_2_mdio_write;
		ops->read	= r8168dp_2_mdio_read;
		break;
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
	case RTL_GIGA_MAC_VER_42:
	case RTL_GIGA_MAC_VER_43:
	case RTL_GIGA_MAC_VER_44:
	case RTL_GIGA_MAC_VER_45:
	case RTL_GIGA_MAC_VER_46:
	case RTL_GIGA_MAC_VER_47:
	case RTL_GIGA_MAC_VER_48:
	case RTL_GIGA_MAC_VER_49:
	case RTL_GIGA_MAC_VER_50:
	case RTL_GIGA_MAC_VER_51:
		ops->write	= r8168g_mdio_write;
		ops->read	= r8168g_mdio_read;
		break;
	default:
		ops->write	= r8169_mdio_write;
		ops->read	= r8169_mdio_read;
		break;
	}
}
4548
/*
 * Renegotiate at the lowest speed the link partner advertises
 * (read from MII_LPA) - used before suspend to cut WoL power draw.
 * If the partner advertises nothing usable, fall back to advertising
 * everything this PHY supports.
 */
static void rtl_speed_down(struct rtl8169_private *tp)
{
	u32 adv;
	int lpa;

	rtl_writephy(tp, 0x1f, 0x0000);
	lpa = rtl_readphy(tp, MII_LPA);

	if (lpa & (LPA_10HALF | LPA_10FULL))
		adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full;
	else if (lpa & (LPA_100HALF | LPA_100FULL))
		adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
	else
		adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
		      (tp->mii.supports_gmii ?
		       ADVERTISED_1000baseT_Half |
		       ADVERTISED_1000baseT_Full : 0);

	rtl8169_set_speed(tp->dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
			  adv);
}
4572
/*
 * WoL suspend quirk: on the listed MAC versions the receiver must
 * keep accepting broadcast/multicast/unicast frames while suspended,
 * otherwise wake packets would be filtered out.
 */
static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_29:
	case RTL_GIGA_MAC_VER_30:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_34:
	case RTL_GIGA_MAC_VER_37:
	case RTL_GIGA_MAC_VER_38:
	case RTL_GIGA_MAC_VER_39:
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
	case RTL_GIGA_MAC_VER_42:
	case RTL_GIGA_MAC_VER_43:
	case RTL_GIGA_MAC_VER_44:
	case RTL_GIGA_MAC_VER_45:
	case RTL_GIGA_MAC_VER_46:
	case RTL_GIGA_MAC_VER_47:
	case RTL_GIGA_MAC_VER_48:
	case RTL_GIGA_MAC_VER_49:
	case RTL_GIGA_MAC_VER_50:
	case RTL_GIGA_MAC_VER_51:
		RTL_W32(RxConfig, RTL_R32(RxConfig) |
			AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
		break;
	default:
		break;
	}
}
4607
/*
 * If any Wake-on-LAN source is armed, prepare for a WoL-capable
 * suspend (drop link speed, apply the receive-filter quirk) and
 * return true so the caller skips the full PHY/PLL power-down.
 */
static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
{
	if (!(__rtl8169_get_wol(tp) & WAKE_ANY))
		return false;

	rtl_speed_down(tp);
	rtl_wol_suspend_quirk(tp);

	return true;
}
4618
/* Power the 810x-family PHY down via the BMCR power-down bit. */
static void r810x_phy_power_down(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
4624
/* Power the 810x-family PHY back up (writing BMCR without PDOWN). */
static void r810x_phy_power_up(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
}
4630
/*
 * PLL power-down for the 810x family.  Skipped entirely when WoL is
 * armed.  After the PHY is powered down, the PMCH PLL bit is cleared
 * on every version except the early ones listed, which have no such
 * control.
 */
static void r810x_pll_power_down(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if (rtl_wol_pll_power_down(tp))
		return;

	r810x_phy_power_down(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
	case RTL_GIGA_MAC_VER_08:
	case RTL_GIGA_MAC_VER_09:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_13:
	case RTL_GIGA_MAC_VER_16:
		break;
	default:
		RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
		break;
	}
}
4653
/*
 * PLL power-up for the 810x family: restore the PHY, then set the
 * PMCH PLL bit(s).  VER_47/48 need the extra 0x40 bit; the early
 * versions listed have no PMCH control at all.
 */
static void r810x_pll_power_up(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r810x_phy_power_up(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
	case RTL_GIGA_MAC_VER_08:
	case RTL_GIGA_MAC_VER_09:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_13:
	case RTL_GIGA_MAC_VER_16:
		break;
	case RTL_GIGA_MAC_VER_47:
	case RTL_GIGA_MAC_VER_48:
		RTL_W8(PMCH, RTL_R8(PMCH) | 0xc0);
		break;
	default:
		RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
		break;
	}
}
4677
/*
 * Power the 8168-family PHY up.  The listed versions additionally
 * need PHY register 0x0e cleared (its counterpart power-down path
 * writes 0x0200 there); finally BMCR is rewritten without PDOWN.
 */
static void r8168_phy_power_up(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		rtl_writephy(tp, 0x0e, 0x0000);
		break;
	default:
		break;
	}
	rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
}
4704
/*
 * Power the 8168-family PHY down.  VER_32/33/40/41 keep ANENABLE set
 * alongside PDOWN; the VER_11..31 group first writes 0x0200 to PHY
 * register 0x0e and then falls through to the plain PDOWN write that
 * every remaining version gets.
 */
static void r8168_phy_power_down(struct rtl8169_private *tp)
{
	rtl_writephy(tp, 0x1f, 0x0000);
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
		rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN);
		break;

	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
		rtl_writephy(tp, 0x0e, 0x0200);
		/* fall through - these versions also need BMCR_PDOWN */
	default:
		rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
		break;
	}
}
4737
/*
 * PLL power-down for the 8168 family.  Bails out when a DASH
 * management agent is active, when ASF is enabled (VER_23/24), or
 * when WoL is armed.  Otherwise powers the PHY down and clears the
 * PMCH PLL bit on the versions that support it; VER_40/41/49 also
 * clear ERI 0x1a8 bits first.
 */
static void r8168_pll_power_down(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_31 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_49 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_50 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_51) &&
	    r8168_check_dash(tp)) {
		return;
	}

	if ((tp->mac_version == RTL_GIGA_MAC_VER_23 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_24) &&
	    (RTL_R16(CPlusCmd) & ASF)) {
		return;
	}

	if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_33)
		rtl_ephy_write(tp, 0x19, 0xff64);

	if (rtl_wol_pll_power_down(tp))
		return;

	r8168_phy_power_down(tp);

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_44:
	case RTL_GIGA_MAC_VER_45:
	case RTL_GIGA_MAC_VER_46:
	case RTL_GIGA_MAC_VER_50:
	case RTL_GIGA_MAC_VER_51:
		RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
		break;
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
	case RTL_GIGA_MAC_VER_49:
		rtl_w0w1_eri(tp, 0x1a8, ERIAR_MASK_1111, 0x00000000,
			     0xfc000000, ERIAR_EXGMAC);
		RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
		break;
	}
}
4791
/*
 * PLL power-up for the 8168 family: mirror image of the power-down
 * path - restore the PMCH PLL bit(s) (and the ERI 0x1a8 bits on
 * VER_40/41/49), then bring the PHY back up.
 */
static void r8168_pll_power_up(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
		break;
	case RTL_GIGA_MAC_VER_44:
	case RTL_GIGA_MAC_VER_45:
	case RTL_GIGA_MAC_VER_46:
	case RTL_GIGA_MAC_VER_50:
	case RTL_GIGA_MAC_VER_51:
		RTL_W8(PMCH, RTL_R8(PMCH) | 0xc0);
		break;
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
	case RTL_GIGA_MAC_VER_49:
		RTL_W8(PMCH, RTL_R8(PMCH) | 0xc0);
		rtl_w0w1_eri(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000,
			     0x00000000, ERIAR_EXGMAC);
		break;
	}

	r8168_phy_power_up(tp);
}
4824
4825static void rtl_generic_op(struct rtl8169_private *tp,
4826			   void (*op)(struct rtl8169_private *))
4827{
4828	if (op)
4829		op(tp);
4830}
4831
/* Run the chip-specific PLL power-down hook, if any. */
static void rtl_pll_power_down(struct rtl8169_private *tp)
{
	rtl_generic_op(tp, tp->pll_power_ops.down);
}
4836
/* Run the chip-specific PLL power-up hook, if any. */
static void rtl_pll_power_up(struct rtl8169_private *tp)
{
	rtl_generic_op(tp, tp->pll_power_ops.up);
}
4841
/*
 * Select the PLL power-management hooks for the detected MAC version:
 * 810x-class chips, 8168-class chips, or none (versions without PLL
 * power control get NULL hooks, which rtl_generic_op skips).
 */
static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
{
	struct pll_power_ops *ops = &tp->pll_power_ops;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
	case RTL_GIGA_MAC_VER_08:
	case RTL_GIGA_MAC_VER_09:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_16:
	case RTL_GIGA_MAC_VER_29:
	case RTL_GIGA_MAC_VER_30:
	case RTL_GIGA_MAC_VER_37:
	case RTL_GIGA_MAC_VER_39:
	case RTL_GIGA_MAC_VER_43:
	case RTL_GIGA_MAC_VER_47:
	case RTL_GIGA_MAC_VER_48:
		ops->down	= r810x_pll_power_down;
		ops->up		= r810x_pll_power_up;
		break;

	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
	case RTL_GIGA_MAC_VER_31:
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_34:
	case RTL_GIGA_MAC_VER_35:
	case RTL_GIGA_MAC_VER_36:
	case RTL_GIGA_MAC_VER_38:
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
	case RTL_GIGA_MAC_VER_42:
	case RTL_GIGA_MAC_VER_44:
	case RTL_GIGA_MAC_VER_45:
	case RTL_GIGA_MAC_VER_46:
	case RTL_GIGA_MAC_VER_49:
	case RTL_GIGA_MAC_VER_50:
	case RTL_GIGA_MAC_VER_51:
		ops->down	= r8168_pll_power_down;
		ops->up		= r8168_pll_power_up;
		break;

	default:
		ops->down	= NULL;
		ops->up		= NULL;
		break;
	}
}
4903
/*
 * Program the base RxConfig value (FIFO threshold / DMA burst / early-rx
 * and multi-fragment bits) for the detected chip.  The accept-* bits of
 * RxConfig are managed separately by rtl_set_rx_mode().
 */
static void rtl_init_rxcfg(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	switch (tp->mac_version) {
	/* Older chips use an explicit RX FIFO threshold. */
	case RTL_GIGA_MAC_VER_01:
	case RTL_GIGA_MAC_VER_02:
	case RTL_GIGA_MAC_VER_03:
	case RTL_GIGA_MAC_VER_04:
	case RTL_GIGA_MAC_VER_05:
	case RTL_GIGA_MAC_VER_06:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_13:
	case RTL_GIGA_MAC_VER_14:
	case RTL_GIGA_MAC_VER_15:
	case RTL_GIGA_MAC_VER_16:
	case RTL_GIGA_MAC_VER_17:
		RTL_W32(RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
		break;
	case RTL_GIGA_MAC_VER_18:
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21:
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_34:
	case RTL_GIGA_MAC_VER_35:
		RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
		break;
	/* Newest chips additionally disable early rx. */
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
	case RTL_GIGA_MAC_VER_42:
	case RTL_GIGA_MAC_VER_43:
	case RTL_GIGA_MAC_VER_44:
	case RTL_GIGA_MAC_VER_45:
	case RTL_GIGA_MAC_VER_46:
	case RTL_GIGA_MAC_VER_47:
	case RTL_GIGA_MAC_VER_48:
	case RTL_GIGA_MAC_VER_49:
	case RTL_GIGA_MAC_VER_50:
	case RTL_GIGA_MAC_VER_51:
		RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
		break;
	default:
		RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
		break;
	}
}
4955
4956static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
4957{
4958	tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0;
4959}
4960
/*
 * Run the chip-specific jumbo-frame enable hook with the config
 * registers unlocked (Cfg9346), relocking afterwards.
 */
static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Cfg9346, Cfg9346_Unlock);
	rtl_generic_op(tp, tp->jumbo_ops.enable);
	RTL_W8(Cfg9346, Cfg9346_Lock);
}
4969
/*
 * Run the chip-specific jumbo-frame disable hook with the config
 * registers unlocked (Cfg9346), relocking afterwards.
 */
static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Cfg9346, Cfg9346_Unlock);
	rtl_generic_op(tp, tp->jumbo_ops.disable);
	RTL_W8(Cfg9346, Cfg9346_Lock);
}
4978
/*
 * 8168C-family jumbo enable: set both jumbo enable bits and drop the
 * PCIe max read request size to 512B while jumbo frames are in use.
 */
static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
	RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1);
	rtl_tx_performance_tweak(tp->pci_dev, PCI_EXP_DEVCTL_READRQ_512B);
}
4987
/*
 * 8168C-family jumbo disable: clear the jumbo bits and restore the
 * larger read request size (0x5 encoding in DEVCTL READRQ field).
 */
static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
	RTL_W8(Config4, RTL_R8(Config4) & ~Jumbo_En1);
	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
}
4996
/* 8168DP jumbo enable: only the Config3 bit is needed. */
static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
}
5003
/* 8168DP jumbo disable: clear the Config3 jumbo bit. */
static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
}
5010
/*
 * 8168E jumbo enable: raise the max tx packet size, set the jumbo
 * bits and lower the PCIe read request size to 512B.
 */
static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(MaxTxPacketSize, 0x3f);
	RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
	RTL_W8(Config4, RTL_R8(Config4) | 0x01);	/* undocumented jumbo bit */
	rtl_tx_performance_tweak(tp->pci_dev, PCI_EXP_DEVCTL_READRQ_512B);
}
5020
/* 8168E jumbo disable: undo r8168e_hw_jumbo_enable(). */
static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(MaxTxPacketSize, 0x0c);
	RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
	RTL_W8(Config4, RTL_R8(Config4) & ~0x01);	/* undocumented jumbo bit */
	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
}
5030
/* 8168B rev 0 jumbo enable: PCIe tweak only (512B read request, no-snoop). */
static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
{
	rtl_tx_performance_tweak(tp->pci_dev,
		PCI_EXP_DEVCTL_READRQ_512B | PCI_EXP_DEVCTL_NOSNOOP_EN);
}
5036
/* 8168B rev 0 jumbo disable: restore read request size, keep no-snoop. */
static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp)
{
	rtl_tx_performance_tweak(tp->pci_dev,
		(0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
}
5042
/* 8168B rev 1 jumbo enable: rev 0 tweak plus Config4 bit 0. */
static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r8168b_0_hw_jumbo_enable(tp);

	RTL_W8(Config4, RTL_R8(Config4) | (1 << 0));
}
5051
/* 8168B rev 1 jumbo disable: rev 0 restore plus clearing Config4 bit 0. */
static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	r8168b_0_hw_jumbo_disable(tp);

	RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
}
5060
/*
 * Select the jumbo-frame enable/disable handlers for the detected chip.
 * NULL handlers mean jumbo mode needs no register changes (or is not
 * supported at all) on that chip.
 */
static void rtl_init_jumbo_ops(struct rtl8169_private *tp)
{
	struct jumbo_ops *ops = &tp->jumbo_ops;

	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
		ops->disable	= r8168b_0_hw_jumbo_disable;
		ops->enable	= r8168b_0_hw_jumbo_enable;
		break;
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		ops->disable	= r8168b_1_hw_jumbo_disable;
		ops->enable	= r8168b_1_hw_jumbo_enable;
		break;
	case RTL_GIGA_MAC_VER_18: /* Wild guess. Needs info from Realtek. */
	case RTL_GIGA_MAC_VER_19:
	case RTL_GIGA_MAC_VER_20:
	case RTL_GIGA_MAC_VER_21: /* Wild guess. Needs info from Realtek. */
	case RTL_GIGA_MAC_VER_22:
	case RTL_GIGA_MAC_VER_23:
	case RTL_GIGA_MAC_VER_24:
	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
		ops->disable	= r8168c_hw_jumbo_disable;
		ops->enable	= r8168c_hw_jumbo_enable;
		break;
	case RTL_GIGA_MAC_VER_27:
	case RTL_GIGA_MAC_VER_28:
		ops->disable	= r8168dp_hw_jumbo_disable;
		ops->enable	= r8168dp_hw_jumbo_enable;
		break;
	case RTL_GIGA_MAC_VER_31: /* Wild guess. Needs info from Realtek. */
	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
	case RTL_GIGA_MAC_VER_34:
		ops->disable	= r8168e_hw_jumbo_disable;
		ops->enable	= r8168e_hw_jumbo_enable;
		break;

	/*
	 * No action needed for jumbo frames with 8169.
	 * No jumbo for 810x at all.
	 */
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
	case RTL_GIGA_MAC_VER_42:
	case RTL_GIGA_MAC_VER_43:
	case RTL_GIGA_MAC_VER_44:
	case RTL_GIGA_MAC_VER_45:
	case RTL_GIGA_MAC_VER_46:
	case RTL_GIGA_MAC_VER_47:
	case RTL_GIGA_MAC_VER_48:
	case RTL_GIGA_MAC_VER_49:
	case RTL_GIGA_MAC_VER_50:
	case RTL_GIGA_MAC_VER_51:
	default:
		ops->disable	= NULL;
		ops->enable	= NULL;
		break;
	}
}
5122
/* Poll condition: true while the chip reset bit is still asserted. */
DECLARE_RTL_COND(rtl_chipcmd_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R8(ChipCmd) & CmdReset;
}
5129
/*
 * Issue a software reset and wait (up to 100 x 100us) for the chip to
 * clear the reset bit.
 */
static void rtl_hw_reset(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W8(ChipCmd, CmdReset);

	rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
}
5138
/*
 * Look up, load and validate the firmware patch for this chip, then
 * publish it in tp->rtl_fw.  On any failure tp->rtl_fw ends up NULL;
 * a warning is logged unless the chip simply has no firmware name.
 */
static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
{
	struct rtl_fw *rtl_fw;
	const char *name;
	int rc = -ENOMEM;	/* reported if the kzalloc below fails */

	name = rtl_lookup_firmware_name(tp);
	if (!name)
		goto out_no_firmware;

	rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
	if (!rtl_fw)
		goto err_warn;

	rc = request_firmware(&rtl_fw->fw, name, &tp->pci_dev->dev);
	if (rc < 0)
		goto err_free;

	/* Reject firmware blobs that fail the driver's sanity checks. */
	rc = rtl_check_firmware(tp, rtl_fw);
	if (rc < 0)
		goto err_release_firmware;

	tp->rtl_fw = rtl_fw;
out:
	return;

err_release_firmware:
	release_firmware(rtl_fw->fw);
err_free:
	kfree(rtl_fw);
err_warn:
	netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n",
		   name, rc);
out_no_firmware:
	tp->rtl_fw = NULL;
	goto out;
}
5176
5177static void rtl_request_firmware(struct rtl8169_private *tp)
5178{
5179	if (IS_ERR(tp->rtl_fw))
5180		rtl_request_uncached_firmware(tp);
5181}
5182
/* Stop accepting any packets by clearing all RxConfig accept bits. */
static void rtl_rx_close(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
}
5189
/* Poll condition: true while the normal-priority tx queue is still busy. */
DECLARE_RTL_COND(rtl_npq_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R8(TxPoll) & NPQ;
}
5196
/* Poll condition: true once the tx FIFO reports empty. */
DECLARE_RTL_COND(rtl_txcfg_empty_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(TxConfig) & TXCFG_EMPTY;
}
5203
/*
 * Quiesce the chip (irqs masked, rx closed, tx drained in a
 * chip-specific way) and then issue a software reset.
 */
static void rtl8169_hw_reset(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* Disable interrupts */
	rtl8169_irq_mask_and_ack(tp);

	rtl_rx_close(tp);

	if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_31) {
		/* 8168DP: wait for the normal-priority queue to drain. */
		rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
	} else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_35 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_36 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_37 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_38 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_40 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_41 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_42 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_43 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_44 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_45 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_46 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_47 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_48 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_49 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_50 ||
		   tp->mac_version == RTL_GIGA_MAC_VER_51) {
		/* Newer chips: request stop and poll for an empty tx FIFO. */
		RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
		rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
	} else {
		/* Everything else: request stop and give it a fixed delay. */
		RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
		udelay(100);
	}

	rtl_hw_reset(tp);
}
5243
/* Program TxConfig with the DMA burst size and inter-frame gap. */
static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* Set DMA burst size and Interframe Gap Time */
	RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
		(InterFrameGap << TxInterFrameGapShift));
}
5252
/* Run the chip-specific hardware start routine, then unmask interrupts. */
static void rtl_hw_start(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	tp->hw_start(dev);

	rtl_irq_enable_all(tp);
}
5261
/*
 * Tell the chip where the Tx/Rx descriptor rings live in DMA space.
 * The high half of each 64-bit address MUST be written first (see the
 * iop3xx note below) - do not reorder these writes.
 */
static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
					 void __iomem *ioaddr)
{
	/*
	 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
	 * register to be written before TxDescAddrLow to work.
	 * Switching from MMIO to I/O access fixes the issue as well.
	 */
	RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
	RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32));
	RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
	RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
}
5275
/*
 * Read CPlusCmd and write the same value back, returning it.  The
 * write-back of the read value is intentional (hardware side effect);
 * callers OR additional bits into the returned value afterwards.
 */
static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
{
	u16 cmd;

	cmd = RTL_R16(CPlusCmd);
	RTL_W16(CPlusCmd, cmd);
	return cmd;
}
5284
/*
 * Set the maximum accepted rx packet size one byte above the buffer
 * size, effectively disabling the size filter ("Low hurts").
 */
static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
{
	/* Low hurts. Let's disable the filtering. */
	RTL_W16(RxMaxSize, rx_buf_sz + 1);
}
5290
5291static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
5292{
5293	static const struct rtl_cfg2_info {
5294		u32 mac_version;
5295		u32 clk;
5296		u32 val;
5297	} cfg2_info [] = {
5298		{ RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd
5299		{ RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
5300		{ RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
5301		{ RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
5302	};
5303	const struct rtl_cfg2_info *p = cfg2_info;
5304	unsigned int i;
5305	u32 clk;
5306
5307	clk = RTL_R8(Config2) & PCI_Clock_66MHz;
5308	for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) {
5309		if ((p->mac_version == mac_version) && (p->clk == clk)) {
5310			RTL_W32(0x7c, p->val);
5311			break;
5312		}
5313	}
5314}
5315
/*
 * Program the accept-* bits of RxConfig and the multicast hash filter
 * (MAR0/MAR4) from the netdev's promiscuous/allmulti flags and mc list.
 */
static void rtl_set_rx_mode(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 mc_filter[2];	/* Multicast hash filter */
	int rx_mode;
	u32 tmp = 0;

	if (dev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps. */
		netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
		rx_mode =
		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
		    AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct netdev_hw_addr *ha;

		/* Build the 64-bit CRC-based multicast hash filter. */
		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		netdev_for_each_mc_addr(ha, dev) {
			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	if (dev->features & NETIF_F_RXALL)
		rx_mode |= (AcceptErr | AcceptRunt);

	tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;

	/* Post-8169 chips expect the hash filter words swapped/byteswapped. */
	if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
		u32 data = mc_filter[0];

		mc_filter[0] = swab32(mc_filter[1]);
		mc_filter[1] = swab32(data);
	}

	/* VER_35 quirk: force the filter wide open. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_35)
		mc_filter[1] = mc_filter[0] = 0xffffffff;

	RTL_W32(MAR0 + 4, mc_filter[1]);
	RTL_W32(MAR0 + 0, mc_filter[0]);

	RTL_W32(RxConfig, tmp);
}
5368
/*
 * Hardware start for the 8169 family (VER_01..VER_06 paths visible
 * here).  Note the chip-version-dependent ordering: the oldest chips
 * need tx/rx enabled and TxConfig set up before the descriptor
 * registers, the rest afterwards.
 */
static void rtl_hw_start_8169(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
		RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
	}

	RTL_W8(Cfg9346, Cfg9346_Unlock);
	if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_04)
		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

	rtl_init_rxcfg(tp);

	RTL_W8(EarlyTxThres, NoEarlyTx);

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_04)
		rtl_set_rx_tx_config_registers(tp);

	tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;

	if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_03) {
		dprintk("Set MAC Reg C+CR Offset 0xe0. "
			"Bit-3 and bit-14 MUST be 1\n");
		tp->cp_cmd |= (1 << 14);
	}

	RTL_W16(CPlusCmd, tp->cp_cmd);

	rtl8169_set_magic_reg(ioaddr, tp->mac_version);

	/*
	 * Undocumented corner. Supposedly:
	 * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
	 */
	RTL_W16(IntrMitigate, 0x0000);

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	if (tp->mac_version != RTL_GIGA_MAC_VER_01 &&
	    tp->mac_version != RTL_GIGA_MAC_VER_02 &&
	    tp->mac_version != RTL_GIGA_MAC_VER_03 &&
	    tp->mac_version != RTL_GIGA_MAC_VER_04) {
		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
		rtl_set_rx_tx_config_registers(tp);
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
	RTL_R8(IntrMask);

	RTL_W32(RxMissed, 0);

	rtl_set_rx_mode(dev);

	/* no early-rx interrupts */
	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
}
5440
5441static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
5442{
5443	if (tp->csi_ops.write)
5444		tp->csi_ops.write(tp, addr, value);
5445}
5446
5447static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
5448{
5449	return tp->csi_ops.read ? tp->csi_ops.read(tp, addr) : ~0;
5450}
5451
5452static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits)
5453{
5454	u32 csi;
5455
5456	csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
5457	rtl_csi_write(tp, 0x070c, csi | bits);
5458}
5459
/* CSI access enable, variant 1 (top byte 0x17). */
static void rtl_csi_access_enable_1(struct rtl8169_private *tp)
{
	rtl_csi_access_enable(tp, 0x17000000);
}
5464
/* CSI access enable, variant 2 (top byte 0x27). */
static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
{
	rtl_csi_access_enable(tp, 0x27000000);
}
5469
/* Poll condition: CSIAR completion flag state. */
DECLARE_RTL_COND(rtl_csiar_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R32(CSIAR) & CSIAR_FLAG;
}
5476
/*
 * Default CSI write: load CSIDR, kick the write command in CSIAR and
 * wait for the flag to clear (completion for writes).
 */
static void r8169_csi_write(struct rtl8169_private *tp, int addr, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIDR, value);
	RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);

	rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}
5487
/*
 * Default CSI read: start the read in CSIAR, wait for the flag to be
 * set (completion for reads) and return CSIDR, or ~0 on timeout.
 */
static u32 r8169_csi_read(struct rtl8169_private *tp, int addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);

	return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
		RTL_R32(CSIDR) : ~0;
}
5498
/* 8402-variant CSI write: like the default but addressed to the NIC function. */
static void r8402_csi_write(struct rtl8169_private *tp, int addr, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIDR, value);
	RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
		CSIAR_FUNC_NIC);

	rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}
5510
/* 8402-variant CSI read: NIC-function addressing; ~0 on timeout. */
static u32 r8402_csi_read(struct rtl8169_private *tp, int addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);

	return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
		RTL_R32(CSIDR) : ~0;
}
5521
/* 8411-variant CSI write: addressed to the second NIC function. */
static void r8411_csi_write(struct rtl8169_private *tp, int addr, int value)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIDR, value);
	RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
		CSIAR_FUNC_NIC2);

	rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}
5533
/* 8411-variant CSI read: second NIC function addressing; ~0 on timeout. */
static u32 r8411_csi_read(struct rtl8169_private *tp, int addr)
{
	void __iomem *ioaddr = tp->mmio_addr;

	RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC2 |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);

	return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
		RTL_R32(CSIDR) : ~0;
}
5544
/*
 * Select the CSI accessors for the detected chip.  Pre-PCIe chips get
 * NULL handlers (rtl_csi_read() then returns ~0); a few chips need
 * function-addressed variants.
 */
static void rtl_init_csi_ops(struct rtl8169_private *tp)
{
	struct csi_ops *ops = &tp->csi_ops;

	switch (tp->mac_version) {
	/* No CSI access on these chips. */
	case RTL_GIGA_MAC_VER_01:
	case RTL_GIGA_MAC_VER_02:
	case RTL_GIGA_MAC_VER_03:
	case RTL_GIGA_MAC_VER_04:
	case RTL_GIGA_MAC_VER_05:
	case RTL_GIGA_MAC_VER_06:
	case RTL_GIGA_MAC_VER_10:
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_13:
	case RTL_GIGA_MAC_VER_14:
	case RTL_GIGA_MAC_VER_15:
	case RTL_GIGA_MAC_VER_16:
	case RTL_GIGA_MAC_VER_17:
		ops->write	= NULL;
		ops->read	= NULL;
		break;

	case RTL_GIGA_MAC_VER_37:
	case RTL_GIGA_MAC_VER_38:
		ops->write	= r8402_csi_write;
		ops->read	= r8402_csi_read;
		break;

	case RTL_GIGA_MAC_VER_44:
		ops->write	= r8411_csi_write;
		ops->read	= r8411_csi_read;
		break;

	default:
		ops->write	= r8169_csi_write;
		ops->read	= r8169_csi_read;
		break;
	}
}
5585
/* One EPHY fixup: at @offset, clear the @mask bits and set the @bits. */
struct ephy_info {
	unsigned int offset;
	u16 mask;
	u16 bits;
};
5591
5592static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e,
5593			  int len)
5594{
5595	u16 w;
5596
5597	while (len-- > 0) {
5598		w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits;
5599		rtl_ephy_write(tp, e->offset, w);
5600		e++;
5601	}
5602}
5603
/* Clear the PCIe CLKREQ enable bit in the device's Link Control register. */
static void rtl_disable_clock_request(struct pci_dev *pdev)
{
	pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL,
				   PCI_EXP_LNKCTL_CLKREQ_EN);
}
5609
/* Set the PCIe CLKREQ enable bit in the device's Link Control register. */
static void rtl_enable_clock_request(struct pci_dev *pdev)
{
	pcie_capability_set_word(pdev, PCI_EXP_LNKCTL,
				 PCI_EXP_LNKCTL_CLKREQ_EN);
}
5615
5616static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable)
5617{
5618	void __iomem *ioaddr = tp->mmio_addr;
5619	u8 data;
5620
5621	data = RTL_R8(Config3);
5622
5623	if (enable)
5624		data |= Rdy_to_L23;
5625	else
5626		data &= ~Rdy_to_L23;
5627
5628	RTL_W8(Config3, data);
5629}
5630
/*
 * CPlusCmd bits cleared during 8168-family hw start.
 * NOTE(review): individual bit semantics inferred from their names
 * (BIST/debug/force-* test features) - confirm against the datasheet.
 */
#define R8168_CPCMD_QUIRK_MASK (\
	EnableBist | \
	Mac_dbgo_oe | \
	Force_half_dup | \
	Force_rxflow_en | \
	Force_txflow_en | \
	Cxpl_dbg_sel | \
	ASF | \
	PktCntrDisable | \
	Mac_dbgo_sel)
5641
/* Chip-specific hw start for 8168B rev b. */
static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);

	/* Only tweak PCIe settings at standard MTU; jumbo uses its own. */
	if (tp->dev->mtu <= ETH_DATA_LEN) {
		rtl_tx_performance_tweak(pdev, (0x5 << MAX_READ_REQUEST_SHIFT) |
					 PCI_EXP_DEVCTL_NOSNOOP_EN);
	}
}
5656
/* Chip-specific hw start for 8168B rev e/f: rev b setup plus tx size tweaks. */
static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	rtl_hw_start_8168bb(tp);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
}
5667
/* Common hw start tail shared by the 8168C/CP variants below. */
static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	RTL_W8(Config1, RTL_R8(Config1) | Speed_down);

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	/* Only tweak PCIe settings at standard MTU; jumbo uses its own. */
	if (tp->dev->mtu <= ETH_DATA_LEN)
		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_disable_clock_request(pdev);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
5684
/* Hw start for 8168CP variant 1: EPHY fixups plus the common CP tail. */
static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8168cp[] = {
		{ 0x01, 0,	0x0001 },
		{ 0x02, 0x0800,	0x1000 },
		{ 0x03, 0,	0x0042 },
		{ 0x06, 0x0080,	0x0000 },
		{ 0x07, 0,	0x2000 }
	};

	rtl_csi_access_enable_2(tp);

	rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));

	__rtl_hw_start_8168cp(tp);
}
5701
/* Hw start for 8168CP variant 2 (no EPHY fixups, no clock request change). */
static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	if (tp->dev->mtu <= ETH_DATA_LEN)
		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
5716
/* Hw start for 8168CP variant 3: adds an undocumented DBG_REG write. */
static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	/* Magic. */
	RTL_W8(DBG_REG, 0x20);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	if (tp->dev->mtu <= ETH_DATA_LEN)
		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
5736
/* Hw start for 8168C variant 1: DBG_REG NAK fixes, EPHY fixups, CP tail. */
static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static const struct ephy_info e_info_8168c_1[] = {
		{ 0x02, 0x0800,	0x1000 },
		{ 0x03, 0,	0x0002 },
		{ 0x06, 0x0080,	0x0000 }
	};

	rtl_csi_access_enable_2(tp);

	RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);

	rtl_ephy_init(tp, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));

	__rtl_hw_start_8168cp(tp);
}
5754
/* Hw start for 8168C variant 2: EPHY fixups plus the common CP tail. */
static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
{
	static const struct ephy_info e_info_8168c_2[] = {
		{ 0x01, 0,	0x0001 },
		{ 0x03, 0x0400,	0x0220 }
	};

	rtl_csi_access_enable_2(tp);

	rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));

	__rtl_hw_start_8168cp(tp);
}
5768
/* 8168C variant 3 uses the exact same initialization as variant 2. */
static void rtl_hw_start_8168c_3(struct rtl8169_private *tp)
{
	rtl_hw_start_8168c_2(tp);
}
5773
/* Hw start for 8168C variant 4: no EPHY fixups, just the common CP tail. */
static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
{
	rtl_csi_access_enable_2(tp);

	__rtl_hw_start_8168cp(tp);
}
5780
/* Chip-specific hw start for 8168D. */
static void rtl_hw_start_8168d(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	rtl_disable_clock_request(pdev);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	if (tp->dev->mtu <= ETH_DATA_LEN)
		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
5797
/* Chip-specific hw start for 8168DP. */
static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_1(tp);

	if (tp->dev->mtu <= ETH_DATA_LEN)
		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_disable_clock_request(pdev);
}
5812
/*
 * Chip-specific hw start for the 8168D variant 4; unlike the other D
 * parts it applies EPHY fixups and re-enables the PCIe clock request.
 */
static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	static const struct ephy_info e_info_8168d_4[] = {
		{ 0x0b, 0x0000,	0x0048 },
		{ 0x19, 0x0020,	0x0050 },
		{ 0x0c, 0x0100,	0x0020 }
	};

	rtl_csi_access_enable_1(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_ephy_init(tp, e_info_8168d_4, ARRAY_SIZE(e_info_8168d_4));

	rtl_enable_clock_request(pdev);
}
5833
/* Chip-specific hw start for 8168E variant 1. */
static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	static const struct ephy_info e_info_8168e_1[] = {
		{ 0x00, 0x0200,	0x0100 },
		{ 0x00, 0x0000,	0x0004 },
		{ 0x06, 0x0002,	0x0001 },
		{ 0x06, 0x0000,	0x0030 },
		{ 0x07, 0x0000,	0x2000 },
		{ 0x00, 0x0000,	0x0020 },
		{ 0x03, 0x5800,	0x2000 },
		{ 0x03, 0x0000,	0x0001 },
		{ 0x01, 0x0800,	0x1000 },
		{ 0x07, 0x0000,	0x4000 },
		{ 0x1e, 0x0000,	0x2000 },
		{ 0x19, 0xffff,	0xfe6c },
		{ 0x0a, 0x0000,	0x0040 }
	};

	rtl_csi_access_enable_2(tp);

	rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));

	if (tp->dev->mtu <= ETH_DATA_LEN)
		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_disable_clock_request(pdev);

	/* Reset tx FIFO pointer */
	RTL_W32(MISC, RTL_R32(MISC) | TXPLA_RST);
	RTL_W32(MISC, RTL_R32(MISC) & ~TXPLA_RST);

	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
5871
/*
 * Chip-specific hw start for 8168E variant 2: EPHY fixups plus a block
 * of ERI (extended register interface) writes for the EXGMAC.
 */
static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	static const struct ephy_info e_info_8168e_2[] = {
		{ 0x09, 0x0000,	0x0080 },
		{ 0x19, 0x0000,	0x0224 }
	};

	rtl_csi_access_enable_1(tp);

	rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));

	if (tp->dev->mtu <= ETH_DATA_LEN)
		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
	rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
	rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);

	RTL_W8(MaxTxPacketSize, EarlySize);

	rtl_disable_clock_request(pdev);

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);

	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
	RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
5911
/* Common hw start body for the 8168F family (shared by 8168F-1 and 8411). */
static void rtl_hw_start_8168f(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
	/* Pulse bit 0 of ERI 0xdc: set-then-clear sequence. */
	rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
	rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
	rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
	rtl_w0w1_eri(tp, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);

	RTL_W8(MaxTxPacketSize, EarlySize);

	rtl_disable_clock_request(pdev);

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
	RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
	RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
}
5942
/* Hw start for 8168F variant 1: common F body plus its own EPHY fixups. */
static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	static const struct ephy_info e_info_8168f_1[] = {
		{ 0x06, 0x00c0,	0x0020 },
		{ 0x08, 0x0001,	0x0002 },
		{ 0x09, 0x0000,	0x0080 },
		{ 0x19, 0x0000,	0x0224 }
	};

	rtl_hw_start_8168f(tp);

	rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));

	rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
}
5962
/*
 * RTL8411: shares the 8168f bring-up but uses its own EPHY fix-ups and
 * additionally disables PCIe L2/L3 entry.
 */
static void rtl_hw_start_8411(struct rtl8169_private *tp)
{
	/* Vendor-provided EPHY {offset, clear-mask, set-mask} fix-ups. */
	static const struct ephy_info e_info_8168f_1[] = {
		{ 0x06, 0x00c0,	0x0020 },
		{ 0x0f, 0xffff,	0x5200 },
		{ 0x1e, 0x0000,	0x4000 },
		{ 0x19, 0x0000,	0x0224 }
	};

	rtl_hw_start_8168f(tp);
	rtl_pcie_state_l2l3_enable(tp, false);

	rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));

	rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC);
}
5979
/*
 * Common bring-up for the RTL8168G family (also used by 8411_2 and the
 * 8168g_1/_2 variants).  Magic register values come from the vendor;
 * keep the sequence order intact.
 */
static void rtl_hw_start_8168g(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);

	rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);

	rtl_csi_access_enable_1(tp);

	/* Raise the PCIe max read request size for DMA throughput. */
	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	/* Pulse bit 0 of ERI 0xdc: set-then-clear sequence. */
	rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
	rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f, ERIAR_EXGMAC);

	/* Ungate RXDV so the receiver can run. */
	RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
	RTL_W8(MaxTxPacketSize, EarlySize);

	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);

	rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC);
	rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);

	rtl_pcie_state_l2l3_enable(tp, false);
}
6014
/*
 * RTL8168G rev 1: common 8168g init followed by EPHY fix-ups.
 * ASPM/ClkReq must be off while touching the EPHY (see comment below).
 */
static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	/* Vendor-provided EPHY {offset, clear-mask, set-mask} fix-ups. */
	static const struct ephy_info e_info_8168g_1[] = {
		{ 0x00, 0x0000,	0x0008 },
		{ 0x0c, 0x37d0,	0x0820 },
		{ 0x1e, 0x0000,	0x0001 },
		{ 0x19, 0x8000,	0x0000 }
	};

	rtl_hw_start_8168g(tp);

	/* disable aspm and clock request before access ephy */
	RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
	RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
	rtl_ephy_init(tp, e_info_8168g_1, ARRAY_SIZE(e_info_8168g_1));
}
6032
/*
 * RTL8168G rev 2 (also used for VER_43 in rtl_hw_start_8101): common
 * 8168g init followed by this revision's EPHY fix-ups.
 */
static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	/* Vendor-provided EPHY {offset, clear-mask, set-mask} fix-ups. */
	static const struct ephy_info e_info_8168g_2[] = {
		{ 0x00, 0x0000,	0x0008 },
		{ 0x0c, 0x3df0,	0x0200 },
		{ 0x19, 0xffff,	0xfc00 },
		{ 0x1e, 0xffff,	0x20eb }
	};

	rtl_hw_start_8168g(tp);

	/* disable aspm and clock request before access ephy */
	RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
	RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
	rtl_ephy_init(tp, e_info_8168g_2, ARRAY_SIZE(e_info_8168g_2));
}
6050
/*
 * RTL8411 rev 2: reuses the 8168g bring-up with its own EPHY fix-ups.
 */
static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	/* Vendor-provided EPHY {offset, clear-mask, set-mask} fix-ups. */
	static const struct ephy_info e_info_8411_2[] = {
		{ 0x00, 0x0000,	0x0008 },
		{ 0x0c, 0x3df0,	0x0200 },
		{ 0x0f, 0xffff,	0x5200 },
		{ 0x19, 0x0020,	0x0000 },
		{ 0x1e, 0x0000,	0x2000 }
	};

	rtl_hw_start_8168g(tp);

	/* disable aspm and clock request before access ephy */
	RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
	RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
	rtl_ephy_init(tp, e_info_8411_2, ARRAY_SIZE(e_info_8411_2));
}
6069
/*
 * RTL8168H rev 1 (also used for VER_47/48 in the 8101 path).  Does the
 * EPHY fix-ups, the usual ERI/FIFO programming, then a series of MAC
 * OCP register adjustments derived from the PHY's rg_saw counter.
 * All constants are vendor magic; keep the sequence order intact.
 */
static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	int rg_saw_cnt;
	u32 data;
	/* Vendor-provided EPHY {offset, clear-mask, set-mask} fix-ups. */
	static const struct ephy_info e_info_8168h_1[] = {
		{ 0x1e, 0x0800,	0x0001 },
		{ 0x1d, 0x0000,	0x0800 },
		{ 0x05, 0xffff,	0x2089 },
		{ 0x06, 0xffff,	0x5881 },
		{ 0x04, 0xffff,	0x154a },
		{ 0x01, 0xffff,	0x068b }
	};

	/* disable aspm and clock request before access ephy */
	RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
	RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
	rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1));

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);

	rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);

	rtl_csi_access_enable_1(tp);

	/* Raise the PCIe max read request size for DMA throughput. */
	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	/* Pulse bit 0 of ERI 0xdc: set-then-clear sequence. */
	rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
	rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);

	rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_1111, 0x0010, 0x00, ERIAR_EXGMAC);

	rtl_w0w1_eri(tp, 0xd4, ERIAR_MASK_1111, 0x1f00, 0x00, ERIAR_EXGMAC);

	rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC);

	/* Ungate RXDV so the receiver can run. */
	RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
	RTL_W8(MaxTxPacketSize, EarlySize);

	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);

	RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
	RTL_W8(MISC_1, RTL_R8(MISC_1) & ~PFM_D3COLD_EN);

	RTL_W8(DLLPR, RTL_R8(DLLPR) & ~TX_10M_PS_EN);

	rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);

	rtl_pcie_state_l2l3_enable(tp, false);

	/* Read the rg_saw counter from PHY page 0x0c42, reg 0x13. */
	rtl_writephy(tp, 0x1f, 0x0c42);
	rg_saw_cnt = (rtl_readphy(tp, 0x13) & 0x3fff);
	rtl_writephy(tp, 0x1f, 0x0000);
	if (rg_saw_cnt > 0) {
		u16 sw_cnt_1ms_ini;

		/* Derive the 1ms software counter seed and store it in
		 * the low 12 bits of OCP reg 0xd412. */
		sw_cnt_1ms_ini = 16000000/rg_saw_cnt;
		sw_cnt_1ms_ini &= 0x0fff;
		data = r8168_mac_ocp_read(tp, 0xd412);
		data &= ~0x0fff;
		data |= sw_cnt_1ms_ini;
		r8168_mac_ocp_write(tp, 0xd412, data);
	}

	/* Read-modify-write a handful of MAC OCP registers with
	 * vendor-specified field values. */
	data = r8168_mac_ocp_read(tp, 0xe056);
	data &= ~0xf0;
	data |= 0x70;
	r8168_mac_ocp_write(tp, 0xe056, data);

	data = r8168_mac_ocp_read(tp, 0xe052);
	data &= ~0x6000;
	data |= 0x8008;
	r8168_mac_ocp_write(tp, 0xe052, data);

	data = r8168_mac_ocp_read(tp, 0xe0d6);
	data &= ~0x01ff;
	data |= 0x017f;
	r8168_mac_ocp_write(tp, 0xe0d6, data);

	data = r8168_mac_ocp_read(tp, 0xd420);
	data &= ~0x0fff;
	data |= 0x047f;
	r8168_mac_ocp_write(tp, 0xd420, data);

	/* Final OCP pokes: 0xe63e pulsed 1->0, two registers cleared. */
	r8168_mac_ocp_write(tp, 0xe63e, 0x0001);
	r8168_mac_ocp_write(tp, 0xe63e, 0x0000);
	r8168_mac_ocp_write(tp, 0xc094, 0x0000);
	r8168_mac_ocp_write(tp, 0xc09e, 0x0000);
}
6167
/*
 * Common bring-up for the RTL8168EP family (used by the _1/_2/_3
 * variants below).  Stops the companion CMAC first, then programs the
 * usual ERI/FIFO/power registers.  Sequence order is vendor-mandated.
 */
static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl8168ep_stop_cmac(tp);

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);

	rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x2f, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x5f, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);

	rtl_csi_access_enable_1(tp);

	/* Raise the PCIe max read request size for DMA throughput. */
	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	/* Pulse bit 0 of ERI 0xdc: set-then-clear sequence. */
	rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
	rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);

	rtl_w0w1_eri(tp, 0xd4, ERIAR_MASK_1111, 0x1f80, 0x00, ERIAR_EXGMAC);

	rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC);

	/* Ungate RXDV so the receiver can run. */
	RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
	RTL_W8(MaxTxPacketSize, EarlySize);

	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);

	/* Adjust EEE LED frequency */
	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);

	rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC);

	RTL_W8(DLLPR, RTL_R8(DLLPR) & ~TX_10M_PS_EN);

	rtl_pcie_state_l2l3_enable(tp, false);
}
6208
/*
 * RTL8168EP rev 1: EPHY fix-ups first (ASPM/ClkReq must be off), then
 * the common 8168ep bring-up.
 */
static void rtl_hw_start_8168ep_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	/* Vendor-provided EPHY {offset, clear-mask, set-mask} fix-ups. */
	static const struct ephy_info e_info_8168ep_1[] = {
		{ 0x00, 0xffff,	0x10ab },
		{ 0x06, 0xffff,	0xf030 },
		{ 0x08, 0xffff,	0x2006 },
		{ 0x0d, 0xffff,	0x1666 },
		{ 0x0c, 0x3ff0,	0x0000 }
	};

	/* disable aspm and clock request before access ephy */
	RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
	RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
	rtl_ephy_init(tp, e_info_8168ep_1, ARRAY_SIZE(e_info_8168ep_1));

	rtl_hw_start_8168ep(tp);
}
6227
/*
 * RTL8168EP rev 2: EPHY fix-ups, common 8168ep init, then disable PFM
 * and the D3cold PFM mode.
 */
static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	/* Vendor-provided EPHY {offset, clear-mask, set-mask} fix-ups. */
	static const struct ephy_info e_info_8168ep_2[] = {
		{ 0x00, 0xffff,	0x10a3 },
		{ 0x19, 0xffff,	0xfc00 },
		{ 0x1e, 0xffff,	0x20ea }
	};

	/* disable aspm and clock request before access ephy */
	RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
	RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
	rtl_ephy_init(tp, e_info_8168ep_2, ARRAY_SIZE(e_info_8168ep_2));

	rtl_hw_start_8168ep(tp);

	RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
	RTL_W8(MISC_1, RTL_R8(MISC_1) & ~PFM_D3COLD_EN);
}
6247
/*
 * RTL8168EP rev 3: like rev 2, plus three extra MAC OCP register
 * adjustments with vendor-specified values.
 */
static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	u32 data;
	/* Vendor-provided EPHY {offset, clear-mask, set-mask} fix-ups. */
	static const struct ephy_info e_info_8168ep_3[] = {
		{ 0x00, 0xffff,	0x10a3 },
		{ 0x19, 0xffff,	0x7c00 },
		{ 0x1e, 0xffff,	0x20eb },
		{ 0x0d, 0xffff,	0x1666 }
	};

	/* disable aspm and clock request before access ephy */
	RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
	RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
	rtl_ephy_init(tp, e_info_8168ep_3, ARRAY_SIZE(e_info_8168ep_3));

	rtl_hw_start_8168ep(tp);

	RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
	RTL_W8(MISC_1, RTL_R8(MISC_1) & ~PFM_D3COLD_EN);

	/* Keep the top nibble of 0xd3e2, set the low 12 bits to 0x271. */
	data = r8168_mac_ocp_read(tp, 0xd3e2);
	data &= 0xf000;
	data |= 0x0271;
	r8168_mac_ocp_write(tp, 0xd3e2, data);

	/* Clear the low byte of 0xd3e4. */
	data = r8168_mac_ocp_read(tp, 0xd3e4);
	data &= 0xff00;
	r8168_mac_ocp_write(tp, 0xd3e4, data);

	/* Set bit 7 of 0xe860. */
	data = r8168_mac_ocp_read(tp, 0xe860);
	data |= 0x0080;
	r8168_mac_ocp_write(tp, 0xe860, data);
}
6282
/*
 * Start an RTL8168-family chip: common register setup (unlock config,
 * rx/tx rings, interrupt mitigation), then dispatch to the per-version
 * bring-up routine, then enable tx/rx and the rx filter.
 */
static void rtl_hw_start_8168(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	/* Unlock the Config registers for the duration of the setup. */
	RTL_W8(Cfg9346, Cfg9346_Unlock);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	/* NOTE(review): OR-ing the live CPlusCmd register into the cached
	 * value looks odd (accumulates whatever bits are currently set);
	 * verify against later upstream versions before touching. */
	tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;

	RTL_W16(CPlusCmd, tp->cp_cmd);

	RTL_W16(IntrMitigate, 0x5151);

	/* Work around for RxFIFO overflow. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
		tp->event_slow |= RxFIFOOver | PCSTimeout;
		tp->event_slow &= ~RxOverflow;
	}

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	rtl_set_rx_tx_config_registers(tp);

	/* Dummy read to flush posted writes before per-chip setup. */
	RTL_R8(IntrMask);

	/* Dispatch to the per-revision initialization routine. */
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
		rtl_hw_start_8168bb(tp);
		break;

	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		rtl_hw_start_8168bef(tp);
		break;

	case RTL_GIGA_MAC_VER_18:
		rtl_hw_start_8168cp_1(tp);
		break;

	case RTL_GIGA_MAC_VER_19:
		rtl_hw_start_8168c_1(tp);
		break;

	case RTL_GIGA_MAC_VER_20:
		rtl_hw_start_8168c_2(tp);
		break;

	case RTL_GIGA_MAC_VER_21:
		rtl_hw_start_8168c_3(tp);
		break;

	case RTL_GIGA_MAC_VER_22:
		rtl_hw_start_8168c_4(tp);
		break;

	case RTL_GIGA_MAC_VER_23:
		rtl_hw_start_8168cp_2(tp);
		break;

	case RTL_GIGA_MAC_VER_24:
		rtl_hw_start_8168cp_3(tp);
		break;

	case RTL_GIGA_MAC_VER_25:
	case RTL_GIGA_MAC_VER_26:
	case RTL_GIGA_MAC_VER_27:
		rtl_hw_start_8168d(tp);
		break;

	case RTL_GIGA_MAC_VER_28:
		rtl_hw_start_8168d_4(tp);
		break;

	case RTL_GIGA_MAC_VER_31:
		rtl_hw_start_8168dp(tp);
		break;

	case RTL_GIGA_MAC_VER_32:
	case RTL_GIGA_MAC_VER_33:
		rtl_hw_start_8168e_1(tp);
		break;
	case RTL_GIGA_MAC_VER_34:
		rtl_hw_start_8168e_2(tp);
		break;

	case RTL_GIGA_MAC_VER_35:
	case RTL_GIGA_MAC_VER_36:
		rtl_hw_start_8168f_1(tp);
		break;

	case RTL_GIGA_MAC_VER_38:
		rtl_hw_start_8411(tp);
		break;

	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
		rtl_hw_start_8168g_1(tp);
		break;
	case RTL_GIGA_MAC_VER_42:
		rtl_hw_start_8168g_2(tp);
		break;

	case RTL_GIGA_MAC_VER_44:
		rtl_hw_start_8411_2(tp);
		break;

	case RTL_GIGA_MAC_VER_45:
	case RTL_GIGA_MAC_VER_46:
		rtl_hw_start_8168h_1(tp);
		break;

	case RTL_GIGA_MAC_VER_49:
		rtl_hw_start_8168ep_1(tp);
		break;

	case RTL_GIGA_MAC_VER_50:
		rtl_hw_start_8168ep_2(tp);
		break;

	case RTL_GIGA_MAC_VER_51:
		rtl_hw_start_8168ep_3(tp);
		break;

	default:
		printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
			dev->name, tp->mac_version);
		break;
	}

	/* Re-lock the Config registers and start the MAC. */
	RTL_W8(Cfg9346, Cfg9346_Lock);

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

	rtl_set_rx_mode(dev);

	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
}
6424
/* CPlusCmd bits that must be cleared on 810x-class chips before the
 * cached cp_cmd value is written back (see rtl_hw_start_8101). */
#define R810X_CPCMD_QUIRK_MASK (\
	EnableBist | \
	Mac_dbgo_oe | \
	Force_half_dup | \
	Force_rxflow_en | \
	Force_txflow_en | \
	Cxpl_dbg_sel | \
	ASF | \
	PktCntrDisable | \
	Mac_dbgo_sel)
6435
/*
 * RTL8102E rev 1 bring-up: debug-register NAK fix, Config1/3 power
 * bits, LED selection quirk, then the EPHY fix-up table.
 */
static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	/* Vendor-provided EPHY {offset, clear-mask, set-mask} fix-ups;
	 * note 0x01 is deliberately written three times. */
	static const struct ephy_info e_info_8102e_1[] = {
		{ 0x01,	0, 0x6e65 },
		{ 0x02,	0, 0x091f },
		{ 0x03,	0, 0xc2f9 },
		{ 0x06,	0, 0xafb5 },
		{ 0x07,	0, 0x0e00 },
		{ 0x19,	0, 0xec80 },
		{ 0x01,	0, 0x2e65 },
		{ 0x01,	0, 0x6e65 }
	};
	u8 cfg1;

	rtl_csi_access_enable_2(tp);

	RTL_W8(DBG_REG, FIX_NAK_1);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(Config1,
	       LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);

	/* If both LED select bits latched, drop LEDS0. */
	cfg1 = RTL_R8(Config1);
	if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
		RTL_W8(Config1, cfg1 & ~LEDS0);

	rtl_ephy_init(tp, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
}
6468
/*
 * RTL8102E rev 2 bring-up: CSI access, PCIe read-request tweak and the
 * Config1/Config3 power-management bits.  Also the base for rev 3.
 */
static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	rtl_csi_access_enable_2(tp);

	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);

	RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
}
6481
/* RTL8102E rev 3: rev 2 setup plus one additional EPHY write. */
static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
{
	rtl_hw_start_8102e_2(tp);

	rtl_ephy_write(tp, 0x03, 0xc2f9);
}
6488
/*
 * RTL8105E rev 1 bring-up: ASPM exit forcing, early tally counter off,
 * OOB/NDP MCU bits, then the EPHY fix-up table.
 */
static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	/* Vendor-provided EPHY {offset, clear-mask, set-mask} fix-ups. */
	static const struct ephy_info e_info_8105e_1[] = {
		{ 0x07,	0, 0x4000 },
		{ 0x19,	0, 0x0200 },
		{ 0x19,	0, 0x0020 },
		{ 0x1e,	0, 0x2000 },
		{ 0x03,	0, 0x0001 },
		{ 0x19,	0, 0x0100 },
		{ 0x19,	0, 0x0004 },
		{ 0x0a,	0, 0x0020 }
	};

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);

	/* Disable Early Tally Counter */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000);

	RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
	RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);

	rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));

	rtl_pcie_state_l2l3_enable(tp, false);
}
6516
/* RTL8105E rev 2: rev 1 setup plus bit 15 set in EPHY reg 0x1e. */
static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
{
	rtl_hw_start_8105e_1(tp);
	rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000);
}
6522
/*
 * RTL8402 bring-up: ASPM exit forcing, auto-FIFO TX config, EPHY
 * fix-ups and a block of ERI register writes.  Order is as specified
 * by the vendor.
 */
static void rtl_hw_start_8402(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	/* Vendor-provided EPHY {offset, clear-mask, set-mask} fix-ups. */
	static const struct ephy_info e_info_8402[] = {
		{ 0x19,	0xffff, 0xff64 },
		{ 0x1e,	0, 0x4000 }
	};

	rtl_csi_access_enable_2(tp);

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);

	RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
	/* Take the MAC out of out-of-band (OOB) mode. */
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);

	rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));

	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);

	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
	/* Pulse bit 0 of ERI 0xdc: set-then-clear sequence. */
	rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
	rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
	rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);

	rtl_pcie_state_l2l3_enable(tp, false);
}
6553
/*
 * RTL8106 bring-up: ASPM exit forcing, LAN disable/early-tally bits in
 * MISC, MCU OOB bits, and PCIe L2/L3 disable.
 */
static void rtl_hw_start_8106(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* Force LAN exit from ASPM if Rx/Tx are not idle */
	RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);

	RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
	RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
	RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);

	rtl_pcie_state_l2l3_enable(tp, false);
}
6567
/*
 * Start an RTL8101-class (fast ethernet) chip: event mask and PCIe
 * no-snoop quirks, common register setup, per-version dispatch, then
 * enable tx/rx and the rx filter.
 */
static void rtl_hw_start_8101(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	/* Newer 810x chips do not signal RxFIFOOver reliably. */
	if (tp->mac_version >= RTL_GIGA_MAC_VER_30)
		tp->event_slow &= ~RxFIFOOver;

	/* Enable PCIe no-snoop on the revisions that require it. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_16)
		pcie_capability_set_word(pdev, PCI_EXP_DEVCTL,
					 PCI_EXP_DEVCTL_NOSNOOP_EN);

	/* Unlock the Config registers for the duration of the setup. */
	RTL_W8(Cfg9346, Cfg9346_Unlock);

	RTL_W8(MaxTxPacketSize, TxPacketMax);

	rtl_set_rx_max_size(ioaddr, rx_buf_sz);

	/* Strip debug/quirk bits before writing the cached cp_cmd back. */
	tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
	RTL_W16(CPlusCmd, tp->cp_cmd);

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	rtl_set_rx_tx_config_registers(tp);

	/* Dispatch to the per-revision initialization routine. */
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_07:
		rtl_hw_start_8102e_1(tp);
		break;

	case RTL_GIGA_MAC_VER_08:
		rtl_hw_start_8102e_3(tp);
		break;

	case RTL_GIGA_MAC_VER_09:
		rtl_hw_start_8102e_2(tp);
		break;

	case RTL_GIGA_MAC_VER_29:
		rtl_hw_start_8105e_1(tp);
		break;
	case RTL_GIGA_MAC_VER_30:
		rtl_hw_start_8105e_2(tp);
		break;

	case RTL_GIGA_MAC_VER_37:
		rtl_hw_start_8402(tp);
		break;

	case RTL_GIGA_MAC_VER_39:
		rtl_hw_start_8106(tp);
		break;
	case RTL_GIGA_MAC_VER_43:
		rtl_hw_start_8168g_2(tp);
		break;
	case RTL_GIGA_MAC_VER_47:
	case RTL_GIGA_MAC_VER_48:
		rtl_hw_start_8168h_1(tp);
		break;
	}

	/* Re-lock the Config registers and start the MAC. */
	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* No interrupt mitigation on the 810x family. */
	RTL_W16(IntrMitigate, 0x0000);

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

	rtl_set_rx_mode(dev);

	/* Dummy read to flush posted writes. */
	RTL_R8(IntrMask);

	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
}
6643
6644static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
6645{
6646	struct rtl8169_private *tp = netdev_priv(dev);
6647
6648	if (new_mtu < ETH_ZLEN ||
6649	    new_mtu > rtl_chip_infos[tp->mac_version].jumbo_max)
6650		return -EINVAL;
6651
6652	if (new_mtu > ETH_DATA_LEN)
6653		rtl_hw_jumbo_enable(tp);
6654	else
6655		rtl_hw_jumbo_disable(tp);
6656
6657	dev->mtu = new_mtu;
6658	netdev_update_features(dev);
6659
6660	return 0;
6661}
6662
/* Poison an Rx descriptor: bogus DMA address plus cleared ownership
 * and size bits, so the NIC will not use the slot and stray DMA is
 * easy to spot in a memory dump. */
static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
{
	desc->addr = cpu_to_le64(0x0badbadbadbadbadull);
	desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
}
6668
6669static void rtl8169_free_rx_databuff(struct rtl8169_private *tp,
6670				     void **data_buff, struct RxDesc *desc)
6671{
6672	dma_unmap_single(&tp->pci_dev->dev, le64_to_cpu(desc->addr), rx_buf_sz,
6673			 DMA_FROM_DEVICE);
6674
6675	kfree(*data_buff);
6676	*data_buff = NULL;
6677	rtl8169_make_unusable_by_asic(desc);
6678}
6679
/* Hand an Rx descriptor (back) to the NIC: preserve the RingEnd bit,
 * then set DescOwn together with the buffer size.  The dma_wmb() must
 * stay before the opts1 store so the device never sees DescOwn before
 * the rest of the descriptor is visible. */
static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
{
	u32 eor = le32_to_cpu(desc->opts1) & RingEnd;

	/* Force memory writes to complete before releasing descriptor */
	dma_wmb();

	desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
}
6689
/* Store a fresh DMA address in an Rx descriptor, then transfer
 * ownership to the NIC.  The address store must precede the DescOwn
 * flip (ordering enforced inside rtl8169_mark_to_asic). */
static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
				       u32 rx_buf_sz)
{
	desc->addr = cpu_to_le64(mapping);

	rtl8169_mark_to_asic(desc, rx_buf_sz);
}
6696
/* Round a buffer pointer up to the next 16-byte boundary. */
static inline void *rtl8169_align(void *data)
{
	long addr = (long)data;

	return (void *)ALIGN(addr, 16);
}
6701
/*
 * Allocate and DMA-map one Rx buffer on the device's NUMA node and
 * publish it through @desc.  Returns the (possibly unaligned) raw
 * allocation pointer the caller must keep for kfree(), or NULL on
 * failure.  NOTE(review): return type is declared struct sk_buff * but
 * the function returns the raw buffer; callers store it as void *.
 */
static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
					     struct RxDesc *desc)
{
	void *data;
	dma_addr_t mapping;
	struct device *d = &tp->pci_dev->dev;
	struct net_device *dev = tp->dev;
	int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;

	data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
	if (!data)
		return NULL;

	/* Retry with 15 bytes of slack if the first allocation was not
	 * 16-byte aligned; the slack lets rtl8169_align() round up. */
	if (rtl8169_align(data) != data) {
		kfree(data);
		data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node);
		if (!data)
			return NULL;
	}

	/* Map the aligned portion for device writes. */
	mapping = dma_map_single(d, rtl8169_align(data), rx_buf_sz,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(d, mapping))) {
		if (net_ratelimit())
			netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
		goto err_out;
	}

	/* Publish the buffer to the NIC via the descriptor. */
	rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
	return data;

err_out:
	kfree(data);
	return NULL;
}
6737
6738static void rtl8169_rx_clear(struct rtl8169_private *tp)
6739{
6740	unsigned int i;
6741
6742	for (i = 0; i < NUM_RX_DESC; i++) {
6743		if (tp->Rx_databuff[i]) {
6744			rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i,
6745					    tp->RxDescArray + i);
6746		}
6747	}
6748}
6749
/* Flag a descriptor as the ring terminator so the NIC wraps to the
 * first entry after it. */
static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->opts1 |= cpu_to_le32(RingEnd);
}
6754
/*
 * Populate every empty slot of the Rx ring with a freshly allocated,
 * DMA-mapped buffer and terminate the ring.  Returns 0 on success; on
 * allocation failure all buffers are released and -ENOMEM is returned.
 */
static int rtl8169_rx_fill(struct rtl8169_private *tp)
{
	unsigned int i;

	for (i = 0; i < NUM_RX_DESC; i++) {
		void *data;

		/* Keep buffers that survived a previous fill. */
		if (tp->Rx_databuff[i])
			continue;

		data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
		if (!data) {
			rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
			goto err_out;
		}
		tp->Rx_databuff[i] = data;
	}

	rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
	return 0;

err_out:
	/* All-or-nothing: undo the partial fill. */
	rtl8169_rx_clear(tp);
	return -ENOMEM;
}
6780
6781static int rtl8169_init_ring(struct net_device *dev)
6782{
6783	struct rtl8169_private *tp = netdev_priv(dev);
6784
6785	rtl8169_init_ring_indexes(tp);
6786
6787	memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
6788	memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *));
6789
6790	return rtl8169_rx_fill(tp);
6791}
6792
6793static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb,
6794				 struct TxDesc *desc)
6795{
6796	unsigned int len = tx_skb->len;
6797
6798	dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE);
6799
6800	desc->opts1 = 0x00;
6801	desc->opts2 = 0x00;
6802	desc->addr = 0x00;
6803	tx_skb->len = 0;
6804}
6805
6806static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
6807				   unsigned int n)
6808{
6809	unsigned int i;
6810
6811	for (i = 0; i < n; i++) {
6812		unsigned int entry = (start + i) % NUM_TX_DESC;
6813		struct ring_info *tx_skb = tp->tx_skb + entry;
6814		unsigned int len = tx_skb->len;
6815
6816		if (len) {
6817			struct sk_buff *skb = tx_skb->skb;
6818
6819			rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
6820					     tp->TxDescArray + entry);
6821			if (skb) {
6822				tp->dev->stats.tx_dropped++;
6823				dev_kfree_skb_any(skb);
6824				tx_skb->skb = NULL;
6825			}
6826		}
6827	}
6828}
6829
/* Drop every in-flight Tx packet and reset the Tx ring indexes. */
static void rtl8169_tx_clear(struct rtl8169_private *tp)
{
	rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
	tp->cur_tx = tp->dirty_tx = 0;
}
6835
/*
 * Full device reset executed from task context: quiesce NAPI and the
 * Tx queue, reset the hardware, recycle the rings, then restart.
 * The ordering below is deliberate; do not rearrange.
 */
static void rtl_reset_work(struct rtl8169_private *tp)
{
	struct net_device *dev = tp->dev;
	int i;

	napi_disable(&tp->napi);
	netif_stop_queue(dev);
	/* Wait for any in-flight softirq handlers to finish. */
	synchronize_sched();

	rtl8169_hw_reset(tp);

	/* Give every Rx buffer back to the NIC; the buffers themselves
	 * are reused, only ownership is re-armed. */
	for (i = 0; i < NUM_RX_DESC; i++)
		rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);

	/* Pending Tx packets are dropped, not retransmitted. */
	rtl8169_tx_clear(tp);
	rtl8169_init_ring_indexes(tp);

	napi_enable(&tp->napi);
	rtl_hw_start(dev);
	netif_wake_queue(dev);
	rtl8169_check_link_status(dev, tp, tp->mmio_addr);
}
6858
/* ndo_tx_timeout handler: defer a full reset to the driver task. */
static void rtl8169_tx_timeout(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
6865
/*
 * Map and queue the paged fragments of @skb into the Tx ring, starting
 * one entry after tp->cur_tx (the caller owns the head entry for the
 * linear part).  @opts carries the precomputed opts1/opts2 descriptor
 * words.  Returns the number of fragments queued, or -EIO after
 * unwinding all mappings on a DMA mapping failure.
 */
static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
			      u32 *opts)
{
	struct skb_shared_info *info = skb_shinfo(skb);
	unsigned int cur_frag, entry;
	struct TxDesc *uninitialized_var(txd);
	struct device *d = &tp->pci_dev->dev;

	entry = tp->cur_tx;
	for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
		const skb_frag_t *frag = info->frags + cur_frag;
		dma_addr_t mapping;
		u32 status, len;
		void *addr;

		entry = (entry + 1) % NUM_TX_DESC;

		txd = tp->TxDescArray + entry;
		len = skb_frag_size(frag);
		addr = skb_frag_address(frag);
		mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(d, mapping))) {
			if (net_ratelimit())
				netif_err(tp, drv, tp->dev,
					  "Failed to map TX fragments DMA!\n");
			goto err_out;
		}

		/* Anti gcc 2.95.3 bugware (sic) */
		/* RingEnd is set only on the physically last ring slot. */
		status = opts[0] | len |
			(RingEnd * !((entry + 1) % NUM_TX_DESC));

		txd->opts1 = cpu_to_le32(status);
		txd->opts2 = cpu_to_le32(opts[1]);
		txd->addr = cpu_to_le64(mapping);

		tp->tx_skb[entry].len = len;
	}

	if (cur_frag) {
		/* Attach the skb to the last fragment only; that is the
		 * entry whose completion frees the packet. */
		tp->tx_skb[entry].skb = skb;
		txd->opts1 |= cpu_to_le32(LastFrag);
	}

	return cur_frag;

err_out:
	/* Unwind the fragments queued so far (head entry untouched). */
	rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag);
	return -EIO;
}
6916
6917static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb)
6918{
6919	return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34;
 
 
 
6920}
6921
6922static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
6923				      struct net_device *dev);
/* r8169_csum_workaround()
 * The hw limits the value of the transport offset. When the offset is
 * out of range, calculate the checksum in software instead.
 */
static void r8169_csum_workaround(struct rtl8169_private *tp,
				  struct sk_buff *skb)
{
	if (skb_shinfo(skb)->gso_size) {
		netdev_features_t features = tp->dev->features;
		struct sk_buff *segs, *nskb;

		/* Segment in software, with the offending offloads
		 * masked out, then transmit each segment normally. */
		features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
		segs = skb_gso_segment(skb, features);
		if (IS_ERR(segs) || !segs)
			goto drop;

		do {
			nskb = segs;
			segs = segs->next;
			nskb->next = NULL;
			rtl8169_start_xmit(nskb, tp->dev);
		} while (segs);

		/* The original skb was fully replaced by its segments. */
		dev_consume_skb_any(skb);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* Compute the checksum in software and retransmit. */
		if (skb_checksum_help(skb) < 0)
			goto drop;

		rtl8169_start_xmit(skb, tp->dev);
	} else {
		struct net_device_stats *stats;

drop:
		stats = &tp->dev->stats;
		stats->tx_dropped++;
		dev_kfree_skb_any(skb);
	}
}
6962
/* msdn_giant_send_check()
 * According to Microsoft's documentation, the TCP pseudo-header for
 * IPv6 TCP large-send packets excludes the packet length.
 */
static int msdn_giant_send_check(struct sk_buff *skb)
{
	const struct ipv6hdr *ipv6h;
	struct tcphdr *th;
	int ret;

	/* The headers are rewritten below, so make them writable. */
	ret = skb_cow_head(skb, 0);
	if (ret)
		return ret;

	ipv6h = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	/* Recompute the pseudo-header checksum with a zero length. */
	th->check = 0;
	th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0);

	return ret;
}
6985
6986static inline __be16 get_protocol(struct sk_buff *skb)
6987{
6988	__be16 protocol;
6989
6990	if (skb->protocol == htons(ETH_P_8021Q))
6991		protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
6992	else
6993		protocol = skb->protocol;
6994
6995	return protocol;
6996}
6997
6998static bool rtl8169_tso_csum_v1(struct rtl8169_private *tp,
6999				struct sk_buff *skb, u32 *opts)
7000{
 
7001	u32 mss = skb_shinfo(skb)->gso_size;
 
7002
7003	if (mss) {
7004		opts[0] |= TD_LSO;
7005		opts[0] |= min(mss, TD_MSS_MAX) << TD0_MSS_SHIFT;
7006	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
7007		const struct iphdr *ip = ip_hdr(skb);
7008
 
 
 
7009		if (ip->protocol == IPPROTO_TCP)
7010			opts[0] |= TD0_IP_CS | TD0_TCP_CS;
7011		else if (ip->protocol == IPPROTO_UDP)
7012			opts[0] |= TD0_IP_CS | TD0_UDP_CS;
7013		else
7014			WARN_ON_ONCE(1);
7015	}
7016
7017	return true;
7018}
7019
/*
 * Fill the descriptor option words for newer chips (v2 format), which
 * carry the transport-header offset explicitly.  Returns false when
 * hardware offload cannot be used and the caller must fall back to
 * r8169_csum_workaround(), true otherwise.
 */
static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
				struct sk_buff *skb, u32 *opts)
{
	u32 transport_offset = (u32)skb_transport_offset(skb);
	u32 mss = skb_shinfo(skb)->gso_size;

	if (mss) {
		/* TSO path: the offset must fit the GTTCPHO field. */
		if (transport_offset > GTTCPHO_MAX) {
			netif_warn(tp, tx_err, tp->dev,
				   "Invalid transport offset 0x%x for TSO\n",
				   transport_offset);
			return false;
		}

		switch (get_protocol(skb)) {
		case htons(ETH_P_IP):
			opts[0] |= TD1_GTSENV4;
			break;

		case htons(ETH_P_IPV6):
			/* IPv6 LSO needs the pseudo-header fixup first. */
			if (msdn_giant_send_check(skb))
				return false;

			opts[0] |= TD1_GTSENV6;
			break;

		default:
			WARN_ON_ONCE(1);
			break;
		}

		opts[0] |= transport_offset << GTTCPHO_SHIFT;
		opts[1] |= min(mss, TD_MSS_MAX) << TD1_MSS_SHIFT;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 ip_protocol;

		/* Short frames on buggy hw: checksum and pad in sw. */
		if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
			return !(skb_checksum_help(skb) || eth_skb_pad(skb));

		/* Checksum path: the offset must fit the TCPHO field. */
		if (transport_offset > TCPHO_MAX) {
			netif_warn(tp, tx_err, tp->dev,
				   "Invalid transport offset 0x%x\n",
				   transport_offset);
			return false;
		}

		switch (get_protocol(skb)) {
		case htons(ETH_P_IP):
			opts[1] |= TD1_IPv4_CS;
			ip_protocol = ip_hdr(skb)->protocol;
			break;

		case htons(ETH_P_IPV6):
			opts[1] |= TD1_IPv6_CS;
			ip_protocol = ipv6_hdr(skb)->nexthdr;
			break;

		default:
			/* Unknown L3: force the WARN below. */
			ip_protocol = IPPROTO_RAW;
			break;
		}

		if (ip_protocol == IPPROTO_TCP)
			opts[1] |= TD1_TCP_CS;
		else if (ip_protocol == IPPROTO_UDP)
			opts[1] |= TD1_UDP_CS;
		else
			WARN_ON_ONCE(1);

		opts[1] |= transport_offset << TCPHO_SHIFT;
	} else {
		/* No offload requested; only the pad bug needs care. */
		if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
			return !eth_skb_pad(skb);
	}

	return true;
}
7097
/*
 * Queue one skb on the Tx ring (.ndo_start_xmit).
 *
 * Maps the linear part and fragments for DMA, fills the descriptor option
 * words (VLAN tag, checksum/TSO bits), transfers descriptor ownership to
 * the NIC and kicks the Tx poll register.  Stops the queue when the ring
 * no longer has room for a maximally fragmented packet.
 */
static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	unsigned int entry = tp->cur_tx % NUM_TX_DESC;
	struct TxDesc *txd = tp->TxDescArray + entry;
	void __iomem *ioaddr = tp->mmio_addr;
	struct device *d = &tp->pci_dev->dev;
	dma_addr_t mapping;
	u32 status, len;
	u32 opts[2];
	int frags;

	/* The queue should have been stopped before the ring filled up. */
	if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
		netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
		goto err_stop_0;
	}

	if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
		goto err_stop_0;

	opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb));
	opts[0] = DescOwn;

	/* Chip-specific checksum/TSO setup; on failure fall back to the
	 * software workaround, which consumes the skb.
	 */
	if (!tp->tso_csum(tp, skb, opts)) {
		r8169_csum_workaround(tp, skb);
		return NETDEV_TX_OK;
	}

	len = skb_headlen(skb);
	mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(d, mapping))) {
		if (net_ratelimit())
			netif_err(tp, drv, dev, "Failed to map TX DMA!\n");
		goto err_dma_0;
	}

	tp->tx_skb[entry].len = len;
	txd->addr = cpu_to_le64(mapping);

	frags = rtl8169_xmit_frags(tp, skb, opts);
	if (frags < 0)
		goto err_dma_1;
	else if (frags)
		opts[0] |= FirstFrag;
	else {
		opts[0] |= FirstFrag | LastFrag;
		/* Single-descriptor packet: remember the skb here so that
		 * rtl_tx() can free it on completion.
		 */
		tp->tx_skb[entry].skb = skb;
	}

	txd->opts2 = cpu_to_le32(opts[1]);

	skb_tx_timestamp(skb);

	/* Force memory writes to complete before releasing descriptor */
	dma_wmb();

	/* Anti gcc 2.95.3 bugware (sic) */
	status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
	txd->opts1 = cpu_to_le32(status);

	/* Force all memory writes to complete before notifying device */
	wmb();

	tp->cur_tx += frags + 1;

	RTL_W8(TxPoll, NPQ);

	mmiowb();

	if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
		/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
		 * not miss a ring update when it notices a stopped queue.
		 */
		smp_wmb();
		netif_stop_queue(dev);
		/* Sync with rtl_tx:
		 * - publish queue status and cur_tx ring index (write barrier)
		 * - refresh dirty_tx ring index (read barrier).
		 * May the current thread have a pessimistic view of the ring
		 * status and forget to wake up queue, a racing rtl_tx thread
		 * can't.
		 */
		smp_mb();
		if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS))
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;

err_dma_1:
	rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
err_dma_0:
	dev_kfree_skb_any(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;

err_stop_0:
	netif_stop_queue(dev);
	dev->stats.tx_dropped++;
	return NETDEV_TX_BUSY;
}
7200
/*
 * Handle a PCI bus error reported via the SYSErr slow event: log the PCI
 * command/status registers, clear the latched error bits, work around the
 * boot-time DAC problem, then reset the chip and schedule a full driver
 * reset from workqueue context.
 */
static void rtl8169_pcierr_interrupt(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	u16 pci_status, pci_cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
	pci_read_config_word(pdev, PCI_STATUS, &pci_status);

	netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
		  pci_cmd, pci_status);

	/*
	 * The recovery sequence below admits a very elaborated explanation:
	 * - it seems to work;
	 * - I did not see what else could be done;
	 * - it makes iop3xx happy.
	 *
	 * Feel free to adjust to your needs.
	 */
	if (pdev->broken_parity_status)
		pci_cmd &= ~PCI_COMMAND_PARITY;
	else
		pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;

	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);

	/* Write ones to clear the latched (RW1C) error status bits. */
	pci_write_config_word(pdev, PCI_STATUS,
		pci_status & (PCI_STATUS_DETECTED_PARITY |
		PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
		PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));

	/* The infamous DAC f*ckup only happens at boot time */
	if ((tp->cp_cmd & PCIDAC) && !tp->cur_rx) {
		void __iomem *ioaddr = tp->mmio_addr;

		netif_info(tp, intr, dev, "disabling PCI DAC\n");
		tp->cp_cmd &= ~PCIDAC;
		RTL_W16(CPlusCmd, tp->cp_cmd);
		dev->features &= ~NETIF_F_HIGHDMA;
	}

	rtl8169_hw_reset(tp);

	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
7247
/*
 * Reclaim completed Tx descriptors: unmap DMA, update stats, free skbs,
 * then wake the queue if it was stopped and room is available again.
 * Runs from NAPI context (rtl8169_poll).
 */
static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
{
	unsigned int dirty_tx, tx_left;

	dirty_tx = tp->dirty_tx;
	/* Pairs with the write barrier in rtl8169_start_xmit: make sure a
	 * fresh cur_tx is observed after dirty_tx.
	 */
	smp_rmb();
	tx_left = tp->cur_tx - dirty_tx;

	while (tx_left > 0) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct ring_info *tx_skb = tp->tx_skb + entry;
		u32 status;

		status = le32_to_cpu(tp->TxDescArray[entry].opts1);
		if (status & DescOwn)
			break;

		/* This barrier is needed to keep us from reading
		 * any other fields out of the Tx descriptor until
		 * we know the status of DescOwn
		 */
		dma_rmb();

		rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
				     tp->TxDescArray + entry);
		/* The skb is stored on the last descriptor of a packet. */
		if (status & LastFrag) {
			u64_stats_update_begin(&tp->tx_stats.syncp);
			tp->tx_stats.packets++;
			tp->tx_stats.bytes += tx_skb->skb->len;
			u64_stats_update_end(&tp->tx_stats.syncp);
			dev_kfree_skb_any(tx_skb->skb);
			tx_skb->skb = NULL;
		}
		dirty_tx++;
		tx_left--;
	}

	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		/* Sync with rtl8169_start_xmit:
		 * - publish dirty_tx ring index (write barrier)
		 * - refresh cur_tx ring index and queue status (read barrier)
		 * May the current thread miss the stopped queue condition,
		 * a racing xmit thread can only have a right view of the
		 * ring status.
		 */
		smp_mb();
		if (netif_queue_stopped(dev) &&
		    TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
			netif_wake_queue(dev);
		}
		/*
		 * 8168 hack: TxPoll requests are lost when the Tx packets are
		 * too close. Let's kick an extra TxPoll request when a burst
		 * of start_xmit activity is detected (if it is not detected,
		 * it is slow enough). -- FR
		 */
		if (tp->cur_tx != dirty_tx) {
			void __iomem *ioaddr = tp->mmio_addr;

			RTL_W8(TxPoll, NPQ);
		}
	}
}
7312
7313static inline int rtl8169_fragmented_frame(u32 status)
7314{
7315	return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
7316}
7317
7318static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
7319{
7320	u32 status = opts1 & RxProtoMask;
7321
7322	if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
7323	    ((status == RxProtoUDP) && !(opts1 & UDPFail)))
7324		skb->ip_summed = CHECKSUM_UNNECESSARY;
7325	else
7326		skb_checksum_none_assert(skb);
7327}
7328
7329static struct sk_buff *rtl8169_try_rx_copy(void *data,
7330					   struct rtl8169_private *tp,
7331					   int pkt_size,
7332					   dma_addr_t addr)
7333{
7334	struct sk_buff *skb;
7335	struct device *d = &tp->pci_dev->dev;
7336
7337	data = rtl8169_align(data);
7338	dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
7339	prefetch(data);
7340	skb = napi_alloc_skb(&tp->napi, pkt_size);
7341	if (skb)
7342		memcpy(skb->data, data, pkt_size);
7343	dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
7344
7345	return skb;
7346}
7347
/*
 * NAPI Rx handler: walk the Rx ring, copy completed frames into skbs and
 * feed them to the stack via GRO, then recycle each descriptor back to
 * the NIC.  Returns the number of descriptors processed (NAPI budget
 * accounting).
 */
static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget)
{
	unsigned int cur_rx, rx_left;
	unsigned int count;

	cur_rx = tp->cur_rx;

	for (rx_left = min(budget, NUM_RX_DESC); rx_left > 0; rx_left--, cur_rx++) {
		unsigned int entry = cur_rx % NUM_RX_DESC;
		struct RxDesc *desc = tp->RxDescArray + entry;
		u32 status;

		status = le32_to_cpu(desc->opts1) & tp->opts1_mask;
		if (status & DescOwn)
			break;

		/* This barrier is needed to keep us from reading
		 * any other fields out of the Rx descriptor until
		 * we know the status of DescOwn
		 */
		dma_rmb();

		if (unlikely(status & RxRES)) {
			netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
				   status);
			dev->stats.rx_errors++;
			if (status & (RxRWT | RxRUNT))
				dev->stats.rx_length_errors++;
			if (status & RxCRC)
				dev->stats.rx_crc_errors++;
			/* FIFO overflow needs a chip reset to recover. */
			if (status & RxFOVF) {
				rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
				dev->stats.rx_fifo_errors++;
			}
			/* With RXALL, still deliver runt/CRC-error frames. */
			if ((status & (RxRUNT | RxCRC)) &&
			    !(status & (RxRWT | RxFOVF)) &&
			    (dev->features & NETIF_F_RXALL))
				goto process_pkt;
		} else {
			struct sk_buff *skb;
			dma_addr_t addr;
			int pkt_size;

process_pkt:
			addr = le64_to_cpu(desc->addr);
			/* Strip the 4-byte FCS unless the user asked for it. */
			if (likely(!(dev->features & NETIF_F_RXFCS)))
				pkt_size = (status & 0x00003fff) - 4;
			else
				pkt_size = status & 0x00003fff;

			/*
			 * The driver does not support incoming fragmented
			 * frames. They are seen as a symptom of over-mtu
			 * sized frames.
			 */
			if (unlikely(rtl8169_fragmented_frame(status))) {
				dev->stats.rx_dropped++;
				dev->stats.rx_length_errors++;
				goto release_descriptor;
			}

			skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
						  tp, pkt_size, addr);
			if (!skb) {
				dev->stats.rx_dropped++;
				goto release_descriptor;
			}

			rtl8169_rx_csum(skb, status);
			skb_put(skb, pkt_size);
			skb->protocol = eth_type_trans(skb, dev);

			rtl8169_rx_vlan_tag(desc, skb);

			if (skb->pkt_type == PACKET_MULTICAST)
				dev->stats.multicast++;

			napi_gro_receive(&tp->napi, skb);

			u64_stats_update_begin(&tp->rx_stats.syncp);
			tp->rx_stats.packets++;
			tp->rx_stats.bytes += pkt_size;
			u64_stats_update_end(&tp->rx_stats.syncp);
		}
release_descriptor:
		desc->opts2 = 0;
		/* Give the descriptor back to the NIC (sets DescOwn). */
		rtl8169_mark_to_asic(desc, rx_buf_sz);
	}

	count = cur_rx - tp->cur_rx;
	tp->cur_rx = cur_rx;

	return count;
}
7442
7443static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
7444{
7445	struct net_device *dev = dev_instance;
7446	struct rtl8169_private *tp = netdev_priv(dev);
7447	int handled = 0;
7448	u16 status;
7449
7450	status = rtl_get_events(tp);
7451	if (status && status != 0xffff) {
7452		status &= RTL_EVENT_NAPI | tp->event_slow;
7453		if (status) {
7454			handled = 1;
7455
7456			rtl_irq_disable(tp);
7457			napi_schedule(&tp->napi);
7458		}
7459	}
7460	return IRQ_RETVAL(handled);
7461}
7462
/*
 * Workqueue context.
 *
 * Handle "slow" events outside of NAPI: the Rx FIFO overflow errata,
 * PCI/system errors and link changes.  Called from rtl_task() with the
 * driver work lock held, then re-enables all chip interrupts.
 */
static void rtl_slow_event_work(struct rtl8169_private *tp)
{
	struct net_device *dev = tp->dev;
	u16 status;

	status = rtl_get_events(tp) & tp->event_slow;
	rtl_ack_events(tp, status);

	if (unlikely(status & RxFIFOOver)) {
		switch (tp->mac_version) {
		/* Work around for rx fifo overflow */
		case RTL_GIGA_MAC_VER_11:
			netif_stop_queue(dev);
			/* XXX - Hack alert. See rtl_task(). */
			set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
			/* fall through */
		default:
			break;
		}
	}

	if (unlikely(status & SYSErr))
		rtl8169_pcierr_interrupt(dev);

	if (status & LinkChg)
		__rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);

	rtl_irq_enable_all(tp);
}
7494
/*
 * Deferred work dispatcher: runs every pending flagged action under the
 * driver lock.  Table order matters — slow-event handling must come
 * first so a reset it flags (see the XXX in rtl_slow_event_work) is
 * picked up in the same pass.
 */
static void rtl_task(struct work_struct *work)
{
	static const struct {
		int bitnr;
		void (*action)(struct rtl8169_private *);
	} rtl_work[] = {
		/* XXX - keep rtl_slow_event_work() as first element. */
		{ RTL_FLAG_TASK_SLOW_PENDING,	rtl_slow_event_work },
		{ RTL_FLAG_TASK_RESET_PENDING,	rtl_reset_work },
		{ RTL_FLAG_TASK_PHY_PENDING,	rtl_phy_work }
	};
	struct rtl8169_private *tp =
		container_of(work, struct rtl8169_private, wk.work);
	struct net_device *dev = tp->dev;
	int i;

	rtl_lock_work(tp);

	/* Bail out when the interface is down or work was disabled
	 * (close/suspend paths clear RTL_FLAG_TASK_ENABLED).
	 */
	if (!netif_running(dev) ||
	    !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
		goto out_unlock;

	for (i = 0; i < ARRAY_SIZE(rtl_work); i++) {
		bool pending;

		pending = test_and_clear_bit(rtl_work[i].bitnr, tp->wk.flags);
		if (pending)
			rtl_work[i].action(tp);
	}

out_unlock:
	rtl_unlock_work(tp);
}
7528
7529static int rtl8169_poll(struct napi_struct *napi, int budget)
7530{
7531	struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
7532	struct net_device *dev = tp->dev;
7533	u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
7534	int work_done= 0;
7535	u16 status;
7536
7537	status = rtl_get_events(tp);
7538	rtl_ack_events(tp, status & ~tp->event_slow);
7539
7540	if (status & RTL_EVENT_NAPI_RX)
7541		work_done = rtl_rx(dev, tp, (u32) budget);
7542
7543	if (status & RTL_EVENT_NAPI_TX)
7544		rtl_tx(dev, tp);
7545
7546	if (status & tp->event_slow) {
7547		enable_mask &= ~tp->event_slow;
7548
7549		rtl_schedule_task(tp, RTL_FLAG_TASK_SLOW_PENDING);
7550	}
7551
7552	if (work_done < budget) {
7553		napi_complete(napi);
7554
7555		rtl_irq_enable(tp, enable_mask);
7556		mmiowb();
7557	}
7558
7559	return work_done;
7560}
7561
7562static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
7563{
7564	struct rtl8169_private *tp = netdev_priv(dev);
7565
7566	if (tp->mac_version > RTL_GIGA_MAC_VER_06)
7567		return;
7568
7569	dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
7570	RTL_W32(RxMissed, 0);
7571}
7572
/*
 * Bring the interface down: stop the timer, NAPI and the Tx queue, reset
 * the chip and release ring contents.  Called under the driver work lock
 * (see rtl8169_close).
 */
static void rtl8169_down(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	del_timer_sync(&tp->timer);

	napi_disable(&tp->napi);
	netif_stop_queue(dev);

	rtl8169_hw_reset(tp);
	/*
	 * At this point device interrupts can not be enabled in any function,
	 * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task)
	 * and napi is disabled (rtl8169_poll).
	 */
	rtl8169_rx_missed(dev, ioaddr);

	/* Give a racing hard_start_xmit a few cycles to complete. */
	synchronize_sched();

	rtl8169_tx_clear(tp);

	rtl8169_rx_clear(tp);

	rtl_pll_power_down(tp);
}
7600
/*
 * .ndo_stop: tear down the interface, cancel deferred work, release the
 * IRQ and free both descriptor rings.  Mirrors the setup in rtl_open().
 */
static int rtl8169_close(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	/* Keep the device powered for the duration of the teardown. */
	pm_runtime_get_sync(&pdev->dev);

	/* Update counters before going down */
	rtl8169_update_counters(dev);

	rtl_lock_work(tp);
	/* Prevent rtl_task() from running any further actions. */
	clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);

	rtl8169_down(dev);
	rtl_unlock_work(tp);

	cancel_work_sync(&tp->wk.work);

	free_irq(pdev->irq, dev);

	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
			  tp->RxPhyAddr);
	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
			  tp->TxPhyAddr);
	tp->TxDescArray = NULL;
	tp->RxDescArray = NULL;

	pm_runtime_put_sync(&pdev->dev);

	return 0;
}
7632
7633#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll entry point: run the interrupt handler with IRQs unavailable
 * (e.g. netconsole).
 */
static void rtl8169_netpoll(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_interrupt(tp->pci_dev->irq, dev);
}
7640#endif
7641
/*
 * .ndo_open: allocate both descriptor rings, populate the Rx ring, load
 * firmware, request the IRQ and start the hardware.  Errors unwind in
 * reverse order through the goto chain at the bottom.
 */
static int rtl_open(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	int retval = -ENOMEM;

	pm_runtime_get_sync(&pdev->dev);

	/*
	 * Rx and Tx descriptors needs 256 bytes alignment.
	 * dma_alloc_coherent provides more.
	 */
	tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
					     &tp->TxPhyAddr, GFP_KERNEL);
	if (!tp->TxDescArray)
		goto err_pm_runtime_put;

	tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
					     &tp->RxPhyAddr, GFP_KERNEL);
	if (!tp->RxDescArray)
		goto err_free_tx_0;

	retval = rtl8169_init_ring(dev);
	if (retval < 0)
		goto err_free_rx_1;

	INIT_WORK(&tp->wk.work, rtl_task);

	smp_mb();

	rtl_request_firmware(tp);

	retval = request_irq(pdev->irq, rtl8169_interrupt,
			     (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
			     dev->name, dev);
	if (retval < 0)
		goto err_release_fw_2;

	rtl_lock_work(tp);

	/* Allow rtl_task() to process deferred work from now on. */
	set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);

	napi_enable(&tp->napi);

	rtl8169_init_phy(dev, tp);

	__rtl8169_set_features(dev, dev->features);

	rtl_pll_power_up(tp);

	rtl_hw_start(dev);

	if (!rtl8169_init_counter_offsets(dev))
		netif_warn(tp, hw, dev, "counter reset/update failed\n");

	netif_start_queue(dev);

	rtl_unlock_work(tp);

	tp->saved_wolopts = 0;
	pm_runtime_put_noidle(&pdev->dev);

	rtl8169_check_link_status(dev, tp, ioaddr);
out:
	return retval;

err_release_fw_2:
	rtl_release_firmware(tp);
	rtl8169_rx_clear(tp);
err_free_rx_1:
	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
			  tp->RxPhyAddr);
	tp->RxDescArray = NULL;
err_free_tx_0:
	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
			  tp->TxPhyAddr);
	tp->TxDescArray = NULL;
err_pm_runtime_put:
	pm_runtime_put_noidle(&pdev->dev);
	goto out;
}
7724
/*
 * .ndo_get_stats64: combine driver-maintained packet/byte counters (read
 * under u64 seqcount protection) with netdev error counters and hardware
 * tally counters.
 */
static struct rtnl_link_stats64 *
rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	struct rtl8169_counters *counters = tp->counters;
	unsigned int start;

	pm_runtime_get_noresume(&pdev->dev);

	/* Only touch hardware registers when the device is powered. */
	if (netif_running(dev) && pm_runtime_active(&pdev->dev))
		rtl8169_rx_missed(dev, ioaddr);

	do {
		start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp);
		stats->rx_packets = tp->rx_stats.packets;
		stats->rx_bytes	= tp->rx_stats.bytes;
	} while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start));

	do {
		start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp);
		stats->tx_packets = tp->tx_stats.packets;
		stats->tx_bytes	= tp->tx_stats.bytes;
	} while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start));

	stats->rx_dropped	= dev->stats.rx_dropped;
	stats->tx_dropped	= dev->stats.tx_dropped;
	stats->rx_length_errors = dev->stats.rx_length_errors;
	stats->rx_errors	= dev->stats.rx_errors;
	stats->rx_crc_errors	= dev->stats.rx_crc_errors;
	stats->rx_fifo_errors	= dev->stats.rx_fifo_errors;
	stats->rx_missed_errors = dev->stats.rx_missed_errors;
	stats->multicast	= dev->stats.multicast;

	/*
	 * Fetch additional counter values missing in stats collected by driver
	 * from tally counters.
	 */
	if (pm_runtime_active(&pdev->dev))
		rtl8169_update_counters(dev);

	/*
	 * Subtract values fetched during initialization.
	 * See rtl8169_init_counter_offsets for a description why we do that.
	 */
	stats->tx_errors = le64_to_cpu(counters->tx_errors) -
		le64_to_cpu(tp->tc_offset.tx_errors);
	stats->collisions = le32_to_cpu(counters->tx_multi_collision) -
		le32_to_cpu(tp->tc_offset.tx_multi_collision);
	stats->tx_aborted_errors = le16_to_cpu(counters->tx_aborted) -
		le16_to_cpu(tp->tc_offset.tx_aborted);

	pm_runtime_put_noidle(&pdev->dev);

	return stats;
}
7782
/*
 * Common suspend path (system sleep, runtime suspend and shutdown):
 * detach the netdev, quiesce NAPI and deferred work, power the PLL down.
 * No-op when the interface is not up.
 */
static void rtl8169_net_suspend(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	netif_device_detach(dev);
	netif_stop_queue(dev);

	rtl_lock_work(tp);
	napi_disable(&tp->napi);
	/* Block rtl_task() until resume re-enables it. */
	clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
	rtl_unlock_work(tp);

	rtl_pll_power_down(tp);
}
7800
7801#ifdef CONFIG_PM
7802
/* System sleep callback: quiesce the interface. */
static int rtl8169_suspend(struct device *device)
{
	struct net_device *dev = pci_get_drvdata(to_pci_dev(device));

	rtl8169_net_suspend(dev);

	return 0;
}
7812
/*
 * Common resume path: re-attach the netdev, power the PLL up, re-enable
 * NAPI and deferred work, then schedule a chip reset to restore the
 * hardware state.
 */
static void __rtl8169_resume(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	netif_device_attach(dev);

	rtl_pll_power_up(tp);

	rtl_lock_work(tp);
	napi_enable(&tp->napi);
	set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
	rtl_unlock_work(tp);

	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
7828
/* System resume callback: reinitialize the PHY and, if the interface was
 * up, restore full operation.
 */
static int rtl8169_resume(struct device *device)
{
	struct net_device *dev = pci_get_drvdata(to_pci_dev(device));

	rtl8169_init_phy(dev, netdev_priv(dev));

	if (netif_running(dev))
		__rtl8169_resume(dev);

	return 0;
}
7842
/*
 * Runtime suspend: save current WoL settings and arm wake-on-any so link
 * activity can resume the device, then quiesce the interface and flush
 * hardware counters.  No-op until the device has been opened
 * (TxDescArray is only set in rtl_open()).
 */
static int rtl8169_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	if (!tp->TxDescArray)
		return 0;

	rtl_lock_work(tp);
	tp->saved_wolopts = __rtl8169_get_wol(tp);
	__rtl8169_set_wol(tp, WAKE_ANY);
	rtl_unlock_work(tp);

	rtl8169_net_suspend(dev);

	/* Update counters before going runtime suspend */
	rtl8169_rx_missed(dev, tp->mmio_addr);
	rtl8169_update_counters(dev);

	return 0;
}
7865
/*
 * Runtime resume: restore the WoL configuration saved at runtime suspend,
 * reinitialize the PHY and bring the interface back up.  No-op before the
 * first open (TxDescArray unset).
 */
static int rtl8169_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	if (!tp->TxDescArray)
		return 0;

	rtl_lock_work(tp);
	__rtl8169_set_wol(tp, tp->saved_wolopts);
	tp->saved_wolopts = 0;
	rtl_unlock_work(tp);

	rtl8169_init_phy(dev, tp);

	__rtl8169_resume(dev);

	return 0;
}
7886
7887static int rtl8169_runtime_idle(struct device *device)
7888{
7889	struct pci_dev *pdev = to_pci_dev(device);
7890	struct net_device *dev = pci_get_drvdata(pdev);
7891	struct rtl8169_private *tp = netdev_priv(dev);
7892
7893	return tp->TxDescArray ? -EBUSY : 0;
7894}
7895
/* Power-management callbacks: system sleep transitions all share the
 * suspend/resume pair; runtime PM has its own handlers above.
 */
static const struct dev_pm_ops rtl8169_pm_ops = {
	.suspend		= rtl8169_suspend,
	.resume			= rtl8169_resume,
	.freeze			= rtl8169_suspend,
	.thaw			= rtl8169_resume,
	.poweroff		= rtl8169_suspend,
	.restore		= rtl8169_resume,
	.runtime_suspend	= rtl8169_runtime_suspend,
	.runtime_resume		= rtl8169_runtime_resume,
	.runtime_idle		= rtl8169_runtime_idle,
};
7907
7908#define RTL8169_PM_OPS	(&rtl8169_pm_ops)
7909
7910#else /* !CONFIG_PM */
7911
7912#define RTL8169_PM_OPS	NULL
7913
7914#endif /* !CONFIG_PM */
7915
/*
 * Shutdown-time WoL quirk: on 8168b-class chips Wake-on-LAN only works
 * if the receiver is left enabled, so stop bus mastering and re-enable
 * only Rx before powering down.
 */
static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;

	/* WoL fails with 8168b when the receiver is disabled. */
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_11:
	case RTL_GIGA_MAC_VER_12:
	case RTL_GIGA_MAC_VER_17:
		pci_clear_master(tp->pci_dev);

		RTL_W8(ChipCmd, CmdRxEnb);
		/* PCI commit */
		RTL_R8(ChipCmd);
		break;
	default:
		break;
	}
}
7935
/*
 * PCI shutdown hook: quiesce the interface, restore the permanent MAC
 * address, reset the chip, and on power-off arm Wake-on-LAN (if
 * configured) before dropping to D3hot.
 */
static void rtl_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);
	struct device *d = &pdev->dev;

	pm_runtime_get_sync(d);

	rtl8169_net_suspend(dev);

	/* Restore original MAC address */
	rtl_rar_set(tp, dev->perm_addr);

	rtl8169_hw_reset(tp);

	if (system_state == SYSTEM_POWER_OFF) {
		if (__rtl8169_get_wol(tp) & WAKE_ANY) {
			rtl_wol_suspend_quirk(tp);
			rtl_wol_shutdown_quirk(tp);
		}

		pci_wake_from_d3(pdev, true);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	pm_runtime_put_noidle(d);
}
7963
/*
 * PCI remove hook: stop the DASH management firmware where present,
 * unregister the netdev and release every resource acquired at probe
 * time (counters, firmware, MSI, BAR mapping).
 */
static void rtl_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rtl8169_private *tp = netdev_priv(dev);

	/* Chips with DASH remote management need an explicit stop. */
	if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_31 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_49 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_50 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_51) &&
	    r8168_check_dash(tp)) {
		rtl8168_driver_stop(tp);
	}

	netif_napi_del(&tp->napi);

	unregister_netdev(dev);

	dma_free_coherent(&tp->pci_dev->dev, sizeof(*tp->counters),
			  tp->counters, tp->counters_phys_addr);

	rtl_release_firmware(tp);

	/* Balance the runtime-PM reference taken at probe time. */
	if (pci_dev_run_wake(pdev))
		pm_runtime_get_noresume(&pdev->dev);

	/* restore original MAC address */
	rtl_rar_set(tp, dev->perm_addr);

	rtl_disable_msi(pdev, tp);
	rtl8169_release_board(pdev, dev, tp->mmio_addr);
}
7997
/* Netdev callback table shared by all supported chip variants. */
static const struct net_device_ops rtl_netdev_ops = {
	.ndo_open		= rtl_open,
	.ndo_stop		= rtl8169_close,
	.ndo_get_stats64	= rtl8169_get_stats64,
	.ndo_start_xmit		= rtl8169_start_xmit,
	.ndo_tx_timeout		= rtl8169_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= rtl8169_change_mtu,
	.ndo_fix_features	= rtl8169_fix_features,
	.ndo_set_features	= rtl8169_set_features,
	.ndo_set_mac_address	= rtl_set_mac_address,
	.ndo_do_ioctl		= rtl8169_ioctl,
	.ndo_set_rx_mode	= rtl_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= rtl8169_netpoll,
#endif

};
8016
/* Per-family configuration, indexed via the PCI id table's driver_data:
 * RTL_CFG_0 = 8169 (GMII), RTL_CFG_1 = 8168 (GMII + MSI),
 * RTL_CFG_2 = 8101/8102 (fast ethernet, MSI).
 */
static const struct rtl_cfg_info {
	void (*hw_start)(struct net_device *);
	unsigned int region;	/* PCI BAR holding the MMIO registers */
	unsigned int align;	/* Rx buffer alignment requirement */
	u16 event_slow;		/* events handled outside NAPI */
	unsigned features;
	u8 default_ver;		/* fallback when chip detection fails */
} rtl_cfg_infos [] = {
	[RTL_CFG_0] = {
		.hw_start	= rtl_hw_start_8169,
		.region		= 1,
		.align		= 0,
		.event_slow	= SYSErr | LinkChg | RxOverflow | RxFIFOOver,
		.features	= RTL_FEATURE_GMII,
		.default_ver	= RTL_GIGA_MAC_VER_01,
	},
	[RTL_CFG_1] = {
		.hw_start	= rtl_hw_start_8168,
		.region		= 2,
		.align		= 8,
		.event_slow	= SYSErr | LinkChg | RxOverflow,
		.features	= RTL_FEATURE_GMII | RTL_FEATURE_MSI,
		.default_ver	= RTL_GIGA_MAC_VER_11,
	},
	[RTL_CFG_2] = {
		.hw_start	= rtl_hw_start_8101,
		.region		= 2,
		.align		= 8,
		.event_slow	= SYSErr | LinkChg | RxOverflow | RxFIFOOver |
				  PCSTimeout,
		.features	= RTL_FEATURE_MSI,
		.default_ver	= RTL_GIGA_MAC_VER_13,
	}
};
8051
/* Cfg9346_Unlock assumed. */
/*
 * Try to enable MSI when the chip configuration allows it.  Returns
 * RTL_FEATURE_MSI on success, 0 when falling back to legacy INTx.  On
 * the old 8169 chips the MSIEnable bit in Config2 is also updated.
 */
static unsigned rtl_try_msi(struct rtl8169_private *tp,
			    const struct rtl_cfg_info *cfg)
{
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned msi = 0;
	u8 cfg2;

	cfg2 = RTL_R8(Config2) & ~MSIEnable;
	if (cfg->features & RTL_FEATURE_MSI) {
		if (pci_enable_msi(tp->pci_dev)) {
			netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
		} else {
			cfg2 |= MSIEnable;
			msi = RTL_FEATURE_MSI;
		}
	}
	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
		RTL_W8(Config2, cfg2);
	return msi;
}
8073
/* Poll condition: firmware has flagged the link list as ready. */
DECLARE_RTL_COND(rtl_link_list_ready_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return RTL_R8(MCU) & LINK_LIST_RDY;
}
8080
/* Poll condition: both Rx and Tx FIFOs report empty. */
DECLARE_RTL_COND(rtl_rxtx_empty_cond)
{
	void __iomem *ioaddr = tp->mmio_addr;

	return (RTL_R8(MCU) & RXTX_EMPTY) == RXTX_EMPTY;
}
8087
/*
 * One-time probe initialization for the 8168g family: gate Rx, drain the
 * FIFOs, take the MAC out of OOB mode and toggle the OCP 0xe8de bits
 * until the firmware link list is ready.  Each wait bails out silently
 * on timeout, leaving the chip in its current state.
 */
static void rtl_hw_init_8168g(struct rtl8169_private *tp)
{
	void __iomem *ioaddr = tp->mmio_addr;
	u32 data;

	tp->ocp_base = OCP_STD_PHY_BASE;

	/* Gate the Rx DV signal while reconfiguring. */
	RTL_W32(MISC, RTL_R32(MISC) | RXDV_GATED_EN);

	if (!rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42))
		return;

	if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
		return;

	RTL_W8(ChipCmd, RTL_R8(ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
	msleep(1);
	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);

	data = r8168_mac_ocp_read(tp, 0xe8de);
	data &= ~(1 << 14);
	r8168_mac_ocp_write(tp, 0xe8de, data);

	if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
		return;

	data = r8168_mac_ocp_read(tp, 0xe8de);
	data |= (1 << 15);
	r8168_mac_ocp_write(tp, 0xe8de, data);

	if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
		return;
}
8121
/* 8168ep init: stop the CMAC (management interface) first, then run the
 * common 8168g sequence.
 */
static void rtl_hw_init_8168ep(struct rtl8169_private *tp)
{
	rtl8168ep_stop_cmac(tp);
	rtl_hw_init_8168g(tp);
}
8127
/* Dispatch the one-time probe hardware init for chips that need it;
 * older variants require none.
 */
static void rtl_hw_initialize(struct rtl8169_private *tp)
{
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
	case RTL_GIGA_MAC_VER_42:
	case RTL_GIGA_MAC_VER_43:
	case RTL_GIGA_MAC_VER_44:
	case RTL_GIGA_MAC_VER_45:
	case RTL_GIGA_MAC_VER_46:
	case RTL_GIGA_MAC_VER_47:
	case RTL_GIGA_MAC_VER_48:
		rtl_hw_init_8168g(tp);
		break;
	case RTL_GIGA_MAC_VER_49:
	case RTL_GIGA_MAC_VER_50:
	case RTL_GIGA_MAC_VER_51:
		rtl_hw_init_8168ep(tp);
		break;
	default:
		break;
	}
}
8151
/*
 * rtl_init_one - PCI probe handler for every supported RealTek chip.
 *
 * Takes the device from power-on to a registered net_device: allocates
 * the netdev, enables and maps PCI resources, identifies the chip
 * revision, performs the one-time hardware init/reset, reads the MAC
 * address, wires up the per-chip function pointers and registers the
 * interface.
 *
 * Returns 0 on success or a negative errno.  On failure, everything
 * acquired so far is released through the goto unwind chain at the
 * bottom; the labels are ordered inversely to the acquisitions above.
 */
static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)

{
	/* Per-board configuration selected via the PCI ID table entry. */
	const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
	const unsigned int region = cfg->region;
	struct rtl8169_private *tp;
	struct mii_if_info *mii;
	struct net_device *dev;
	void __iomem *ioaddr;
	int chipset, i;
	int rc;

	/* One-line driver banner, gated by the module-level debug mask. */
	if (netif_msg_drv(&debug)) {
		printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
		       MODULENAME, RTL8169_VERSION);
	}

	/* Allocate the net_device with our private data appended. */
	dev = alloc_etherdev(sizeof (*tp));
	if (!dev) {
		rc = -ENOMEM;
		goto out;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev->netdev_ops = &rtl_netdev_ops;
	tp = netdev_priv(dev);
	tp->dev = dev;
	tp->pci_dev = pdev;
	tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);

	/* Hook the generic MII helper library into our MDIO accessors. */
	mii = &tp->mii;
	mii->dev = dev;
	mii->mdio_read = rtl_mdio_read;
	mii->mdio_write = rtl_mdio_write;
	mii->phy_id_mask = 0x1f;
	mii->reg_num_mask = 0x1f;
	mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);

	/* disable ASPM completely as that cause random device stop working
	 * problems as well as full system hangs for some PCIe devices users */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
				     PCIE_LINK_STATE_CLKPM);

	/* enable device (incl. PCI PM wakeup and hotplug setup) */
	rc = pci_enable_device(pdev);
	if (rc < 0) {
		netif_err(tp, probe, dev, "enable failure\n");
		goto err_out_free_dev_1;
	}

	/* Memory-Write-Invalidate is a performance hint only; best effort. */
	if (pci_set_mwi(pdev) < 0)
		netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");

	/* make sure PCI base addr 1 is MMIO */
	if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
		netif_err(tp, probe, dev,
			  "region #%d not an MMIO resource, aborting\n",
			  region);
		rc = -ENODEV;
		goto err_out_mwi_2;
	}

	/* check for weird/broken PCI region reporting */
	if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
		netif_err(tp, probe, dev,
			  "Invalid PCI region size(s), aborting\n");
		rc = -ENODEV;
		goto err_out_mwi_2;
	}

	rc = pci_request_regions(pdev, MODULENAME);
	if (rc < 0) {
		netif_err(tp, probe, dev, "could not request regions\n");
		goto err_out_mwi_2;
	}

	tp->cp_cmd = 0;

	/*
	 * DMA mask selection: only use 64-bit dual-address-cycle DMA when
	 * the platform supports it AND the user opted in via use_dac;
	 * otherwise fall back to a 32-bit mask.
	 */
	if ((sizeof(dma_addr_t) > 4) &&
	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
		tp->cp_cmd |= PCIDAC;
		dev->features |= NETIF_F_HIGHDMA;
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc < 0) {
			netif_err(tp, probe, dev, "DMA configuration failed\n");
			goto err_out_free_res_3;
		}
	}

	/* ioremap MMIO region */
	ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
	if (!ioaddr) {
		netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
		rc = -EIO;
		goto err_out_free_res_3;
	}
	tp->mmio_addr = ioaddr;

	if (!pci_is_pcie(pdev))
		netif_info(tp, probe, dev, "not PCI Express\n");

	/* Identify chip attached to board */
	rtl8169_get_mac_version(tp, dev, cfg->default_ver);

	rtl_init_rxcfg(tp);

	/*
	 * Quiesce the chip before enabling bus mastering: mask and ack all
	 * interrupt events and run the per-family early init + reset.
	 */
	rtl_irq_disable(tp);

	rtl_hw_initialize(tp);

	rtl_hw_reset(tp);

	rtl_ack_events(tp, 0xffff);

	pci_set_master(pdev);

	/* Select the per-chip-revision operation tables. */
	rtl_init_mdio_ops(tp);
	rtl_init_pll_power_ops(tp);
	rtl_init_jumbo_ops(tp);
	rtl_init_csi_ops(tp);

	rtl8169_print_mac_version(tp);

	chipset = tp->mac_version;
	tp->txd_version = rtl_chip_infos[chipset].txd_version;

	/*
	 * Probe Wake-on-LAN capability.  Config registers are writable only
	 * between the Cfg9346 unlock/lock pair; newer chips (VER_34+) report
	 * magic-packet support through an ERI register instead of Config3.
	 */
	RTL_W8(Cfg9346, Cfg9346_Unlock);
	RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
	RTL_W8(Config5, RTL_R8(Config5) & (BWF | MWF | UWF | LanWake | PMEStatus));
	switch (tp->mac_version) {
	case RTL_GIGA_MAC_VER_34:
	case RTL_GIGA_MAC_VER_35:
	case RTL_GIGA_MAC_VER_36:
	case RTL_GIGA_MAC_VER_37:
	case RTL_GIGA_MAC_VER_38:
	case RTL_GIGA_MAC_VER_40:
	case RTL_GIGA_MAC_VER_41:
	case RTL_GIGA_MAC_VER_42:
	case RTL_GIGA_MAC_VER_43:
	case RTL_GIGA_MAC_VER_44:
	case RTL_GIGA_MAC_VER_45:
	case RTL_GIGA_MAC_VER_46:
	case RTL_GIGA_MAC_VER_47:
	case RTL_GIGA_MAC_VER_48:
	case RTL_GIGA_MAC_VER_49:
	case RTL_GIGA_MAC_VER_50:
	case RTL_GIGA_MAC_VER_51:
		if (rtl_eri_read(tp, 0xdc, ERIAR_EXGMAC) & MagicPacket_v2)
			tp->features |= RTL_FEATURE_WOL;
		if ((RTL_R8(Config3) & LinkUp) != 0)
			tp->features |= RTL_FEATURE_WOL;
		break;
	default:
		if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
			tp->features |= RTL_FEATURE_WOL;
		break;
	}
	if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
		tp->features |= RTL_FEATURE_WOL;
	tp->features |= rtl_try_msi(tp, cfg);
	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* Pick TBI (fiber) or xMII (copper) link-management callbacks. */
	if (rtl_tbi_enabled(tp)) {
		tp->set_speed = rtl8169_set_speed_tbi;
		tp->get_settings = rtl8169_gset_tbi;
		tp->phy_reset_enable = rtl8169_tbi_reset_enable;
		tp->phy_reset_pending = rtl8169_tbi_reset_pending;
		tp->link_ok = rtl8169_tbi_link_ok;
		tp->do_ioctl = rtl_tbi_ioctl;
	} else {
		tp->set_speed = rtl8169_set_speed_xmii;
		tp->get_settings = rtl8169_gset_xmii;
		tp->phy_reset_enable = rtl8169_xmii_reset_enable;
		tp->phy_reset_pending = rtl8169_xmii_reset_pending;
		tp->link_ok = rtl8169_xmii_link_ok;
		tp->do_ioctl = rtl_xmii_ioctl;
	}

	mutex_init(&tp->wk.mutex);
	u64_stats_init(&tp->rx_stats.syncp);
	u64_stats_init(&tp->tx_stats.syncp);

	/* Get MAC address */
	if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_36 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_37 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_38 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_40 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_41 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_42 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_43 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_44 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_45 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_46 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_47 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_48 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_49 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_50 ||
	    tp->mac_version == RTL_GIGA_MAC_VER_51) {
		u16 mac_addr[3];

		/*
		 * NOTE(review): &mac_addr[0] is only guaranteed u16-aligned,
		 * so the u32 store may be misaligned on strict-alignment
		 * architectures, and the byte order of the stored ERI word
		 * is host-endian — presumably only exercised on little-endian
		 * platforms.  Worth confirming before relying on it elsewhere.
		 */
		*(u32 *)&mac_addr[0] = rtl_eri_read(tp, 0xe0, ERIAR_EXGMAC);
		*(u16 *)&mac_addr[2] = rtl_eri_read(tp, 0xe4, ERIAR_EXGMAC);

		/* Program the ERI-provided address into the MAC filter. */
		if (is_valid_ether_addr((u8 *)mac_addr))
			rtl_rar_set(tp, (u8 *)mac_addr);
	}
	/* Whatever the source, the netdev address comes from the MAC regs. */
	for (i = 0; i < ETH_ALEN; i++)
		dev->dev_addr[i] = RTL_R8(MAC0 + i);

	dev->ethtool_ops = &rtl8169_ethtool_ops;
	dev->watchdog_timeo = RTL8169_TX_TIMEOUT;

	netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);

	/* don't enable SG, IP_CSUM and TSO by default - it might not work
	 * properly for all devices */
	dev->features |= NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;

	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
		NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX |
		NETIF_F_HW_VLAN_CTAG_RX;
	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
		NETIF_F_HIGHDMA;

	tp->cp_cmd |= RxChkSum | RxVlan;

	/*
	 * Pretend we are using VLANs; This bypasses a nasty bug where
	 * Interrupts stop flowing on high load on 8110SCd controllers.
	 */
	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
		/* Disallow toggling */
		dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;

	/* Select the checksum/TSO descriptor builder per descriptor format. */
	if (tp->txd_version == RTL_TD_0)
		tp->tso_csum = rtl8169_tso_csum_v1;
	else if (tp->txd_version == RTL_TD_1) {
		tp->tso_csum = rtl8169_tso_csum_v2;
		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
	} else
		WARN_ON_ONCE(1);

	dev->hw_features |= NETIF_F_RXALL;
	dev->hw_features |= NETIF_F_RXFCS;

	tp->hw_start = cfg->hw_start;
	tp->event_slow = cfg->event_slow;

	/* VER_01 lacks the Rx FIFO/buffer overflow status bits. */
	tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
		~(RxBOVF | RxFOVF) : ~0;

	/* Periodic PHY timer; armed later, from the open/link paths. */
	init_timer(&tp->timer);
	tp->timer.data = (unsigned long) dev;
	tp->timer.function = rtl8169_phy_timer;

	tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;

	/* DMA-coherent buffer the chip dumps its hardware counters into. */
	tp->counters = dma_alloc_coherent (&pdev->dev, sizeof(*tp->counters),
					   &tp->counters_phys_addr, GFP_KERNEL);
	if (!tp->counters) {
		rc = -ENOMEM;
		goto err_out_msi_4;
	}

	rc = register_netdev(dev);
	if (rc < 0)
		goto err_out_cnt_5;

	pci_set_drvdata(pdev, dev);

	netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n",
		   rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr,
		   (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq);
	if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) {
		netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, "
			   "tx checksumming: %s]\n",
			   rtl_chip_infos[chipset].jumbo_max,
			   rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko");
	}

	/* Start the DASH management firmware on chips that carry it. */
	if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_28 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_31 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_49 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_50 ||
	     tp->mac_version == RTL_GIGA_MAC_VER_51) &&
	    r8168_check_dash(tp)) {
		rtl8168_driver_start(tp);
	}

	device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);

	/* Let the device runtime-suspend if it can wake the system. */
	if (pci_dev_run_wake(pdev))
		pm_runtime_put_noidle(&pdev->dev);

	netif_carrier_off(dev);

out:
	return rc;

	/* Error unwind: labels release resources in reverse acquisition order. */
err_out_cnt_5:
	dma_free_coherent(&pdev->dev, sizeof(*tp->counters), tp->counters,
			  tp->counters_phys_addr);
err_out_msi_4:
	netif_napi_del(&tp->napi);
	rtl_disable_msi(pdev, tp);
	iounmap(ioaddr);
err_out_free_res_3:
	pci_release_regions(pdev);
err_out_mwi_2:
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
err_out_free_dev_1:
	free_netdev(dev);
	goto out;
}
8470
/* PCI driver glue: probe/remove/shutdown entry points and PM callbacks. */
static struct pci_driver rtl8169_pci_driver = {
	.name		= MODULENAME,
	.id_table	= rtl8169_pci_tbl,
	.probe		= rtl_init_one,
	.remove		= rtl_remove_one,
	.shutdown	= rtl_shutdown,
	.driver.pm	= RTL8169_PM_OPS,
};

/* Expands to module init/exit that register/unregister the PCI driver. */
module_pci_driver(rtl8169_pci_driver);