   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * r8169.c: RealTek 8169/8168/8101 ethernet driver.
   4 *
   5 * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
   6 * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
   7 * Copyright (c) a lot of people too. Please respect their work.
   8 *
   9 * See MAINTAINERS file for support contact information.
  10 */
  11
  12#include <linux/module.h>
  13#include <linux/pci.h>
  14#include <linux/netdevice.h>
  15#include <linux/etherdevice.h>
  16#include <linux/clk.h>
  17#include <linux/delay.h>
  18#include <linux/ethtool.h>
  19#include <linux/phy.h>
  20#include <linux/if_vlan.h>
  21#include <linux/in.h>
  22#include <linux/io.h>
  23#include <linux/ip.h>
  24#include <linux/tcp.h>
  25#include <linux/interrupt.h>
  26#include <linux/dma-mapping.h>
  27#include <linux/pm_runtime.h>
  28#include <linux/bitfield.h>
  29#include <linux/prefetch.h>
  30#include <linux/ipv6.h>
  31#include <linux/unaligned.h>
  32#include <net/ip6_checksum.h>
  33#include <net/netdev_queues.h>
  34
  35#include "r8169.h"
  36#include "r8169_firmware.h"
  37
  38#define FIRMWARE_8168D_1	"rtl_nic/rtl8168d-1.fw"
  39#define FIRMWARE_8168D_2	"rtl_nic/rtl8168d-2.fw"
  40#define FIRMWARE_8168E_1	"rtl_nic/rtl8168e-1.fw"
  41#define FIRMWARE_8168E_2	"rtl_nic/rtl8168e-2.fw"
  42#define FIRMWARE_8168E_3	"rtl_nic/rtl8168e-3.fw"
  43#define FIRMWARE_8168F_1	"rtl_nic/rtl8168f-1.fw"
  44#define FIRMWARE_8168F_2	"rtl_nic/rtl8168f-2.fw"
  45#define FIRMWARE_8105E_1	"rtl_nic/rtl8105e-1.fw"
  46#define FIRMWARE_8402_1		"rtl_nic/rtl8402-1.fw"
  47#define FIRMWARE_8411_1		"rtl_nic/rtl8411-1.fw"
  48#define FIRMWARE_8411_2		"rtl_nic/rtl8411-2.fw"
  49#define FIRMWARE_8106E_1	"rtl_nic/rtl8106e-1.fw"
  50#define FIRMWARE_8106E_2	"rtl_nic/rtl8106e-2.fw"
  51#define FIRMWARE_8168G_2	"rtl_nic/rtl8168g-2.fw"
  52#define FIRMWARE_8168G_3	"rtl_nic/rtl8168g-3.fw"
  53#define FIRMWARE_8168H_2	"rtl_nic/rtl8168h-2.fw"
  54#define FIRMWARE_8168FP_3	"rtl_nic/rtl8168fp-3.fw"
  55#define FIRMWARE_8107E_2	"rtl_nic/rtl8107e-2.fw"
  56#define FIRMWARE_8125A_3	"rtl_nic/rtl8125a-3.fw"
  57#define FIRMWARE_8125B_2	"rtl_nic/rtl8125b-2.fw"
  58#define FIRMWARE_8125D_1	"rtl_nic/rtl8125d-1.fw"
  59#define FIRMWARE_8126A_2	"rtl_nic/rtl8126a-2.fw"
  60#define FIRMWARE_8126A_3	"rtl_nic/rtl8126a-3.fw"
  61
  62#define TX_DMA_BURST	7	/* Maximum PCI burst, '7' is unlimited */
  63#define InterFrameGap	0x03	/* 3 means InterFrameGap = the shortest one */
  64
  65#define R8169_REGS_SIZE		256
  66#define R8169_RX_BUF_SIZE	(SZ_16K - 1)
  67#define NUM_TX_DESC	256	/* Number of Tx descriptor registers */
  68#define NUM_RX_DESC	256	/* Number of Rx descriptor registers */
  69#define R8169_TX_RING_BYTES	(NUM_TX_DESC * sizeof(struct TxDesc))
  70#define R8169_RX_RING_BYTES	(NUM_RX_DESC * sizeof(struct RxDesc))
  71#define R8169_TX_STOP_THRS	(MAX_SKB_FRAGS + 1)
  72#define R8169_TX_START_THRS	(2 * R8169_TX_STOP_THRS)
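    /*
     * A reading of the two thresholds above (the xmit path itself is outside
     * this excerpt): the Tx queue is presumably stopped once fewer than
     * MAX_SKB_FRAGS + 1 descriptors are free, i.e. not enough room for one
     * maximally fragmented skb, and woken again only once twice that many
     * descriptors are free, which adds hysteresis against rapid stop/start
     * cycling.
     */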
  73
  74#define OCP_STD_PHY_BASE	0xa400
  75
  76#define RTL_CFG_NO_GBIT	1
  77
  78/* write/read MMIO register */
  79#define RTL_W8(tp, reg, val8)	writeb((val8), tp->mmio_addr + (reg))
  80#define RTL_W16(tp, reg, val16)	writew((val16), tp->mmio_addr + (reg))
  81#define RTL_W32(tp, reg, val32)	writel((val32), tp->mmio_addr + (reg))
  82#define RTL_R8(tp, reg)		readb(tp->mmio_addr + (reg))
  83#define RTL_R16(tp, reg)		readw(tp->mmio_addr + (reg))
  84#define RTL_R32(tp, reg)		readl(tp->mmio_addr + (reg))
  85
  86#define JUMBO_4K	(4 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
  87#define JUMBO_6K	(6 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
  88#define JUMBO_7K	(7 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
  89#define JUMBO_9K	(9 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
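    /*
     * Worked example, using the standard values VLAN_ETH_HLEN = 18 and
     * ETH_FCS_LEN = 4: JUMBO_4K = 4 * 1024 - 18 - 4 = 4074 and
     * JUMBO_9K = 9 * 1024 - 18 - 4 = 9194, i.e. each jumbo MTU limit leaves
     * room for a VLAN-tagged Ethernet header plus the FCS within the
     * corresponding buffer size.
     */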
  90
  91static const struct {
  92	const char *name;
  93	const char *fw_name;
  94} rtl_chip_infos[] = {
  95	/* PCI devices. */
  96	[RTL_GIGA_MAC_VER_02] = {"RTL8169s"				},
  97	[RTL_GIGA_MAC_VER_03] = {"RTL8110s"				},
  98	[RTL_GIGA_MAC_VER_04] = {"RTL8169sb/8110sb"			},
  99	[RTL_GIGA_MAC_VER_05] = {"RTL8169sc/8110sc"			},
 100	[RTL_GIGA_MAC_VER_06] = {"RTL8169sc/8110sc"			},
 101	/* PCI-E devices. */
 102	[RTL_GIGA_MAC_VER_07] = {"RTL8102e"				},
 103	[RTL_GIGA_MAC_VER_08] = {"RTL8102e"				},
 104	[RTL_GIGA_MAC_VER_09] = {"RTL8102e/RTL8103e"			},
 105	[RTL_GIGA_MAC_VER_10] = {"RTL8101e/RTL8100e"			},
 106	[RTL_GIGA_MAC_VER_11] = {"RTL8168b/8111b"			},
 107	[RTL_GIGA_MAC_VER_14] = {"RTL8401"				},
 108	[RTL_GIGA_MAC_VER_17] = {"RTL8168b/8111b"			},
 109	[RTL_GIGA_MAC_VER_18] = {"RTL8168cp/8111cp"			},
 110	[RTL_GIGA_MAC_VER_19] = {"RTL8168c/8111c"			},
 111	[RTL_GIGA_MAC_VER_20] = {"RTL8168c/8111c"			},
 112	[RTL_GIGA_MAC_VER_21] = {"RTL8168c/8111c"			},
 113	[RTL_GIGA_MAC_VER_22] = {"RTL8168c/8111c"			},
 114	[RTL_GIGA_MAC_VER_23] = {"RTL8168cp/8111cp"			},
 115	[RTL_GIGA_MAC_VER_24] = {"RTL8168cp/8111cp"			},
 116	[RTL_GIGA_MAC_VER_25] = {"RTL8168d/8111d",	FIRMWARE_8168D_1},
 117	[RTL_GIGA_MAC_VER_26] = {"RTL8168d/8111d",	FIRMWARE_8168D_2},
 118	[RTL_GIGA_MAC_VER_28] = {"RTL8168dp/8111dp"			},
 119	[RTL_GIGA_MAC_VER_29] = {"RTL8105e",		FIRMWARE_8105E_1},
 120	[RTL_GIGA_MAC_VER_30] = {"RTL8105e",		FIRMWARE_8105E_1},
 121	[RTL_GIGA_MAC_VER_31] = {"RTL8168dp/8111dp"			},
 122	[RTL_GIGA_MAC_VER_32] = {"RTL8168e/8111e",	FIRMWARE_8168E_1},
 123	[RTL_GIGA_MAC_VER_33] = {"RTL8168e/8111e",	FIRMWARE_8168E_2},
 124	[RTL_GIGA_MAC_VER_34] = {"RTL8168evl/8111evl",	FIRMWARE_8168E_3},
 125	[RTL_GIGA_MAC_VER_35] = {"RTL8168f/8111f",	FIRMWARE_8168F_1},
 126	[RTL_GIGA_MAC_VER_36] = {"RTL8168f/8111f",	FIRMWARE_8168F_2},
 127	[RTL_GIGA_MAC_VER_37] = {"RTL8402",		FIRMWARE_8402_1 },
 128	[RTL_GIGA_MAC_VER_38] = {"RTL8411",		FIRMWARE_8411_1 },
 129	[RTL_GIGA_MAC_VER_39] = {"RTL8106e",		FIRMWARE_8106E_1},
 130	[RTL_GIGA_MAC_VER_40] = {"RTL8168g/8111g",	FIRMWARE_8168G_2},
 131	[RTL_GIGA_MAC_VER_42] = {"RTL8168gu/8111gu",	FIRMWARE_8168G_3},
 132	[RTL_GIGA_MAC_VER_43] = {"RTL8106eus",		FIRMWARE_8106E_2},
 133	[RTL_GIGA_MAC_VER_44] = {"RTL8411b",		FIRMWARE_8411_2 },
 134	[RTL_GIGA_MAC_VER_46] = {"RTL8168h/8111h",	FIRMWARE_8168H_2},
 135	[RTL_GIGA_MAC_VER_48] = {"RTL8107e",		FIRMWARE_8107E_2},
 136	[RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep"			},
 137	[RTL_GIGA_MAC_VER_52] = {"RTL8168fp/RTL8117",  FIRMWARE_8168FP_3},
 138	[RTL_GIGA_MAC_VER_53] = {"RTL8168fp/RTL8117",			},
 139	[RTL_GIGA_MAC_VER_61] = {"RTL8125A",		FIRMWARE_8125A_3},
 140	/* reserve 62 for CFG_METHOD_4 in the vendor driver */
 141	[RTL_GIGA_MAC_VER_63] = {"RTL8125B",		FIRMWARE_8125B_2},
 142	[RTL_GIGA_MAC_VER_64] = {"RTL8125D",		FIRMWARE_8125D_1},
 143	[RTL_GIGA_MAC_VER_65] = {"RTL8126A",		FIRMWARE_8126A_2},
 144	[RTL_GIGA_MAC_VER_66] = {"RTL8126A",		FIRMWARE_8126A_3},
 145};
 146
 147static const struct pci_device_id rtl8169_pci_tbl[] = {
 148	{ PCI_VDEVICE(REALTEK,	0x2502) },
 149	{ PCI_VDEVICE(REALTEK,	0x2600) },
 150	{ PCI_VDEVICE(REALTEK,	0x8129) },
 151	{ PCI_VDEVICE(REALTEK,	0x8136), RTL_CFG_NO_GBIT },
 152	{ PCI_VDEVICE(REALTEK,	0x8161) },
 153	{ PCI_VDEVICE(REALTEK,	0x8162) },
 154	{ PCI_VDEVICE(REALTEK,	0x8167) },
 155	{ PCI_VDEVICE(REALTEK,	0x8168) },
 156	{ PCI_VDEVICE(NCUBE,	0x8168) },
 157	{ PCI_VDEVICE(REALTEK,	0x8169) },
 158	{ PCI_VENDOR_ID_DLINK,	0x4300,
 159		PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0 },
 160	{ PCI_VDEVICE(DLINK,	0x4300) },
 161	{ PCI_VDEVICE(DLINK,	0x4302) },
 162	{ PCI_VDEVICE(AT,	0xc107) },
 163	{ PCI_VDEVICE(USR,	0x0116) },
 164	{ PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0024 },
 165	{ 0x0001, 0x8168, PCI_ANY_ID, 0x2410 },
 166	{ PCI_VDEVICE(REALTEK,	0x8125) },
 167	{ PCI_VDEVICE(REALTEK,	0x8126) },
 168	{ PCI_VDEVICE(REALTEK,	0x3000) },
 169	{}
 170};
 171
 172MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
 173
 174enum rtl_registers {
 175	MAC0		= 0,	/* Ethernet hardware address. */
 176	MAC4		= 4,
 177	MAR0		= 8,	/* Multicast filter. */
 178	CounterAddrLow		= 0x10,
 179	CounterAddrHigh		= 0x14,
 180	TxDescStartAddrLow	= 0x20,
 181	TxDescStartAddrHigh	= 0x24,
 182	TxHDescStartAddrLow	= 0x28,
 183	TxHDescStartAddrHigh	= 0x2c,
 184	FLASH		= 0x30,
 185	ERSR		= 0x36,
 186	ChipCmd		= 0x37,
 187	TxPoll		= 0x38,
 188	IntrMask	= 0x3c,
 189	IntrStatus	= 0x3e,
 190
 191	TxConfig	= 0x40,
 192#define	TXCFG_AUTO_FIFO			(1 << 7)	/* 8111e-vl */
 193#define	TXCFG_EMPTY			(1 << 11)	/* 8111e-vl */
 194
 195	RxConfig	= 0x44,
 196#define	RX128_INT_EN			(1 << 15)	/* 8111c and later */
 197#define	RX_MULTI_EN			(1 << 14)	/* 8111c only */
 198#define	RXCFG_FIFO_SHIFT		13
 199					/* No threshold before first PCI xfer */
 200#define	RX_FIFO_THRESH			(7 << RXCFG_FIFO_SHIFT)
 201#define	RX_EARLY_OFF			(1 << 11)
 202#define	RX_PAUSE_SLOT_ON		(1 << 11)	/* 8125b and later */
 203#define	RXCFG_DMA_SHIFT			8
 204					/* Unlimited maximum PCI burst. */
 205#define	RX_DMA_BURST			(7 << RXCFG_DMA_SHIFT)
 206
 207	Cfg9346		= 0x50,
 208	Config0		= 0x51,
 209	Config1		= 0x52,
 210	Config2		= 0x53,
 211#define PME_SIGNAL			(1 << 5)	/* 8168c and later */
 212
 213	Config3		= 0x54,
 214	Config4		= 0x55,
 215	Config5		= 0x56,
 216	PHYAR		= 0x60,
 217	PHYstatus	= 0x6c,
 218	RxMaxSize	= 0xda,
 219	CPlusCmd	= 0xe0,
 220	IntrMitigate	= 0xe2,
 221
 222#define RTL_COALESCE_TX_USECS	GENMASK(15, 12)
 223#define RTL_COALESCE_TX_FRAMES	GENMASK(11, 8)
 224#define RTL_COALESCE_RX_USECS	GENMASK(7, 4)
 225#define RTL_COALESCE_RX_FRAMES	GENMASK(3, 0)
 226
 227#define RTL_COALESCE_T_MAX	0x0fU
 228#define RTL_COALESCE_FRAME_MAX	(RTL_COALESCE_T_MAX * 4)
 229
 230	RxDescAddrLow	= 0xe4,
 231	RxDescAddrHigh	= 0xe8,
 232	EarlyTxThres	= 0xec,	/* 8169. Unit of 32 bytes. */
 233
 234#define NoEarlyTx	0x3f	/* Max value : no early transmit. */
 235
 236	MaxTxPacketSize	= 0xec,	/* 8101/8168. Unit of 128 bytes. */
 237
 238#define TxPacketMax	(8064 >> 7)
 239#define EarlySize	0x27
 240
 241	FuncEvent	= 0xf0,
 242	FuncEventMask	= 0xf4,
 243	FuncPresetState	= 0xf8,
 244	IBCR0           = 0xf8,
 245	IBCR2           = 0xf9,
 246	IBIMR0          = 0xfa,
 247	IBISR0          = 0xfb,
 248	FuncForceEvent	= 0xfc,
 249};
 250
 251enum rtl8168_8101_registers {
 252	CSIDR			= 0x64,
 253	CSIAR			= 0x68,
 254#define	CSIAR_FLAG			0x80000000
 255#define	CSIAR_WRITE_CMD			0x80000000
 256#define	CSIAR_BYTE_ENABLE		0x0000f000
 257#define	CSIAR_ADDR_MASK			0x00000fff
 258	PMCH			= 0x6f,
 259#define D3COLD_NO_PLL_DOWN		BIT(7)
 260#define D3HOT_NO_PLL_DOWN		BIT(6)
 261#define D3_NO_PLL_DOWN			(BIT(7) | BIT(6))
 262	EPHYAR			= 0x80,
 263#define	EPHYAR_FLAG			0x80000000
 264#define	EPHYAR_WRITE_CMD		0x80000000
 265#define	EPHYAR_REG_MASK			0x1f
 266#define	EPHYAR_REG_SHIFT		16
 267#define	EPHYAR_DATA_MASK		0xffff
 268	DLLPR			= 0xd0,
 269#define	PFM_EN				(1 << 6)
 270#define	TX_10M_PS_EN			(1 << 7)
 271	DBG_REG			= 0xd1,
 272#define	FIX_NAK_1			(1 << 4)
 273#define	FIX_NAK_2			(1 << 3)
 274	TWSI			= 0xd2,
 275	MCU			= 0xd3,
 276#define	NOW_IS_OOB			(1 << 7)
 277#define	TX_EMPTY			(1 << 5)
 278#define	RX_EMPTY			(1 << 4)
 279#define	RXTX_EMPTY			(TX_EMPTY | RX_EMPTY)
 280#define	EN_NDP				(1 << 3)
 281#define	EN_OOB_RESET			(1 << 2)
 282#define	LINK_LIST_RDY			(1 << 1)
 283	EFUSEAR			= 0xdc,
 284#define	EFUSEAR_FLAG			0x80000000
 285#define	EFUSEAR_WRITE_CMD		0x80000000
 286#define	EFUSEAR_READ_CMD		0x00000000
 287#define	EFUSEAR_REG_MASK		0x03ff
 288#define	EFUSEAR_REG_SHIFT		8
 289#define	EFUSEAR_DATA_MASK		0xff
 290	MISC_1			= 0xf2,
 291#define	PFM_D3COLD_EN			(1 << 6)
 292};
 293
 294enum rtl8168_registers {
 295	LED_CTRL		= 0x18,
 296	LED_FREQ		= 0x1a,
 297	EEE_LED			= 0x1b,
 298	ERIDR			= 0x70,
 299	ERIAR			= 0x74,
 300#define ERIAR_FLAG			0x80000000
 301#define ERIAR_WRITE_CMD			0x80000000
 302#define ERIAR_READ_CMD			0x00000000
 303#define ERIAR_ADDR_BYTE_ALIGN		4
 304#define ERIAR_TYPE_SHIFT		16
 305#define ERIAR_EXGMAC			(0x00 << ERIAR_TYPE_SHIFT)
 306#define ERIAR_MSIX			(0x01 << ERIAR_TYPE_SHIFT)
 307#define ERIAR_ASF			(0x02 << ERIAR_TYPE_SHIFT)
 308#define ERIAR_OOB			(0x02 << ERIAR_TYPE_SHIFT)
 309#define ERIAR_MASK_SHIFT		12
 310#define ERIAR_MASK_0001			(0x1 << ERIAR_MASK_SHIFT)
 311#define ERIAR_MASK_0011			(0x3 << ERIAR_MASK_SHIFT)
 312#define ERIAR_MASK_0100			(0x4 << ERIAR_MASK_SHIFT)
 313#define ERIAR_MASK_0101			(0x5 << ERIAR_MASK_SHIFT)
 314#define ERIAR_MASK_1111			(0xf << ERIAR_MASK_SHIFT)
 315	EPHY_RXER_NUM		= 0x7c,
 316	OCPDR			= 0xb0,	/* OCP GPHY access */
 317#define OCPDR_WRITE_CMD			0x80000000
 318#define OCPDR_READ_CMD			0x00000000
 319#define OCPDR_REG_MASK			0x7f
 320#define OCPDR_GPHY_REG_SHIFT		16
 321#define OCPDR_DATA_MASK			0xffff
 322	OCPAR			= 0xb4,
 323#define OCPAR_FLAG			0x80000000
 324#define OCPAR_GPHY_WRITE_CMD		0x8000f060
 325#define OCPAR_GPHY_READ_CMD		0x0000f060
 326	GPHY_OCP		= 0xb8,
 327	RDSAR1			= 0xd0,	/* 8168c only. Undocumented on 8168dp */
 328	MISC			= 0xf0,	/* 8168e only. */
 329#define TXPLA_RST			(1 << 29)
 330#define DISABLE_LAN_EN			(1 << 23) /* Enable GPIO pin */
 331#define PWM_EN				(1 << 22)
 332#define RXDV_GATED_EN			(1 << 19)
 333#define EARLY_TALLY_EN			(1 << 16)
 334};
 335
 336enum rtl8125_registers {
 337	LEDSEL0			= 0x18,
 338	INT_CFG0_8125		= 0x34,
 339#define INT_CFG0_ENABLE_8125		BIT(0)
 340#define INT_CFG0_CLKREQEN		BIT(3)
 341	IntrMask_8125		= 0x38,
 342	IntrStatus_8125		= 0x3c,
 343	INT_CFG1_8125		= 0x7a,
 344	LEDSEL2			= 0x84,
 345	LEDSEL1			= 0x86,
 346	TxPoll_8125		= 0x90,
 347	LEDSEL3			= 0x96,
 348	MAC0_BKP		= 0x19e0,
 349	RSS_CTRL_8125		= 0x4500,
 350	Q_NUM_CTRL_8125		= 0x4800,
 351	EEE_TXIDLE_TIMER_8125	= 0x6048,
 352};
 353
 354#define LEDSEL_MASK_8125	0x23f
 355
 356#define RX_VLAN_INNER_8125	BIT(22)
 357#define RX_VLAN_OUTER_8125	BIT(23)
 358#define RX_VLAN_8125		(RX_VLAN_INNER_8125 | RX_VLAN_OUTER_8125)
 359
 360#define RX_FETCH_DFLT_8125	(8 << 27)
 361
 362enum rtl_register_content {
 363	/* InterruptStatusBits */
 364	SYSErr		= 0x8000,
 365	PCSTimeout	= 0x4000,
 366	SWInt		= 0x0100,
 367	TxDescUnavail	= 0x0080,
 368	RxFIFOOver	= 0x0040,
 369	LinkChg		= 0x0020,
 370	RxOverflow	= 0x0010,
 371	TxErr		= 0x0008,
 372	TxOK		= 0x0004,
 373	RxErr		= 0x0002,
 374	RxOK		= 0x0001,
 375
 376	/* RxStatusDesc */
 377	RxRWT	= (1 << 22),
 378	RxRES	= (1 << 21),
 379	RxRUNT	= (1 << 20),
 380	RxCRC	= (1 << 19),
 381
 382	/* ChipCmdBits */
 383	StopReq		= 0x80,
 384	CmdReset	= 0x10,
 385	CmdRxEnb	= 0x08,
 386	CmdTxEnb	= 0x04,
 387	RxBufEmpty	= 0x01,
 388
 389	/* TXPoll register p.5 */
 390	HPQ		= 0x80,		/* Poll cmd on the high prio queue */
 391	NPQ		= 0x40,		/* Poll cmd on the low prio queue */
 392	FSWInt		= 0x01,		/* Forced software interrupt */
 393
 394	/* Cfg9346Bits */
 395	Cfg9346_Lock	= 0x00,
 396	Cfg9346_Unlock	= 0xc0,
 397
 398	/* rx_mode_bits */
 399	AcceptErr	= 0x20,
 400	AcceptRunt	= 0x10,
 401#define RX_CONFIG_ACCEPT_ERR_MASK	0x30
 402	AcceptBroadcast	= 0x08,
 403	AcceptMulticast	= 0x04,
 404	AcceptMyPhys	= 0x02,
 405	AcceptAllPhys	= 0x01,
 406#define RX_CONFIG_ACCEPT_OK_MASK	0x0f
 407#define RX_CONFIG_ACCEPT_MASK		0x3f
 408
 409	/* TxConfigBits */
 410	TxInterFrameGapShift = 24,
 411	TxDMAShift = 8,	/* DMA burst value (0-7) is shifted by this many bits */
 412
 413	/* Config1 register p.24 */
 414	LEDS1		= (1 << 7),
 415	LEDS0		= (1 << 6),
 416	Speed_down	= (1 << 4),
 417	MEMMAP		= (1 << 3),
 418	IOMAP		= (1 << 2),
 419	VPD		= (1 << 1),
 420	PMEnable	= (1 << 0),	/* Power Management Enable */
 421
 422	/* Config2 register p. 25 */
 423	ClkReqEn	= (1 << 7),	/* Clock Request Enable */
 424	MSIEnable	= (1 << 5),	/* 8169 only. Reserved in the 8168. */
 425	PCI_Clock_66MHz = 0x01,
 426	PCI_Clock_33MHz = 0x00,
 427
 428	/* Config3 register p.25 */
 429	MagicPacket	= (1 << 5),	/* Wake up when a Magic Packet is received */
 430	LinkUp		= (1 << 4),	/* Wake up when the cable connection is re-established */
 431	Jumbo_En0	= (1 << 2),	/* 8168 only. Reserved in the 8168b */
 432	Rdy_to_L23	= (1 << 1),	/* L23 Enable */
 433	Beacon_en	= (1 << 0),	/* 8168 only. Reserved in the 8168b */
 434
 435	/* Config4 register */
 436	Jumbo_En1	= (1 << 1),	/* 8168 only. Reserved in the 8168b */
 437
 438	/* Config5 register p.27 */
 439	BWF		= (1 << 6),	/* Accept Broadcast wakeup frame */
 440	MWF		= (1 << 5),	/* Accept Multicast wakeup frame */
 441	UWF		= (1 << 4),	/* Accept Unicast wakeup frame */
 442	Spi_en		= (1 << 3),
 443	LanWake		= (1 << 1),	/* LanWake enable/disable */
 444	PMEStatus	= (1 << 0),	/* PME status can be reset by PCI RST# */
 445	ASPM_en		= (1 << 0),	/* ASPM enable */
 446
 447	/* CPlusCmd p.31 */
 448	EnableBist	= (1 << 15),	// 8168 8101
 449	Mac_dbgo_oe	= (1 << 14),	// 8168 8101
 450	EnAnaPLL	= (1 << 14),	// 8169
 451	Normal_mode	= (1 << 13),	// unused
 452	Force_half_dup	= (1 << 12),	// 8168 8101
 453	Force_rxflow_en	= (1 << 11),	// 8168 8101
 454	Force_txflow_en	= (1 << 10),	// 8168 8101
 455	Cxpl_dbg_sel	= (1 << 9),	// 8168 8101
 456	ASF		= (1 << 8),	// 8168 8101
 457	PktCntrDisable	= (1 << 7),	// 8168 8101
 458	Mac_dbgo_sel	= 0x001c,	// 8168
 459	RxVlan		= (1 << 6),
 460	RxChkSum	= (1 << 5),
 461	PCIDAC		= (1 << 4),
 462	PCIMulRW	= (1 << 3),
 463#define INTT_MASK	GENMASK(1, 0)
 464#define CPCMD_MASK	(Normal_mode | RxVlan | RxChkSum | INTT_MASK)
 465
 466	/* rtl8169_PHYstatus */
 467	TBI_Enable	= 0x80,
 468	TxFlowCtrl	= 0x40,
 469	RxFlowCtrl	= 0x20,
 470	_1000bpsF	= 0x10,
 471	_100bps		= 0x08,
 472	_10bps		= 0x04,
 473	LinkStatus	= 0x02,
 474	FullDup		= 0x01,
 475
 476	/* ResetCounterCommand */
 477	CounterReset	= 0x1,
 478
 479	/* DumpCounterCommand */
 480	CounterDump	= 0x8,
 481
 482	/* magic enable v2 */
 483	MagicPacket_v2	= (1 << 16),	/* Wake up when a Magic Packet is received */
 484};
 485
 486enum rtl_desc_bit {
 487	/* First doubleword. */
 488	DescOwn		= (1 << 31), /* Descriptor is owned by NIC */
 489	RingEnd		= (1 << 30), /* End of descriptor ring */
 490	FirstFrag	= (1 << 29), /* First segment of a packet */
 491	LastFrag	= (1 << 28), /* Final segment of a packet */
 492};
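    /*
     * Illustrative only: for a packet that fits in a single descriptor, the
     * driver would typically set opts1 = DescOwn | FirstFrag | LastFrag
     * together with the fragment length in the low bits, and additionally
     * RingEnd on the final descriptor of the ring so the NIC wraps around.
     */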
 493
 494/* Generic case. */
 495enum rtl_tx_desc_bit {
 496	/* First doubleword. */
 497	TD_LSO		= (1 << 27),		/* Large Send Offload */
 498#define TD_MSS_MAX			0x07ffu	/* MSS value */
 499
 500	/* Second doubleword. */
 501	TxVlanTag	= (1 << 17),		/* Add VLAN tag */
 502};
 503
 504/* 8169, 8168b and 810x except 8102e. */
 505enum rtl_tx_desc_bit_0 {
 506	/* First doubleword. */
 507#define TD0_MSS_SHIFT			16	/* MSS position (11 bits) */
 508	TD0_TCP_CS	= (1 << 16),		/* Calculate TCP/IP checksum */
 509	TD0_UDP_CS	= (1 << 17),		/* Calculate UDP/IP checksum */
 510	TD0_IP_CS	= (1 << 18),		/* Calculate IP checksum */
 511};
 512
 513/* 8102e, 8168c and beyond. */
 514enum rtl_tx_desc_bit_1 {
 515	/* First doubleword. */
 516	TD1_GTSENV4	= (1 << 26),		/* Giant Send for IPv4 */
 517	TD1_GTSENV6	= (1 << 25),		/* Giant Send for IPv6 */
 518#define GTTCPHO_SHIFT			18
 519#define GTTCPHO_MAX			0x7f
 520
 521	/* Second doubleword. */
 522#define TCPHO_SHIFT			18
 523#define TCPHO_MAX			0x3ff
 524#define TD1_MSS_SHIFT			18	/* MSS position (11 bits) */
 525	TD1_IPv6_CS	= (1 << 28),		/* Calculate IPv6 checksum */
 526	TD1_IPv4_CS	= (1 << 29),		/* Calculate IPv4 checksum */
 527	TD1_TCP_CS	= (1 << 30),		/* Calculate TCP/IP checksum */
 528	TD1_UDP_CS	= (1 << 31),		/* Calculate UDP/IP checksum */
 529};
 530
 531enum rtl_rx_desc_bit {
 532	/* Rx private */
 533	PID1		= (1 << 18), /* Protocol ID bit 1/2 */
 534	PID0		= (1 << 17), /* Protocol ID bit 0/2 */
 535
 536#define RxProtoUDP	(PID1)
 537#define RxProtoTCP	(PID0)
 538#define RxProtoIP	(PID1 | PID0)
 539#define RxProtoMask	RxProtoIP
 540
 541	IPFail		= (1 << 16), /* IP checksum failed */
 542	UDPFail		= (1 << 15), /* UDP/IP checksum failed */
 543	TCPFail		= (1 << 14), /* TCP/IP checksum failed */
 544
 545#define RxCSFailMask	(IPFail | UDPFail | TCPFail)
 546
 547	RxVlanTag	= (1 << 16), /* VLAN tag available */
 548};
 549
 550#define RTL_GSO_MAX_SIZE_V1	32000
 551#define RTL_GSO_MAX_SEGS_V1	24
 552#define RTL_GSO_MAX_SIZE_V2	64000
 553#define RTL_GSO_MAX_SEGS_V2	64
 554
 555struct TxDesc {
 556	__le32 opts1;
 557	__le32 opts2;
 558	__le64 addr;
 559};
 560
 561struct RxDesc {
 562	__le32 opts1;
 563	__le32 opts2;
 564	__le64 addr;
 565};
 566
 567struct ring_info {
 568	struct sk_buff	*skb;
 569	u32		len;
 570};
 571
 572struct rtl8169_counters {
 573	__le64	tx_packets;
 574	__le64	rx_packets;
 575	__le64	tx_errors;
 576	__le32	rx_errors;
 577	__le16	rx_missed;
 578	__le16	align_errors;
 579	__le32	tx_one_collision;
 580	__le32	tx_multi_collision;
 581	__le64	rx_unicast;
 582	__le64	rx_broadcast;
 583	__le32	rx_multicast;
 584	__le16	tx_aborted;
 585	__le16	tx_underrun;
 586	/* new since RTL8125 */
 587	__le64 tx_octets;
 588	__le64 rx_octets;
 589	__le64 rx_multicast64;
 590	__le64 tx_unicast64;
 591	__le64 tx_broadcast64;
 592	__le64 tx_multicast64;
 593	__le32 tx_pause_on;
 594	__le32 tx_pause_off;
 595	__le32 tx_pause_all;
 596	__le32 tx_deferred;
 597	__le32 tx_late_collision;
 598	__le32 tx_all_collision;
 599	__le32 tx_aborted32;
 600	__le32 align_errors32;
 601	__le32 rx_frame_too_long;
 602	__le32 rx_runt;
 603	__le32 rx_pause_on;
 604	__le32 rx_pause_off;
 605	__le32 rx_pause_all;
 606	__le32 rx_unknown_opcode;
 607	__le32 rx_mac_error;
 608	__le32 tx_underrun32;
 609	__le32 rx_mac_missed;
 610	__le32 rx_tcam_dropped;
 611	__le32 tdu;
 612	__le32 rdu;
 613};
 614
 615struct rtl8169_tc_offsets {
 616	bool	inited;
 617	__le64	tx_errors;
 618	__le32	tx_multi_collision;
 619	__le16	tx_aborted;
 620	__le16	rx_missed;
 621};
 622
 623enum rtl_flag {
 624	RTL_FLAG_TASK_RESET_PENDING,
 625	RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE,
 626	RTL_FLAG_TASK_TX_TIMEOUT,
 627	RTL_FLAG_MAX
 628};
 629
 630enum rtl_dash_type {
 631	RTL_DASH_NONE,
 632	RTL_DASH_DP,
 633	RTL_DASH_EP,
 634};
 635
 636struct rtl8169_private {
 637	void __iomem *mmio_addr;	/* memory-mapped register base (ioremapped) */
 638	struct pci_dev *pci_dev;
 639	struct net_device *dev;
 640	struct phy_device *phydev;
 641	struct napi_struct napi;
 642	enum mac_version mac_version;
 643	enum rtl_dash_type dash_type;
 644	u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
 645	u32 cur_tx; /* Index into the Tx descriptor buffer of next Tx pkt. */
 646	u32 dirty_tx;
 647	struct TxDesc *TxDescArray;	/* 256-aligned Tx descriptor ring */
 648	struct RxDesc *RxDescArray;	/* 256-aligned Rx descriptor ring */
 649	dma_addr_t TxPhyAddr;
 650	dma_addr_t RxPhyAddr;
 651	struct page *Rx_databuff[NUM_RX_DESC];	/* Rx data buffers */
 652	struct ring_info tx_skb[NUM_TX_DESC];	/* Tx data buffers */
 653	u16 cp_cmd;
 654	u16 tx_lpi_timer;
 655	u32 irq_mask;
 656	int irq;
 657	struct clk *clk;
 658
 659	struct {
 660		DECLARE_BITMAP(flags, RTL_FLAG_MAX);
 661		struct work_struct work;
 662	} wk;
 663
 664	raw_spinlock_t mac_ocp_lock;
 665	struct mutex led_lock;	/* serialize LED ctrl RMW access */
 666
 667	unsigned supports_gmii:1;
 668	unsigned aspm_manageable:1;
 669	unsigned dash_enabled:1;
 670	dma_addr_t counters_phys_addr;
 671	struct rtl8169_counters *counters;
 672	struct rtl8169_tc_offsets tc_offset;
 673	u32 saved_wolopts;
 674
 675	const char *fw_name;
 676	struct rtl_fw *rtl_fw;
 677
 678	struct r8169_led_classdev *leds;
 679
 680	u32 ocp_base;
 681};
 682
 683typedef void (*rtl_generic_fct)(struct rtl8169_private *tp);
 684
 685MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
 686MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
 687MODULE_SOFTDEP("pre: realtek");
 688MODULE_LICENSE("GPL");
 689MODULE_FIRMWARE(FIRMWARE_8168D_1);
 690MODULE_FIRMWARE(FIRMWARE_8168D_2);
 691MODULE_FIRMWARE(FIRMWARE_8168E_1);
 692MODULE_FIRMWARE(FIRMWARE_8168E_2);
 693MODULE_FIRMWARE(FIRMWARE_8168E_3);
 694MODULE_FIRMWARE(FIRMWARE_8105E_1);
 695MODULE_FIRMWARE(FIRMWARE_8168F_1);
 696MODULE_FIRMWARE(FIRMWARE_8168F_2);
 697MODULE_FIRMWARE(FIRMWARE_8402_1);
 698MODULE_FIRMWARE(FIRMWARE_8411_1);
 699MODULE_FIRMWARE(FIRMWARE_8411_2);
 700MODULE_FIRMWARE(FIRMWARE_8106E_1);
 701MODULE_FIRMWARE(FIRMWARE_8106E_2);
 702MODULE_FIRMWARE(FIRMWARE_8168G_2);
 703MODULE_FIRMWARE(FIRMWARE_8168G_3);
 704MODULE_FIRMWARE(FIRMWARE_8168H_2);
 705MODULE_FIRMWARE(FIRMWARE_8168FP_3);
 706MODULE_FIRMWARE(FIRMWARE_8107E_2);
 707MODULE_FIRMWARE(FIRMWARE_8125A_3);
 708MODULE_FIRMWARE(FIRMWARE_8125B_2);
 709MODULE_FIRMWARE(FIRMWARE_8125D_1);
 710MODULE_FIRMWARE(FIRMWARE_8126A_2);
 711MODULE_FIRMWARE(FIRMWARE_8126A_3);
 712
 713static inline struct device *tp_to_dev(struct rtl8169_private *tp)
 714{
 715	return &tp->pci_dev->dev;
 716}
 717
 718static void rtl_lock_config_regs(struct rtl8169_private *tp)
 719{
 720	RTL_W8(tp, Cfg9346, Cfg9346_Lock);
 721}
 722
 723static void rtl_unlock_config_regs(struct rtl8169_private *tp)
 724{
 725	RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
 726}
 727
 728static void rtl_pci_commit(struct rtl8169_private *tp)
 729{
 730	/* Read an arbitrary register to commit a preceding PCI write */
 731	RTL_R8(tp, ChipCmd);
 732}
 733
 734static void rtl_mod_config2(struct rtl8169_private *tp, u8 clear, u8 set)
 735{
 736	u8 val;
 737
 738	val = RTL_R8(tp, Config2);
 739	RTL_W8(tp, Config2, (val & ~clear) | set);
 740}
 741
 742static void rtl_mod_config5(struct rtl8169_private *tp, u8 clear, u8 set)
 743{
 744	u8 val;
 745
 746	val = RTL_R8(tp, Config5);
 747	RTL_W8(tp, Config5, (val & ~clear) | set);
 748}
 749
 750static void r8169_mod_reg8_cond(struct rtl8169_private *tp, int reg,
 751				u8 bits, bool cond)
 752{
 753	u8 val, old_val;
 754
 755	old_val = RTL_R8(tp, reg);
 756	if (cond)
 757		val = old_val | bits;
 758	else
 759		val = old_val & ~bits;
 760	if (val != old_val)
 761		RTL_W8(tp, reg, val);
 762}
 763
 764static bool rtl_is_8125(struct rtl8169_private *tp)
 765{
 766	return tp->mac_version >= RTL_GIGA_MAC_VER_61;
 767}
 768
 769static bool rtl_is_8168evl_up(struct rtl8169_private *tp)
 770{
 771	return tp->mac_version >= RTL_GIGA_MAC_VER_34 &&
 772	       tp->mac_version != RTL_GIGA_MAC_VER_39 &&
 773	       tp->mac_version <= RTL_GIGA_MAC_VER_53;
 774}
 775
 776static bool rtl_supports_eee(struct rtl8169_private *tp)
 777{
 778	return tp->mac_version >= RTL_GIGA_MAC_VER_34 &&
 779	       tp->mac_version != RTL_GIGA_MAC_VER_37 &&
 780	       tp->mac_version != RTL_GIGA_MAC_VER_39;
 781}
 782
 783static void rtl_read_mac_from_reg(struct rtl8169_private *tp, u8 *mac, int reg)
 784{
 785	int i;
 786
 787	for (i = 0; i < ETH_ALEN; i++)
 788		mac[i] = RTL_R8(tp, reg + i);
 789}
 790
 791struct rtl_cond {
 792	bool (*check)(struct rtl8169_private *);
 793	const char *msg;
 794};
 795
 796static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c,
 797			  unsigned long usecs, int n, bool high)
 798{
 799	int i;
 800
 801	for (i = 0; i < n; i++) {
 802		if (c->check(tp) == high)
 803			return true;
 804		fsleep(usecs);
 805	}
 806
 807	if (net_ratelimit())
 808		netdev_err(tp->dev, "%s == %d (loop: %d, delay: %lu).\n",
 809			   c->msg, !high, n, usecs);
 810	return false;
 811}
 812
 813static bool rtl_loop_wait_high(struct rtl8169_private *tp,
 814			       const struct rtl_cond *c,
 815			       unsigned long d, int n)
 816{
 817	return rtl_loop_wait(tp, c, d, n, true);
 818}
 819
 820static bool rtl_loop_wait_low(struct rtl8169_private *tp,
 821			      const struct rtl_cond *c,
 822			      unsigned long d, int n)
 823{
 824	return rtl_loop_wait(tp, c, d, n, false);
 825}
 826
 827#define DECLARE_RTL_COND(name)				\
 828static bool name ## _check(struct rtl8169_private *);	\
 829							\
 830static const struct rtl_cond name = {			\
 831	.check	= name ## _check,			\
 832	.msg	= #name					\
 833};							\
 834							\
 835static bool name ## _check(struct rtl8169_private *tp)
 836
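    /*
     * Usage sketch for the polling helpers above (illustrative, not part of
     * the driver): the DECLARE_RTL_COND() invocation is immediately followed
     * by the body of its check function, and the resulting condition is then
     * handed to the loop-wait helpers:
     *
     *	DECLARE_RTL_COND(rtl_example_cond)	// hypothetical condition
     *	{
     *		return RTL_R32(tp, SOME_REG) & SOME_FLAG; // hypothetical reg/flag
     *	}
     *
     *	// poll up to 10 times, sleeping 25us between reads, until true
     *	if (!rtl_loop_wait_high(tp, &rtl_example_cond, 25, 10))
     *		// handle timeout
     */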
 837int rtl8168_led_mod_ctrl(struct rtl8169_private *tp, u16 mask, u16 val)
 838{
 839	struct device *dev = tp_to_dev(tp);
 840	int ret;
 841
 842	ret = pm_runtime_resume_and_get(dev);
 843	if (ret < 0)
 844		return ret;
 845
 846	mutex_lock(&tp->led_lock);
 847	RTL_W16(tp, LED_CTRL, (RTL_R16(tp, LED_CTRL) & ~mask) | val);
 848	mutex_unlock(&tp->led_lock);
 849
 850	pm_runtime_put_sync(dev);
 851
 852	return 0;
 853}
 854
 855int rtl8168_get_led_mode(struct rtl8169_private *tp)
 856{
 857	struct device *dev = tp_to_dev(tp);
 858	int ret;
 859
 860	ret = pm_runtime_resume_and_get(dev);
 861	if (ret < 0)
 862		return ret;
 863
 864	ret = RTL_R16(tp, LED_CTRL);
 865
 866	pm_runtime_put_sync(dev);
 867
 868	return ret;
 869}
 870
 871static int rtl8125_get_led_reg(int index)
 872{
 873	static const int led_regs[] = { LEDSEL0, LEDSEL1, LEDSEL2, LEDSEL3 };
 874
 875	return led_regs[index];
 876}
 877
 878int rtl8125_set_led_mode(struct rtl8169_private *tp, int index, u16 mode)
 879{
 880	int reg = rtl8125_get_led_reg(index);
 881	struct device *dev = tp_to_dev(tp);
 882	int ret;
 883	u16 val;
 884
 885	ret = pm_runtime_resume_and_get(dev);
 886	if (ret < 0)
 887		return ret;
 888
 889	mutex_lock(&tp->led_lock);
 890	val = RTL_R16(tp, reg) & ~LEDSEL_MASK_8125;
 891	RTL_W16(tp, reg, val | mode);
 892	mutex_unlock(&tp->led_lock);
 893
 894	pm_runtime_put_sync(dev);
 895
 896	return 0;
 897}
 898
 899int rtl8125_get_led_mode(struct rtl8169_private *tp, int index)
 900{
 901	int reg = rtl8125_get_led_reg(index);
 902	struct device *dev = tp_to_dev(tp);
 903	int ret;
 904
 905	ret = pm_runtime_resume_and_get(dev);
 906	if (ret < 0)
 907		return ret;
 908
 909	ret = RTL_R16(tp, reg);
 910
 911	pm_runtime_put_sync(dev);
 912
 913	return ret;
 914}
 915
 916void r8169_get_led_name(struct rtl8169_private *tp, int idx,
 917			char *buf, int buf_len)
 918{
 919	struct pci_dev *pdev = tp->pci_dev;
 920	char pdom[8], pfun[8];
 921	int domain;
 922
 923	domain = pci_domain_nr(pdev->bus);
 924	if (domain)
 925		snprintf(pdom, sizeof(pdom), "P%d", domain);
 926	else
 927		pdom[0] = '\0';
 928
 929	if (pdev->multifunction)
 930		snprintf(pfun, sizeof(pfun), "f%d", PCI_FUNC(pdev->devfn));
 931	else
 932		pfun[0] = '\0';
 933
 934	snprintf(buf, buf_len, "en%sp%ds%d%s-%d::lan", pdom, pdev->bus->number,
 935		 PCI_SLOT(pdev->devfn), pfun, idx);
 936}
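    /*
     * Example of the resulting LED name: for a single-function NIC in PCI
     * domain 0, bus 1, slot 0, both pdom and pfun stay empty, so LED index 0
     * is named "enp1s0-0::lan". On a multi-function device in domain 1 the
     * same LED would be e.g. "enP1p1s0f0-0::lan".
     */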
 937
 938static void r8168fp_adjust_ocp_cmd(struct rtl8169_private *tp, u32 *cmd, int type)
 939{
 940	/* based on RTL8168FP_OOBMAC_BASE in vendor driver */
 941	if (type == ERIAR_OOB &&
 942	    (tp->mac_version == RTL_GIGA_MAC_VER_52 ||
 943	     tp->mac_version == RTL_GIGA_MAC_VER_53))
 944		*cmd |= 0xf70 << 18;
 945}
 946
 947DECLARE_RTL_COND(rtl_eriar_cond)
 948{
 949	return RTL_R32(tp, ERIAR) & ERIAR_FLAG;
 950}
 951
 952static void _rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
 953			   u32 val, int type)
 954{
 955	u32 cmd = ERIAR_WRITE_CMD | type | mask | addr;
 956
 957	if (WARN(addr & 3 || !mask, "addr: 0x%x, mask: 0x%08x\n", addr, mask))
 958		return;
 959
 960	RTL_W32(tp, ERIDR, val);
 961	r8168fp_adjust_ocp_cmd(tp, &cmd, type);
 962	RTL_W32(tp, ERIAR, cmd);
 963
 964	rtl_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
 965}
 966
 967static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
 968			  u32 val)
 969{
 970	_rtl_eri_write(tp, addr, mask, val, ERIAR_EXGMAC);
 971}
 972
 973static u32 _rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
 974{
 975	u32 cmd = ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr;
 976
 977	r8168fp_adjust_ocp_cmd(tp, &cmd, type);
 978	RTL_W32(tp, ERIAR, cmd);
 979
 980	return rtl_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
 981		RTL_R32(tp, ERIDR) : ~0;
 982}
 983
 984static u32 rtl_eri_read(struct rtl8169_private *tp, int addr)
 985{
 986	return _rtl_eri_read(tp, addr, ERIAR_EXGMAC);
 987}
 988
 989static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 p, u32 m)
 990{
 991	u32 val = rtl_eri_read(tp, addr);
 992
 993	rtl_eri_write(tp, addr, ERIAR_MASK_1111, (val & ~m) | p);
 994}
 995
 996static void rtl_eri_set_bits(struct rtl8169_private *tp, int addr, u32 p)
 997{
 998	rtl_w0w1_eri(tp, addr, p, 0);
 999}
1000
1001static void rtl_eri_clear_bits(struct rtl8169_private *tp, int addr, u32 m)
1002{
1003	rtl_w0w1_eri(tp, addr, 0, m);
1004}
1005
1006static bool rtl_ocp_reg_failure(u32 reg)
1007{
1008	return WARN_ONCE(reg & 0xffff0001, "Invalid ocp reg %x!\n", reg);
1009}
1010
1011DECLARE_RTL_COND(rtl_ocp_gphy_cond)
1012{
1013	return RTL_R32(tp, GPHY_OCP) & OCPAR_FLAG;
1014}
1015
1016static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
1017{
1018	if (rtl_ocp_reg_failure(reg))
1019		return;
1020
1021	RTL_W32(tp, GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);
1022
1023	rtl_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
1024}
1025
1026static int r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
1027{
1028	if (rtl_ocp_reg_failure(reg))
1029		return 0;
1030
1031	RTL_W32(tp, GPHY_OCP, reg << 15);
1032
1033	return rtl_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
1034		(RTL_R32(tp, GPHY_OCP) & 0xffff) : -ETIMEDOUT;
1035}
1036
1037static void __r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
1038{
1039	if (rtl_ocp_reg_failure(reg))
1040		return;
1041
1042	RTL_W32(tp, OCPDR, OCPAR_FLAG | (reg << 15) | data);
1043}
1044
1045static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
1046{
1047	unsigned long flags;
1048
1049	raw_spin_lock_irqsave(&tp->mac_ocp_lock, flags);
1050	__r8168_mac_ocp_write(tp, reg, data);
1051	raw_spin_unlock_irqrestore(&tp->mac_ocp_lock, flags);
1052}
1053
1054static u16 __r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
1055{
1056	if (rtl_ocp_reg_failure(reg))
1057		return 0;
1058
1059	RTL_W32(tp, OCPDR, reg << 15);
1060
1061	return RTL_R32(tp, OCPDR);
1062}
1063
1064static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
1065{
1066	unsigned long flags;
1067	u16 val;
1068
1069	raw_spin_lock_irqsave(&tp->mac_ocp_lock, flags);
1070	val = __r8168_mac_ocp_read(tp, reg);
1071	raw_spin_unlock_irqrestore(&tp->mac_ocp_lock, flags);
1072
1073	return val;
1074}
1075
1076static void r8168_mac_ocp_modify(struct rtl8169_private *tp, u32 reg, u16 mask,
1077				 u16 set)
1078{
1079	unsigned long flags;
1080	u16 data;
1081
1082	raw_spin_lock_irqsave(&tp->mac_ocp_lock, flags);
1083	data = __r8168_mac_ocp_read(tp, reg);
1084	__r8168_mac_ocp_write(tp, reg, (data & ~mask) | set);
1085	raw_spin_unlock_irqrestore(&tp->mac_ocp_lock, flags);
1086}
1087
1088/* Work around a hw issue with the RTL8168g PHY: the quirk disables
1089 * PHY MCU interrupts before PHY power-down.
1090 */
1091static void rtl8168g_phy_suspend_quirk(struct rtl8169_private *tp, int value)
1092{
1093	switch (tp->mac_version) {
1094	case RTL_GIGA_MAC_VER_40:
1095		if (value & BMCR_RESET || !(value & BMCR_PDOWN))
1096			rtl_eri_set_bits(tp, 0x1a8, 0xfc000000);
1097		else
1098			rtl_eri_clear_bits(tp, 0x1a8, 0xfc000000);
1099		break;
1100	default:
1101		break;
1102	}
1103}
1104
1105static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
1106{
1107	if (reg == 0x1f) {
1108		tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE;
1109		return;
1110	}
1111
1112	if (tp->ocp_base != OCP_STD_PHY_BASE)
1113		reg -= 0x10;
1114
1115	if (tp->ocp_base == OCP_STD_PHY_BASE && reg == MII_BMCR)
1116		rtl8168g_phy_suspend_quirk(tp, value);
1117
1118	r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value);
1119}
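    /*
     * Worked example of the paging scheme above: writing 0x0a43 to register
     * 0x1f sets ocp_base = 0x0a43 << 4 = 0xa430; a subsequent write to
     * register 0x10 is rebased (reg -= 0x10) and lands at OCP address
     * 0xa430 + 0 * 2 = 0xa430. With the standard page selected
     * (ocp_base == OCP_STD_PHY_BASE, 0xa400), MII register 2 maps to
     * 0xa400 + 2 * 2 = 0xa404.
     */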
1120
1121static int r8168g_mdio_read(struct rtl8169_private *tp, int reg)
1122{
1123	if (reg == 0x1f)
1124		return tp->ocp_base == OCP_STD_PHY_BASE ? 0 : tp->ocp_base >> 4;
1125
1126	if (tp->ocp_base != OCP_STD_PHY_BASE)
1127		reg -= 0x10;
1128
1129	return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2);
1130}
1131
1132static void mac_mcu_write(struct rtl8169_private *tp, int reg, int value)
1133{
1134	if (reg == 0x1f) {
1135		tp->ocp_base = value << 4;
1136		return;
1137	}
1138
1139	r8168_mac_ocp_write(tp, tp->ocp_base + reg, value);
1140}
1141
1142static int mac_mcu_read(struct rtl8169_private *tp, int reg)
1143{
1144	return r8168_mac_ocp_read(tp, tp->ocp_base + reg);
1145}
1146
1147DECLARE_RTL_COND(rtl_phyar_cond)
1148{
1149	return RTL_R32(tp, PHYAR) & 0x80000000;
1150}
1151
1152static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value)
1153{
1154	RTL_W32(tp, PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff));
1155
1156	rtl_loop_wait_low(tp, &rtl_phyar_cond, 25, 20);
1157	/*
1158	 * According to hardware specs a 20us delay is required after write
1159	 * complete indication, but before sending the next command.
1160	 */
1161	udelay(20);
1162}
1163
1164static int r8169_mdio_read(struct rtl8169_private *tp, int reg)
1165{
1166	int value;
1167
1168	RTL_W32(tp, PHYAR, 0x0 | (reg & 0x1f) << 16);
1169
1170	value = rtl_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ?
1171		RTL_R32(tp, PHYAR) & 0xffff : -ETIMEDOUT;
1172
1173	/*
1174	 * According to hardware specs a 20us delay is required after read
1175	 * complete indication, but before sending the next command.
1176	 */
1177	udelay(20);
1178
1179	return value;
1180}
1181
1182DECLARE_RTL_COND(rtl_ocpar_cond)
1183{
1184	return RTL_R32(tp, OCPAR) & OCPAR_FLAG;
1185}
1186
1187#define R8168DP_1_MDIO_ACCESS_BIT	0x00020000
1188
1189static void r8168dp_2_mdio_start(struct rtl8169_private *tp)
1190{
1191	RTL_W32(tp, 0xd0, RTL_R32(tp, 0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT);
1192}
1193
1194static void r8168dp_2_mdio_stop(struct rtl8169_private *tp)
1195{
1196	RTL_W32(tp, 0xd0, RTL_R32(tp, 0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
1197}
1198
1199static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value)
1200{
1201	r8168dp_2_mdio_start(tp);
1202
1203	r8169_mdio_write(tp, reg, value);
1204
1205	r8168dp_2_mdio_stop(tp);
1206}
1207
1208static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
1209{
1210	int value;
1211
1212	/* Work around issue with chip reporting wrong PHY ID */
1213	if (reg == MII_PHYSID2)
1214		return 0xc912;
1215
1216	r8168dp_2_mdio_start(tp);
1217
1218	value = r8169_mdio_read(tp, reg);
1219
1220	r8168dp_2_mdio_stop(tp);
1221
1222	return value;
1223}
1224
1225static void rtl_writephy(struct rtl8169_private *tp, int location, int val)
1226{
1227	switch (tp->mac_version) {
1228	case RTL_GIGA_MAC_VER_28:
1229	case RTL_GIGA_MAC_VER_31:
1230		r8168dp_2_mdio_write(tp, location, val);
1231		break;
1232	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66:
1233		r8168g_mdio_write(tp, location, val);
1234		break;
1235	default:
1236		r8169_mdio_write(tp, location, val);
1237		break;
1238	}
1239}
1240
1241static int rtl_readphy(struct rtl8169_private *tp, int location)
1242{
1243	switch (tp->mac_version) {
1244	case RTL_GIGA_MAC_VER_28:
1245	case RTL_GIGA_MAC_VER_31:
1246		return r8168dp_2_mdio_read(tp, location);
1247	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66:
1248		return r8168g_mdio_read(tp, location);
1249	default:
1250		return r8169_mdio_read(tp, location);
1251	}
1252}
1253
1254DECLARE_RTL_COND(rtl_ephyar_cond)
1255{
1256	return RTL_R32(tp, EPHYAR) & EPHYAR_FLAG;
1257}
1258
1259static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value)
1260{
1261	RTL_W32(tp, EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
1262		(reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
1263
1264	rtl_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100);
1265
1266	udelay(10);
1267}
1268
1269static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
1270{
1271	RTL_W32(tp, EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
1272
1273	return rtl_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ?
1274		RTL_R32(tp, EPHYAR) & EPHYAR_DATA_MASK : ~0;
1275}
1276
1277static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u16 reg)
1278{
1279	RTL_W32(tp, OCPAR, 0x0fu << 12 | (reg & 0x0fff));
1280	return rtl_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ?
1281		RTL_R32(tp, OCPDR) : ~0;
1282}
1283
1284static u32 r8168ep_ocp_read(struct rtl8169_private *tp, u16 reg)
1285{
1286	return _rtl_eri_read(tp, reg, ERIAR_OOB);
1287}
1288
1289static void r8168dp_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg,
1290			      u32 data)
1291{
1292	RTL_W32(tp, OCPDR, data);
1293	RTL_W32(tp, OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
1294	rtl_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20);
1295}
1296
1297static void r8168ep_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg,
1298			      u32 data)
1299{
1300	_rtl_eri_write(tp, reg, ((u32)mask & 0x0f) << ERIAR_MASK_SHIFT,
1301		       data, ERIAR_OOB);
1302}
1303
1304static void r8168dp_oob_notify(struct rtl8169_private *tp, u8 cmd)
1305{
1306	rtl_eri_write(tp, 0xe8, ERIAR_MASK_0001, cmd);
1307
1308	r8168dp_ocp_write(tp, 0x1, 0x30, 0x00000001);
1309}
1310
1311#define OOB_CMD_RESET		0x00
1312#define OOB_CMD_DRIVER_START	0x05
1313#define OOB_CMD_DRIVER_STOP	0x06
1314
1315static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
1316{
1317	return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
1318}
1319
1320DECLARE_RTL_COND(rtl_dp_ocp_read_cond)
1321{
1322	u16 reg;
1323
1324	reg = rtl8168_get_ocp_reg(tp);
1325
1326	return r8168dp_ocp_read(tp, reg) & 0x00000800;
1327}
1328
1329DECLARE_RTL_COND(rtl_ep_ocp_read_cond)
1330{
1331	return r8168ep_ocp_read(tp, 0x124) & 0x00000001;
1332}
1333
1334DECLARE_RTL_COND(rtl_ocp_tx_cond)
1335{
1336	return RTL_R8(tp, IBISR0) & 0x20;
1337}
1338
1339static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
1340{
1341	RTL_W8(tp, IBCR2, RTL_R8(tp, IBCR2) & ~0x01);
1342	rtl_loop_wait_high(tp, &rtl_ocp_tx_cond, 50000, 2000);
1343	RTL_W8(tp, IBISR0, RTL_R8(tp, IBISR0) | 0x20);
1344	RTL_W8(tp, IBCR0, RTL_R8(tp, IBCR0) & ~0x01);
1345}
1346
1347static void rtl8168dp_driver_start(struct rtl8169_private *tp)
1348{
1349	r8168dp_oob_notify(tp, OOB_CMD_DRIVER_START);
1350	if (tp->dash_enabled)
1351		rtl_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10000, 10);
1352}
1353
1354static void rtl8168ep_driver_start(struct rtl8169_private *tp)
1355{
1356	r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START);
1357	r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01);
1358	if (tp->dash_enabled)
1359		rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 30);
1360}
1361
1362static void rtl8168_driver_start(struct rtl8169_private *tp)
1363{
1364	if (tp->dash_type == RTL_DASH_DP)
1365		rtl8168dp_driver_start(tp);
1366	else
1367		rtl8168ep_driver_start(tp);
1368}
1369
1370static void rtl8168dp_driver_stop(struct rtl8169_private *tp)
1371{
1372	r8168dp_oob_notify(tp, OOB_CMD_DRIVER_STOP);
1373	if (tp->dash_enabled)
1374		rtl_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10000, 10);
1375}
1376
1377static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
1378{
1379	rtl8168ep_stop_cmac(tp);
1380	r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP);
1381	r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01);
1382	if (tp->dash_enabled)
1383		rtl_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10);
1384}
1385
1386static void rtl8168_driver_stop(struct rtl8169_private *tp)
1387{
1388	if (tp->dash_type == RTL_DASH_DP)
1389		rtl8168dp_driver_stop(tp);
1390	else
1391		rtl8168ep_driver_stop(tp);
1392}
1393
1394static bool r8168dp_check_dash(struct rtl8169_private *tp)
1395{
1396	u16 reg = rtl8168_get_ocp_reg(tp);
1397
1398	return r8168dp_ocp_read(tp, reg) & BIT(15);
1399}
1400
1401static bool r8168ep_check_dash(struct rtl8169_private *tp)
1402{
1403	return r8168ep_ocp_read(tp, 0x128) & BIT(0);
1404}
1405
1406static bool rtl_dash_is_enabled(struct rtl8169_private *tp)
1407{
1408	switch (tp->dash_type) {
1409	case RTL_DASH_DP:
1410		return r8168dp_check_dash(tp);
1411	case RTL_DASH_EP:
1412		return r8168ep_check_dash(tp);
1413	default:
1414		return false;
1415	}
1416}
1417
1418static enum rtl_dash_type rtl_get_dash_type(struct rtl8169_private *tp)
1419{
1420	switch (tp->mac_version) {
1421	case RTL_GIGA_MAC_VER_28:
1422	case RTL_GIGA_MAC_VER_31:
1423		return RTL_DASH_DP;
1424	case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_53:
1425		return RTL_DASH_EP;
1426	default:
1427		return RTL_DASH_NONE;
1428	}
1429}
1430
1431static void rtl_set_d3_pll_down(struct rtl8169_private *tp, bool enable)
1432{
1433	if (tp->mac_version >= RTL_GIGA_MAC_VER_25 &&
1434	    tp->mac_version != RTL_GIGA_MAC_VER_28 &&
1435	    tp->mac_version != RTL_GIGA_MAC_VER_31 &&
1436	    tp->mac_version != RTL_GIGA_MAC_VER_38)
1437		r8169_mod_reg8_cond(tp, PMCH, D3_NO_PLL_DOWN, !enable);
1438}
1439
1440static void rtl_reset_packet_filter(struct rtl8169_private *tp)
1441{
1442	rtl_eri_clear_bits(tp, 0xdc, BIT(0));
1443	rtl_eri_set_bits(tp, 0xdc, BIT(0));
1444}
1445
1446DECLARE_RTL_COND(rtl_efusear_cond)
1447{
1448	return RTL_R32(tp, EFUSEAR) & EFUSEAR_FLAG;
1449}
1450
1451u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
1452{
1453	RTL_W32(tp, EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
1454
1455	return rtl_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ?
1456		RTL_R32(tp, EFUSEAR) & EFUSEAR_DATA_MASK : ~0;
1457}
1458
1459static u32 rtl_get_events(struct rtl8169_private *tp)
1460{
1461	if (rtl_is_8125(tp))
1462		return RTL_R32(tp, IntrStatus_8125);
1463	else
1464		return RTL_R16(tp, IntrStatus);
1465}
1466
1467static void rtl_ack_events(struct rtl8169_private *tp, u32 bits)
1468{
1469	if (rtl_is_8125(tp))
1470		RTL_W32(tp, IntrStatus_8125, bits);
1471	else
1472		RTL_W16(tp, IntrStatus, bits);
1473}
1474
1475static void rtl_irq_disable(struct rtl8169_private *tp)
1476{
1477	if (rtl_is_8125(tp))
1478		RTL_W32(tp, IntrMask_8125, 0);
1479	else
1480		RTL_W16(tp, IntrMask, 0);
1481}
1482
1483static void rtl_irq_enable(struct rtl8169_private *tp)
1484{
1485	if (rtl_is_8125(tp))
1486		RTL_W32(tp, IntrMask_8125, tp->irq_mask);
1487	else
1488		RTL_W16(tp, IntrMask, tp->irq_mask);
1489}
1490
1491static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
1492{
1493	rtl_irq_disable(tp);
1494	rtl_ack_events(tp, 0xffffffff);
1495	rtl_pci_commit(tp);
1496}
1497
1498static void rtl_link_chg_patch(struct rtl8169_private *tp)
1499{
1500	struct phy_device *phydev = tp->phydev;
1501
1502	if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
1503	    tp->mac_version == RTL_GIGA_MAC_VER_38) {
1504		if (phydev->speed == SPEED_1000) {
1505			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011);
1506			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005);
1507		} else if (phydev->speed == SPEED_100) {
1508			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f);
1509			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005);
1510		} else {
1511			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f);
1512			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f);
1513		}
1514		rtl_reset_packet_filter(tp);
1515	} else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
1516		   tp->mac_version == RTL_GIGA_MAC_VER_36) {
1517		if (phydev->speed == SPEED_1000) {
1518			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011);
1519			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005);
1520		} else {
1521			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f);
1522			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f);
1523		}
1524	} else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
1525		if (phydev->speed == SPEED_10) {
1526			rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02);
1527			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060a);
1528		} else {
1529			rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000);
1530		}
1531	}
1532}
1533
1534#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
1535
1536static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1537{
1538	struct rtl8169_private *tp = netdev_priv(dev);
1539
1540	wol->supported = WAKE_ANY;
1541	wol->wolopts = tp->saved_wolopts;
1542}
1543
1544static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
1545{
1546	rtl_unlock_config_regs(tp);
1547
1548	if (rtl_is_8168evl_up(tp)) {
1549		if (wolopts & WAKE_MAGIC)
1550			rtl_eri_set_bits(tp, 0x0dc, MagicPacket_v2);
1551		else
1552			rtl_eri_clear_bits(tp, 0x0dc, MagicPacket_v2);
1553	} else if (rtl_is_8125(tp)) {
1554		if (wolopts & WAKE_MAGIC)
1555			r8168_mac_ocp_modify(tp, 0xc0b6, 0, BIT(0));
1556		else
1557			r8168_mac_ocp_modify(tp, 0xc0b6, BIT(0), 0);
1558	} else {
1559		r8169_mod_reg8_cond(tp, Config3, MagicPacket,
1560				    wolopts & WAKE_MAGIC);
1561	}
1562
1563	r8169_mod_reg8_cond(tp, Config3, LinkUp, wolopts & WAKE_PHY);
1564	if (rtl_is_8125(tp))
1565		r8168_mac_ocp_modify(tp, 0xe0c6, 0x3f,
1566				     wolopts & WAKE_PHY ? 0x13 : 0);
1567	r8169_mod_reg8_cond(tp, Config5, UWF, wolopts & WAKE_UCAST);
1568	r8169_mod_reg8_cond(tp, Config5, BWF, wolopts & WAKE_BCAST);
1569	r8169_mod_reg8_cond(tp, Config5, MWF, wolopts & WAKE_MCAST);
1570	r8169_mod_reg8_cond(tp, Config5, LanWake, wolopts);
1571
1572	switch (tp->mac_version) {
1573	case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
1574		r8169_mod_reg8_cond(tp, Config1, PMEnable, wolopts);
1575		break;
1576	case RTL_GIGA_MAC_VER_34:
1577	case RTL_GIGA_MAC_VER_37:
1578	case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_66:
1579		r8169_mod_reg8_cond(tp, Config2, PME_SIGNAL, wolopts);
1580		break;
1581	default:
1582		break;
1583	}
1584
1585	rtl_lock_config_regs(tp);
1586
1587	device_set_wakeup_enable(tp_to_dev(tp), wolopts);
1588
1589	if (!tp->dash_enabled) {
1590		rtl_set_d3_pll_down(tp, !wolopts);
1591		tp->dev->ethtool->wol_enabled = wolopts ? 1 : 0;
1592	}
1593}
1594
1595static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1596{
1597	struct rtl8169_private *tp = netdev_priv(dev);
1598
1599	if (wol->wolopts & ~WAKE_ANY)
1600		return -EINVAL;
1601
1602	tp->saved_wolopts = wol->wolopts;
1603	__rtl8169_set_wol(tp, tp->saved_wolopts);
1604
1605	return 0;
1606}
1607
1608static void rtl8169_get_drvinfo(struct net_device *dev,
1609				struct ethtool_drvinfo *info)
1610{
1611	struct rtl8169_private *tp = netdev_priv(dev);
1612	struct rtl_fw *rtl_fw = tp->rtl_fw;
1613
1614	strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
1615	strscpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
1616	BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
1617	if (rtl_fw)
1618		strscpy(info->fw_version, rtl_fw->version,
1619			sizeof(info->fw_version));
1620}
1621
1622static int rtl8169_get_regs_len(struct net_device *dev)
1623{
1624	return R8169_REGS_SIZE;
1625}
1626
1627static netdev_features_t rtl8169_fix_features(struct net_device *dev,
1628	netdev_features_t features)
1629{
1630	struct rtl8169_private *tp = netdev_priv(dev);
1631
1632	if (dev->mtu > TD_MSS_MAX)
1633		features &= ~NETIF_F_ALL_TSO;
1634
1635	if (dev->mtu > ETH_DATA_LEN &&
1636	    tp->mac_version > RTL_GIGA_MAC_VER_06)
1637		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_ALL_TSO);
1638
1639	return features;
1640}
1641
1642static void rtl_set_rx_config_features(struct rtl8169_private *tp,
1643				       netdev_features_t features)
1644{
1645	u32 rx_config = RTL_R32(tp, RxConfig);
1646
1647	if (features & NETIF_F_RXALL)
1648		rx_config |= RX_CONFIG_ACCEPT_ERR_MASK;
1649	else
1650		rx_config &= ~RX_CONFIG_ACCEPT_ERR_MASK;
1651
1652	if (rtl_is_8125(tp)) {
1653		if (features & NETIF_F_HW_VLAN_CTAG_RX)
1654			rx_config |= RX_VLAN_8125;
1655		else
1656			rx_config &= ~RX_VLAN_8125;
1657	}
1658
1659	RTL_W32(tp, RxConfig, rx_config);
1660}
1661
1662static int rtl8169_set_features(struct net_device *dev,
1663				netdev_features_t features)
1664{
1665	struct rtl8169_private *tp = netdev_priv(dev);
1666
1667	rtl_set_rx_config_features(tp, features);
1668
1669	if (features & NETIF_F_RXCSUM)
1670		tp->cp_cmd |= RxChkSum;
1671	else
1672		tp->cp_cmd &= ~RxChkSum;
1673
1674	if (!rtl_is_8125(tp)) {
1675		if (features & NETIF_F_HW_VLAN_CTAG_RX)
1676			tp->cp_cmd |= RxVlan;
1677		else
1678			tp->cp_cmd &= ~RxVlan;
1679	}
1680
1681	RTL_W16(tp, CPlusCmd, tp->cp_cmd);
1682	rtl_pci_commit(tp);
1683
1684	return 0;
1685}
1686
1687static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb)
1688{
1689	return (skb_vlan_tag_present(skb)) ?
1690		TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00;
1691}
1692
1693static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
1694{
1695	u32 opts2 = le32_to_cpu(desc->opts2);
1696
1697	if (opts2 & RxVlanTag)
1698		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));
1699}
1700
1701static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1702			     void *p)
1703{
1704	struct rtl8169_private *tp = netdev_priv(dev);
1705	u32 __iomem *data = tp->mmio_addr;
1706	u32 *dw = p;
1707	int i;
1708
1709	for (i = 0; i < R8169_REGS_SIZE; i += 4)
1710		memcpy_fromio(dw++, data++, 4);
1711}
1712
1713static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
1714	"tx_packets",
1715	"rx_packets",
1716	"tx_errors",
1717	"rx_errors",
1718	"rx_missed",
1719	"align_errors",
1720	"tx_single_collisions",
1721	"tx_multi_collisions",
1722	"unicast",
1723	"broadcast",
1724	"multicast",
1725	"tx_aborted",
1726	"tx_underrun",
1727};
1728
1729static int rtl8169_get_sset_count(struct net_device *dev, int sset)
1730{
1731	switch (sset) {
1732	case ETH_SS_STATS:
1733		return ARRAY_SIZE(rtl8169_gstrings);
1734	default:
1735		return -EOPNOTSUPP;
1736	}
1737}
1738
1739DECLARE_RTL_COND(rtl_counters_cond)
1740{
1741	return RTL_R32(tp, CounterAddrLow) & (CounterReset | CounterDump);
1742}
1743
1744static void rtl8169_do_counters(struct rtl8169_private *tp, u32 counter_cmd)
1745{
1746	u32 cmd = lower_32_bits(tp->counters_phys_addr);
1747
1748	RTL_W32(tp, CounterAddrHigh, upper_32_bits(tp->counters_phys_addr));
1749	rtl_pci_commit(tp);
1750	RTL_W32(tp, CounterAddrLow, cmd);
1751	RTL_W32(tp, CounterAddrLow, cmd | counter_cmd);
1752
1753	rtl_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
1754}
1755
1756static void rtl8169_update_counters(struct rtl8169_private *tp)
1757{
1758	u8 val = RTL_R8(tp, ChipCmd);
1759
1760	/*
1761	 * Some chips are unable to dump tally counters when the receiver
1762	 * is disabled. If it reads 0xff, the chip may be in a PCI power-save state.
1763	 */
1764	if (val & CmdRxEnb && val != 0xff)
1765		rtl8169_do_counters(tp, CounterDump);
1766}
1767
1768static void rtl8169_init_counter_offsets(struct rtl8169_private *tp)
1769{
1770	struct rtl8169_counters *counters = tp->counters;
1771
1772	/*
1773	 * rtl8169_init_counter_offsets is called from rtl_open.  On chip
1774	 * versions prior to RTL_GIGA_MAC_VER_19 the tally counters are only
1775	 * reset by a power cycle, while the counter values collected by the
1776	 * driver are reset at every driver unload/load cycle.
1777	 *
1778	 * To make sure the HW values returned by @get_stats64 match the SW
1779	 * values, we collect the initial values at first open(*) and use them
1780	 * as offsets to normalize the values returned by @get_stats64.
1781	 *
1782	 * (*) We can't call rtl8169_init_counter_offsets from rtl_init_one
1783	 * for the reason stated in rtl8169_update_counters; CmdRxEnb is only
1784	 * set at open time by rtl_hw_start.
1785	 */
1786
1787	if (tp->tc_offset.inited)
1788		return;
1789
1790	if (tp->mac_version >= RTL_GIGA_MAC_VER_19) {
1791		rtl8169_do_counters(tp, CounterReset);
1792	} else {
1793		rtl8169_update_counters(tp);
1794		tp->tc_offset.tx_errors = counters->tx_errors;
1795		tp->tc_offset.tx_multi_collision = counters->tx_multi_collision;
1796		tp->tc_offset.tx_aborted = counters->tx_aborted;
1797		tp->tc_offset.rx_missed = counters->rx_missed;
1798	}
1799
1800	tp->tc_offset.inited = true;
1801}
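    /*
     * Sketch of how these offsets are presumably consumed by the stats path
     * (which lies outside this excerpt): the values reported to userspace
     * would be computed as, for example,
     *
     *	stats->tx_errors = le64_to_cpu(tp->counters->tx_errors) -
     *			   le64_to_cpu(tp->tc_offset.tx_errors);
     *
     * so the hardware tally totals line up with the counters the driver has
     * maintained since it was loaded.
     */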
1802
1803static void rtl8169_get_ethtool_stats(struct net_device *dev,
1804				      struct ethtool_stats *stats, u64 *data)
1805{
1806	struct rtl8169_private *tp = netdev_priv(dev);
1807	struct rtl8169_counters *counters;
1808
1809	counters = tp->counters;
1810	rtl8169_update_counters(tp);
1811
1812	data[0] = le64_to_cpu(counters->tx_packets);
1813	data[1] = le64_to_cpu(counters->rx_packets);
1814	data[2] = le64_to_cpu(counters->tx_errors);
1815	data[3] = le32_to_cpu(counters->rx_errors);
1816	data[4] = le16_to_cpu(counters->rx_missed);
1817	data[5] = le16_to_cpu(counters->align_errors);
1818	data[6] = le32_to_cpu(counters->tx_one_collision);
1819	data[7] = le32_to_cpu(counters->tx_multi_collision);
1820	data[8] = le64_to_cpu(counters->rx_unicast);
1821	data[9] = le64_to_cpu(counters->rx_broadcast);
1822	data[10] = le32_to_cpu(counters->rx_multicast);
1823	data[11] = le16_to_cpu(counters->tx_aborted);
1824	data[12] = le16_to_cpu(counters->tx_underrun);
1825}
1826
1827static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1828{
1829	switch(stringset) {
1830	case ETH_SS_STATS:
1831		memcpy(data, rtl8169_gstrings, sizeof(rtl8169_gstrings));
1832		break;
1833	}
1834}
1835
1836/*
1837 * Interrupt coalescing
1838 *
1839 * > 1 - the availability of the IntrMitigate (0xe2) register through the
1840 * >     8169, 8168 and 810x line of chipsets
1841 *
1842 * 8169, 8168, and 8136(810x) serial chipsets support it.
1843 *
1844 * > 2 - the Tx timer unit at gigabit speed
1845 *
1846 * The unit of the timer depends on both the speed and the setting of CPlusCmd
1847 * (0xe0) bit 1 and bit 0.
1848 *
1849 * For 8169
1850 * bit[1:0] \ speed        1000M           100M            10M
1851 * 0 0                     320ns           2.56us          40.96us
1852 * 0 1                     2.56us          20.48us         327.7us
1853 * 1 0                     5.12us          40.96us         655.4us
1854 * 1 1                     10.24us         81.92us         1.31ms
1855 *
1856 * For the other chip versions (8168, 810x)
1857 * bit[1:0] \ speed        1000M           100M            10M
1858 * 0 0                     5us             2.56us          40.96us
1859 * 0 1                     40us            20.48us         327.7us
1860 * 1 0                     80us            40.96us         655.4us
1861 * 1 1                     160us           81.92us         1.31ms
1862 */
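/*
 * Worked example (illustrative): on an 8168 at 1000Mbps with CPlusCmd[1:0]
 * = 01 the timer tick is 40us (40000ns per the table above). A raw
 * IntrMitigate timer field of 3 therefore corresponds to
 *
 *	DIV_ROUND_UP(3 * 40000, 1000) = 120 usecs
 *
 * which is what rtl_get_coalesce() below reports; rtl_set_coalesce()
 * performs the inverse conversion when programming the register.
 */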
1863
1864/* rx/tx scale factors for all CPlusCmd[0:1] cases */
1865struct rtl_coalesce_info {
1866	u32 speed;
1867	u32 scale_nsecs[4];
1868};
1869
1870/* produce array with base delay *1, *8, *8*2, *8*2*2 */
1871#define COALESCE_DELAY(d) { (d), 8 * (d), 16 * (d), 32 * (d) }
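/* e.g. COALESCE_DELAY(320) expands to { 320, 2560, 5120, 10240 }, the four
 * selectable tick lengths (in ns) for the 8169 at 1000Mbps.
 */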
1872
1873static const struct rtl_coalesce_info rtl_coalesce_info_8169[] = {
1874	{ SPEED_1000,	COALESCE_DELAY(320) },
1875	{ SPEED_100,	COALESCE_DELAY(2560) },
1876	{ SPEED_10,	COALESCE_DELAY(40960) },
1877	{ 0 },
1878};
1879
1880static const struct rtl_coalesce_info rtl_coalesce_info_8168_8136[] = {
1881	{ SPEED_1000,	COALESCE_DELAY(5000) },
1882	{ SPEED_100,	COALESCE_DELAY(2560) },
1883	{ SPEED_10,	COALESCE_DELAY(40960) },
1884	{ 0 },
1885};
1886#undef COALESCE_DELAY
1887
1888/* get rx/tx scale vector corresponding to current speed */
1889static const struct rtl_coalesce_info *
1890rtl_coalesce_info(struct rtl8169_private *tp)
1891{
1892	const struct rtl_coalesce_info *ci;
1893
1894	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
1895		ci = rtl_coalesce_info_8169;
1896	else
1897		ci = rtl_coalesce_info_8168_8136;
1898
1899	/* if speed is unknown, assume the highest one */
1900	if (tp->phydev->speed == SPEED_UNKNOWN)
1901		return ci;
1902
1903	for (; ci->speed; ci++) {
1904		if (tp->phydev->speed == ci->speed)
1905			return ci;
1906	}
1907
1908	return ERR_PTR(-ELNRNG);
1909}
1910
1911static int rtl_get_coalesce(struct net_device *dev,
1912			    struct ethtool_coalesce *ec,
1913			    struct kernel_ethtool_coalesce *kernel_coal,
1914			    struct netlink_ext_ack *extack)
1915{
1916	struct rtl8169_private *tp = netdev_priv(dev);
1917	const struct rtl_coalesce_info *ci;
1918	u32 scale, c_us, c_fr;
1919	u16 intrmit;
1920
1921	if (rtl_is_8125(tp))
1922		return -EOPNOTSUPP;
1923
1924	memset(ec, 0, sizeof(*ec));
1925
1926	/* get rx/tx scale corresponding to current speed and CPlusCmd[0:1] */
1927	ci = rtl_coalesce_info(tp);
1928	if (IS_ERR(ci))
1929		return PTR_ERR(ci);
1930
1931	scale = ci->scale_nsecs[tp->cp_cmd & INTT_MASK];
1932
1933	intrmit = RTL_R16(tp, IntrMitigate);
1934
1935	c_us = FIELD_GET(RTL_COALESCE_TX_USECS, intrmit);
1936	ec->tx_coalesce_usecs = DIV_ROUND_UP(c_us * scale, 1000);
1937
1938	c_fr = FIELD_GET(RTL_COALESCE_TX_FRAMES, intrmit);
1939	/* ethtool_coalesce states usecs and max_frames must not both be 0 */
1940	ec->tx_max_coalesced_frames = (c_us || c_fr) ? c_fr * 4 : 1;
1941
1942	c_us = FIELD_GET(RTL_COALESCE_RX_USECS, intrmit);
1943	ec->rx_coalesce_usecs = DIV_ROUND_UP(c_us * scale, 1000);
1944
1945	c_fr = FIELD_GET(RTL_COALESCE_RX_FRAMES, intrmit);
1946	ec->rx_max_coalesced_frames = (c_us || c_fr) ? c_fr * 4 : 1;
1947
1948	return 0;
1949}
1950
1951/* choose appropriate scale factor and CPlusCmd[0:1] for (speed, usec) */
1952static int rtl_coalesce_choose_scale(struct rtl8169_private *tp, u32 usec,
1953				     u16 *cp01)
1954{
1955	const struct rtl_coalesce_info *ci;
1956	u16 i;
1957
1958	ci = rtl_coalesce_info(tp);
1959	if (IS_ERR(ci))
1960		return PTR_ERR(ci);
1961
1962	for (i = 0; i < 4; i++) {
1963		if (usec <= ci->scale_nsecs[i] * RTL_COALESCE_T_MAX / 1000U) {
1964			*cp01 = i;
1965			return ci->scale_nsecs[i];
1966		}
1967	}
1968
1969	return -ERANGE;
1970}
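/*
 * Rough example (assuming RTL_COALESCE_T_MAX is the 4-bit timer field
 * maximum of 15, defined elsewhere): a request for 100us at 1000Mbps on an
 * 8168 rejects scale_nsecs[0] = 5000 (covers at most 5000 * 15 / 1000 =
 * 75us) and picks scale_nsecs[1] = 40000 (up to 600us), returning 40000
 * with *cp01 = 1.
 */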
1971
1972static int rtl_set_coalesce(struct net_device *dev,
1973			    struct ethtool_coalesce *ec,
1974			    struct kernel_ethtool_coalesce *kernel_coal,
1975			    struct netlink_ext_ack *extack)
1976{
1977	struct rtl8169_private *tp = netdev_priv(dev);
1978	u32 tx_fr = ec->tx_max_coalesced_frames;
1979	u32 rx_fr = ec->rx_max_coalesced_frames;
1980	u32 coal_usec_max, units;
1981	u16 w = 0, cp01 = 0;
1982	int scale;
1983
1984	if (rtl_is_8125(tp))
1985		return -EOPNOTSUPP;
1986
1987	if (rx_fr > RTL_COALESCE_FRAME_MAX || tx_fr > RTL_COALESCE_FRAME_MAX)
1988		return -ERANGE;
1989
1990	coal_usec_max = max(ec->rx_coalesce_usecs, ec->tx_coalesce_usecs);
1991	scale = rtl_coalesce_choose_scale(tp, coal_usec_max, &cp01);
1992	if (scale < 0)
1993		return scale;
1994
1995	/* Accept the max_frames=1 we returned in rtl_get_coalesce. Accept it
1996	 * not only when usecs=0 because of e.g. the following scenario:
1997	 *
1998	 * - both rx_usecs=0 & rx_frames=0 in hardware (no delay on RX)
1999	 * - rtl_get_coalesce returns rx_usecs=0, rx_frames=1
2000	 * - then user does `ethtool -C eth0 rx-usecs 100`
2001	 *
2002	 * Since ethtool sends to kernel whole ethtool_coalesce settings,
2003	 * if we want to ignore rx_frames then it has to be set to 0.
2004	 */
2005	if (rx_fr == 1)
2006		rx_fr = 0;
2007	if (tx_fr == 1)
2008		tx_fr = 0;
2009
2010	/* HW requires time limit to be set if frame limit is set */
2011	if ((tx_fr && !ec->tx_coalesce_usecs) ||
2012	    (rx_fr && !ec->rx_coalesce_usecs))
2013		return -EINVAL;
2014
2015	w |= FIELD_PREP(RTL_COALESCE_TX_FRAMES, DIV_ROUND_UP(tx_fr, 4));
2016	w |= FIELD_PREP(RTL_COALESCE_RX_FRAMES, DIV_ROUND_UP(rx_fr, 4));
2017
2018	units = DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000U, scale);
2019	w |= FIELD_PREP(RTL_COALESCE_TX_USECS, units);
2020	units = DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000U, scale);
2021	w |= FIELD_PREP(RTL_COALESCE_RX_USECS, units);
2022
2023	RTL_W16(tp, IntrMitigate, w);
2024
2025	/* Meaning of PktCntrDisable bit changed from RTL8168e-vl */
2026	if (rtl_is_8168evl_up(tp)) {
2027		if (!rx_fr && !tx_fr)
2028			/* disable packet counter */
2029			tp->cp_cmd |= PktCntrDisable;
2030		else
2031			tp->cp_cmd &= ~PktCntrDisable;
2032	}
2033
2034	tp->cp_cmd = (tp->cp_cmd & ~INTT_MASK) | cp01;
2035	RTL_W16(tp, CPlusCmd, tp->cp_cmd);
2036	rtl_pci_commit(tp);
2037
2038	return 0;
2039}
2040
2041static void rtl_set_eee_txidle_timer(struct rtl8169_private *tp)
2042{
2043	unsigned int timer_val = READ_ONCE(tp->dev->mtu) + ETH_HLEN + 0x20;
2044
2045	switch (tp->mac_version) {
2046	case RTL_GIGA_MAC_VER_46:
2047	case RTL_GIGA_MAC_VER_48:
2048		tp->tx_lpi_timer = timer_val;
2049		r8168_mac_ocp_write(tp, 0xe048, timer_val);
2050		break;
2051	case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
2052		tp->tx_lpi_timer = timer_val;
2053		RTL_W16(tp, EEE_TXIDLE_TIMER_8125, timer_val);
2054		break;
2055	default:
2056		break;
2057	}
2058}
2059
2060static unsigned int r8169_get_tx_lpi_timer_us(struct rtl8169_private *tp)
2061{
2062	unsigned int speed = tp->phydev->speed;
2063	unsigned int timer = tp->tx_lpi_timer;
2064
2065	if (!timer || speed == SPEED_UNKNOWN)
2066		return 0;
2067
2068	/* tx_lpi_timer value is in bytes */
2069	return DIV_ROUND_CLOSEST(timer * BITS_PER_BYTE, speed);
2070}
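/*
 * For example: tx_lpi_timer = 1518 bytes at SPEED_1000 (1000 Mb/s) gives
 * DIV_ROUND_CLOSEST(1518 * 8, 1000) = 12 usecs.
 */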
2071
2072static int rtl8169_get_eee(struct net_device *dev, struct ethtool_keee *data)
2073{
2074	struct rtl8169_private *tp = netdev_priv(dev);
2075	int ret;
2076
2077	if (!rtl_supports_eee(tp))
2078		return -EOPNOTSUPP;
2079
2080	ret = phy_ethtool_get_eee(tp->phydev, data);
2081	if (ret)
2082		return ret;
2083
2084	data->tx_lpi_timer = r8169_get_tx_lpi_timer_us(tp);
2085
2086	return 0;
2087}
2088
2089static int rtl8169_set_eee(struct net_device *dev, struct ethtool_keee *data)
2090{
2091	struct rtl8169_private *tp = netdev_priv(dev);
2092
2093	if (!rtl_supports_eee(tp))
2094		return -EOPNOTSUPP;
2095
2096	return phy_ethtool_set_eee(tp->phydev, data);
2097}
2098
2099static void rtl8169_get_ringparam(struct net_device *dev,
2100				  struct ethtool_ringparam *data,
2101				  struct kernel_ethtool_ringparam *kernel_data,
2102				  struct netlink_ext_ack *extack)
2103{
2104	data->rx_max_pending = NUM_RX_DESC;
2105	data->rx_pending = NUM_RX_DESC;
2106	data->tx_max_pending = NUM_TX_DESC;
2107	data->tx_pending = NUM_TX_DESC;
2108}
2109
2110static void rtl8169_get_pause_stats(struct net_device *dev,
2111				    struct ethtool_pause_stats *pause_stats)
2112{
2113	struct rtl8169_private *tp = netdev_priv(dev);
2114
2115	if (!rtl_is_8125(tp))
2116		return;
2117
2118	rtl8169_update_counters(tp);
2119	pause_stats->tx_pause_frames = le32_to_cpu(tp->counters->tx_pause_on);
2120	pause_stats->rx_pause_frames = le32_to_cpu(tp->counters->rx_pause_on);
2121}
2122
2123static void rtl8169_get_pauseparam(struct net_device *dev,
2124				   struct ethtool_pauseparam *data)
2125{
2126	struct rtl8169_private *tp = netdev_priv(dev);
2127	bool tx_pause, rx_pause;
2128
2129	phy_get_pause(tp->phydev, &tx_pause, &rx_pause);
2130
2131	data->autoneg = tp->phydev->autoneg;
2132	data->tx_pause = tx_pause ? 1 : 0;
2133	data->rx_pause = rx_pause ? 1 : 0;
2134}
2135
2136static int rtl8169_set_pauseparam(struct net_device *dev,
2137				  struct ethtool_pauseparam *data)
2138{
2139	struct rtl8169_private *tp = netdev_priv(dev);
2140
2141	if (dev->mtu > ETH_DATA_LEN)
2142		return -EOPNOTSUPP;
2143
2144	phy_set_asym_pause(tp->phydev, data->rx_pause, data->tx_pause);
2145
2146	return 0;
2147}
2148
2149static void rtl8169_get_eth_mac_stats(struct net_device *dev,
2150				      struct ethtool_eth_mac_stats *mac_stats)
2151{
2152	struct rtl8169_private *tp = netdev_priv(dev);
2153
2154	rtl8169_update_counters(tp);
2155
2156	mac_stats->FramesTransmittedOK =
2157		le64_to_cpu(tp->counters->tx_packets);
2158	mac_stats->SingleCollisionFrames =
2159		le32_to_cpu(tp->counters->tx_one_collision);
2160	mac_stats->MultipleCollisionFrames =
2161		le32_to_cpu(tp->counters->tx_multi_collision);
2162	mac_stats->FramesReceivedOK =
2163		le64_to_cpu(tp->counters->rx_packets);
2164	mac_stats->AlignmentErrors =
2165		le16_to_cpu(tp->counters->align_errors);
2166	mac_stats->FramesLostDueToIntMACXmitError =
2167		le64_to_cpu(tp->counters->tx_errors);
2168	mac_stats->BroadcastFramesReceivedOK =
2169		le64_to_cpu(tp->counters->rx_broadcast);
2170	mac_stats->MulticastFramesReceivedOK =
2171		le32_to_cpu(tp->counters->rx_multicast);
2172
2173	if (!rtl_is_8125(tp))
2174		return;
2175
2176	mac_stats->AlignmentErrors =
2177		le32_to_cpu(tp->counters->align_errors32);
2178	mac_stats->OctetsTransmittedOK =
2179		le64_to_cpu(tp->counters->tx_octets);
2180	mac_stats->LateCollisions =
2181		le32_to_cpu(tp->counters->tx_late_collision);
2182	mac_stats->FramesAbortedDueToXSColls =
2183		le32_to_cpu(tp->counters->tx_aborted32);
2184	mac_stats->OctetsReceivedOK =
2185		le64_to_cpu(tp->counters->rx_octets);
2186	mac_stats->FramesLostDueToIntMACRcvError =
2187		le32_to_cpu(tp->counters->rx_mac_error);
2188	mac_stats->MulticastFramesXmittedOK =
2189		le64_to_cpu(tp->counters->tx_multicast64);
2190	mac_stats->BroadcastFramesXmittedOK =
2191		le64_to_cpu(tp->counters->tx_broadcast64);
2192	mac_stats->MulticastFramesReceivedOK =
2193		le64_to_cpu(tp->counters->rx_multicast64);
2194	mac_stats->FrameTooLongErrors =
2195		le32_to_cpu(tp->counters->rx_frame_too_long);
2196}
2197
2198static void rtl8169_get_eth_ctrl_stats(struct net_device *dev,
2199				       struct ethtool_eth_ctrl_stats *ctrl_stats)
2200{
2201	struct rtl8169_private *tp = netdev_priv(dev);
2202
2203	if (!rtl_is_8125(tp))
2204		return;
2205
2206	rtl8169_update_counters(tp);
2207
2208	ctrl_stats->UnsupportedOpcodesReceived =
2209		le32_to_cpu(tp->counters->rx_unknown_opcode);
2210}
2211
2212static const struct ethtool_ops rtl8169_ethtool_ops = {
2213	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
2214				     ETHTOOL_COALESCE_MAX_FRAMES,
2215	.get_drvinfo		= rtl8169_get_drvinfo,
2216	.get_regs_len		= rtl8169_get_regs_len,
2217	.get_link		= ethtool_op_get_link,
2218	.get_coalesce		= rtl_get_coalesce,
2219	.set_coalesce		= rtl_set_coalesce,
2220	.get_regs		= rtl8169_get_regs,
2221	.get_wol		= rtl8169_get_wol,
2222	.set_wol		= rtl8169_set_wol,
2223	.get_strings		= rtl8169_get_strings,
2224	.get_sset_count		= rtl8169_get_sset_count,
2225	.get_ethtool_stats	= rtl8169_get_ethtool_stats,
2226	.get_ts_info		= ethtool_op_get_ts_info,
2227	.nway_reset		= phy_ethtool_nway_reset,
2228	.get_eee		= rtl8169_get_eee,
2229	.set_eee		= rtl8169_set_eee,
2230	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
2231	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
2232	.get_ringparam		= rtl8169_get_ringparam,
2233	.get_pause_stats	= rtl8169_get_pause_stats,
2234	.get_pauseparam		= rtl8169_get_pauseparam,
2235	.set_pauseparam		= rtl8169_set_pauseparam,
2236	.get_eth_mac_stats	= rtl8169_get_eth_mac_stats,
2237	.get_eth_ctrl_stats	= rtl8169_get_eth_ctrl_stats,
2238};
2239
2240static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
2241{
2242	/*
2243	 * The driver currently handles the 8168Bf and the 8168Be identically
2244	 * but they can be identified more specifically through the test below
2245	 * if needed:
2246	 *
2247	 * (RTL_R32(tp, TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
2248	 *
2249	 * Same thing for the 8101Eb and the 8101Ec:
2250	 *
2251	 * (RTL_R32(tp, TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
2252	 */
2253	static const struct rtl_mac_info {
2254		u16 mask;
2255		u16 val;
2256		enum mac_version ver;
2257	} mac_info[] = {
2258		/* 8126A family. */
2259		{ 0x7cf, 0x64a,	RTL_GIGA_MAC_VER_66 },
2260		{ 0x7cf, 0x649,	RTL_GIGA_MAC_VER_65 },
2261
2262		/* 8125D family. */
2263		{ 0x7cf, 0x688,	RTL_GIGA_MAC_VER_64 },
2264
2265		/* 8125B family. */
2266		{ 0x7cf, 0x641,	RTL_GIGA_MAC_VER_63 },
2267
2268		/* 8125A family. */
2269		{ 0x7cf, 0x609,	RTL_GIGA_MAC_VER_61 },
2270		/* It seems only XID 609 made it to the mass market.
2271		 * { 0x7cf, 0x608,	RTL_GIGA_MAC_VER_60 },
2272		 * { 0x7c8, 0x608,	RTL_GIGA_MAC_VER_61 },
2273		 */
2274
2275		/* RTL8117 */
2276		{ 0x7cf, 0x54b,	RTL_GIGA_MAC_VER_53 },
2277		{ 0x7cf, 0x54a,	RTL_GIGA_MAC_VER_52 },
2278
2279		/* 8168EP family. */
2280		{ 0x7cf, 0x502,	RTL_GIGA_MAC_VER_51 },
2281		/* It seems this chip version never made it to
2282		 * the wild. Let's disable detection.
2283		 * { 0x7cf, 0x501,      RTL_GIGA_MAC_VER_50 },
2284		 * { 0x7cf, 0x500,      RTL_GIGA_MAC_VER_49 },
2285		 */
2286
2287		/* 8168H family. */
2288		{ 0x7cf, 0x541,	RTL_GIGA_MAC_VER_46 },
2289		/* It seems this chip version never made it to
2290		 * the wild. Let's disable detection.
2291		 * { 0x7cf, 0x540,	RTL_GIGA_MAC_VER_45 },
2292		 */
2293		/* Realtek calls it RTL8168M, but it's handled like RTL8168H */
2294		{ 0x7cf, 0x6c0,	RTL_GIGA_MAC_VER_46 },
2295
2296		/* 8168G family. */
2297		{ 0x7cf, 0x5c8,	RTL_GIGA_MAC_VER_44 },
2298		{ 0x7cf, 0x509,	RTL_GIGA_MAC_VER_42 },
2299		/* It seems this chip version never made it to
2300		 * the wild. Let's disable detection.
2301		 * { 0x7cf, 0x4c1,	RTL_GIGA_MAC_VER_41 },
2302		 */
2303		{ 0x7cf, 0x4c0,	RTL_GIGA_MAC_VER_40 },
2304
2305		/* 8168F family. */
2306		{ 0x7c8, 0x488,	RTL_GIGA_MAC_VER_38 },
2307		{ 0x7cf, 0x481,	RTL_GIGA_MAC_VER_36 },
2308		{ 0x7cf, 0x480,	RTL_GIGA_MAC_VER_35 },
2309
2310		/* 8168E family. */
2311		{ 0x7c8, 0x2c8,	RTL_GIGA_MAC_VER_34 },
2312		{ 0x7cf, 0x2c1,	RTL_GIGA_MAC_VER_32 },
2313		{ 0x7c8, 0x2c0,	RTL_GIGA_MAC_VER_33 },
2314
2315		/* 8168D family. */
2316		{ 0x7cf, 0x281,	RTL_GIGA_MAC_VER_25 },
2317		{ 0x7c8, 0x280,	RTL_GIGA_MAC_VER_26 },
2318
2319		/* 8168DP family. */
2320		/* It seems this early RTL8168dp version never made it to
2321		 * the wild. Support has been removed.
2322		 * { 0x7cf, 0x288,      RTL_GIGA_MAC_VER_27 },
2323		 */
2324		{ 0x7cf, 0x28a,	RTL_GIGA_MAC_VER_28 },
2325		{ 0x7cf, 0x28b,	RTL_GIGA_MAC_VER_31 },
2326
2327		/* 8168C family. */
2328		{ 0x7cf, 0x3c9,	RTL_GIGA_MAC_VER_23 },
2329		{ 0x7cf, 0x3c8,	RTL_GIGA_MAC_VER_18 },
2330		{ 0x7c8, 0x3c8,	RTL_GIGA_MAC_VER_24 },
2331		{ 0x7cf, 0x3c0,	RTL_GIGA_MAC_VER_19 },
2332		{ 0x7cf, 0x3c2,	RTL_GIGA_MAC_VER_20 },
2333		{ 0x7cf, 0x3c3,	RTL_GIGA_MAC_VER_21 },
2334		{ 0x7c8, 0x3c0,	RTL_GIGA_MAC_VER_22 },
2335
2336		/* 8168B family. */
2337		{ 0x7c8, 0x380,	RTL_GIGA_MAC_VER_17 },
2338		/* This one is very old and rare, let's see if anybody complains.
2339		 * { 0x7c8, 0x300,	RTL_GIGA_MAC_VER_11 },
2340		 */
2341
2342		/* 8101 family. */
2343		{ 0x7c8, 0x448,	RTL_GIGA_MAC_VER_39 },
2344		{ 0x7c8, 0x440,	RTL_GIGA_MAC_VER_37 },
2345		{ 0x7cf, 0x409,	RTL_GIGA_MAC_VER_29 },
2346		{ 0x7c8, 0x408,	RTL_GIGA_MAC_VER_30 },
2347		{ 0x7cf, 0x349,	RTL_GIGA_MAC_VER_08 },
2348		{ 0x7cf, 0x249,	RTL_GIGA_MAC_VER_08 },
2349		{ 0x7cf, 0x348,	RTL_GIGA_MAC_VER_07 },
2350		{ 0x7cf, 0x248,	RTL_GIGA_MAC_VER_07 },
2351		{ 0x7cf, 0x240,	RTL_GIGA_MAC_VER_14 },
2352		{ 0x7c8, 0x348,	RTL_GIGA_MAC_VER_09 },
2353		{ 0x7c8, 0x248,	RTL_GIGA_MAC_VER_09 },
2354		{ 0x7c8, 0x340,	RTL_GIGA_MAC_VER_10 },
2355
2356		/* 8110 family. */
2357		{ 0xfc8, 0x980,	RTL_GIGA_MAC_VER_06 },
2358		{ 0xfc8, 0x180,	RTL_GIGA_MAC_VER_05 },
2359		{ 0xfc8, 0x100,	RTL_GIGA_MAC_VER_04 },
2360		{ 0xfc8, 0x040,	RTL_GIGA_MAC_VER_03 },
2361		{ 0xfc8, 0x008,	RTL_GIGA_MAC_VER_02 },
2362
2363		/* Catch-all */
2364		{ 0x000, 0x000,	RTL_GIGA_MAC_NONE   }
2365	};
2366	const struct rtl_mac_info *p = mac_info;
2367	enum mac_version ver;
2368
2369	while ((xid & p->mask) != p->val)
2370		p++;
2371	ver = p->ver;
2372
2373	if (ver != RTL_GIGA_MAC_NONE && !gmii) {
2374		if (ver == RTL_GIGA_MAC_VER_42)
2375			ver = RTL_GIGA_MAC_VER_43;
2376		else if (ver == RTL_GIGA_MAC_VER_46)
2377			ver = RTL_GIGA_MAC_VER_48;
2378	}
2379
2380	return ver;
2381}
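/*
 * Example (illustrative): a chip reporting XID 0x641 matches the
 * { 0x7cf, 0x641 } entry above (0x641 & 0x7cf == 0x641) and is detected as
 * RTL_GIGA_MAC_VER_63. Note that the !gmii path only demotes VER_42 and
 * VER_46 to their non-gigabit counterparts; all other versions are
 * returned unchanged.
 */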
2382
2383static void rtl_release_firmware(struct rtl8169_private *tp)
2384{
2385	if (tp->rtl_fw) {
2386		rtl_fw_release_firmware(tp->rtl_fw);
2387		kfree(tp->rtl_fw);
2388		tp->rtl_fw = NULL;
2389	}
2390}
2391
2392void r8169_apply_firmware(struct rtl8169_private *tp)
2393{
2394	int val;
2395
2396	/* TODO: release firmware if rtl_fw_write_firmware signals failure. */
2397	if (tp->rtl_fw) {
2398		rtl_fw_write_firmware(tp, tp->rtl_fw);
2399		/* At least one firmware doesn't reset tp->ocp_base. */
2400		tp->ocp_base = OCP_STD_PHY_BASE;
2401
2402		/* PHY soft reset may still be in progress */
2403		phy_read_poll_timeout(tp->phydev, MII_BMCR, val,
2404				      !(val & BMCR_RESET),
2405				      50000, 600000, true);
2406	}
2407}
2408
2409static void rtl8168_config_eee_mac(struct rtl8169_private *tp)
2410{
2411	/* Adjust EEE LED frequency */
2412	if (tp->mac_version != RTL_GIGA_MAC_VER_38)
2413		RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
2414
2415	rtl_eri_set_bits(tp, 0x1b0, 0x0003);
2416}
2417
2418static void rtl8125a_config_eee_mac(struct rtl8169_private *tp)
2419{
2420	r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0));
2421	r8168_mac_ocp_modify(tp, 0xeb62, 0, BIT(2) | BIT(1));
2422}
2423
2424static void rtl8125b_config_eee_mac(struct rtl8169_private *tp)
2425{
2426	r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0));
2427}
2428
2429static void rtl_rar_exgmac_set(struct rtl8169_private *tp, const u8 *addr)
2430{
2431	rtl_eri_write(tp, 0xe0, ERIAR_MASK_1111, get_unaligned_le32(addr));
2432	rtl_eri_write(tp, 0xe4, ERIAR_MASK_1111, get_unaligned_le16(addr + 4));
2433	rtl_eri_write(tp, 0xf0, ERIAR_MASK_1111, get_unaligned_le16(addr) << 16);
2434	rtl_eri_write(tp, 0xf4, ERIAR_MASK_1111, get_unaligned_le32(addr + 2));
2435}
2436
2437u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp)
2438{
2439	u16 data1, data2, ioffset;
2440
2441	r8168_mac_ocp_write(tp, 0xdd02, 0x807d);
2442	data1 = r8168_mac_ocp_read(tp, 0xdd02);
2443	data2 = r8168_mac_ocp_read(tp, 0xdd00);
2444
2445	ioffset = (data2 >> 1) & 0x7ff8;
2446	ioffset |= data2 & 0x0007;
2447	if (data1 & BIT(7))
2448		ioffset |= BIT(15);
2449
2450	return ioffset;
2451}
2452
2453static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
2454{
2455	set_bit(flag, tp->wk.flags);
2456	if (!schedule_work(&tp->wk.work))
2457		clear_bit(flag, tp->wk.flags);
2458}
2459
2460static void rtl8169_init_phy(struct rtl8169_private *tp)
2461{
2462	r8169_hw_phy_config(tp, tp->phydev, tp->mac_version);
2463
2464	if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
2465		pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
2466		pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
2467		/* set undocumented MAC Reg C+CR Offset 0x82h */
2468		RTL_W8(tp, 0x82, 0x01);
2469	}
2470
2471	if (tp->mac_version == RTL_GIGA_MAC_VER_05 &&
2472	    tp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_GIGABYTE &&
2473	    tp->pci_dev->subsystem_device == 0xe000)
2474		phy_write_paged(tp->phydev, 0x0001, 0x10, 0xf01b);
2475
2476	/* We may have called phy_speed_down before */
2477	phy_speed_up(tp->phydev);
2478
2479	genphy_soft_reset(tp->phydev);
2480}
2481
2482static void rtl_rar_set(struct rtl8169_private *tp, const u8 *addr)
2483{
2484	rtl_unlock_config_regs(tp);
2485
2486	RTL_W32(tp, MAC4, get_unaligned_le16(addr + 4));
2487	rtl_pci_commit(tp);
2488
2489	RTL_W32(tp, MAC0, get_unaligned_le32(addr));
2490	rtl_pci_commit(tp);
2491
2492	if (tp->mac_version == RTL_GIGA_MAC_VER_34)
2493		rtl_rar_exgmac_set(tp, addr);
2494
2495	rtl_lock_config_regs(tp);
2496}
2497
2498static int rtl_set_mac_address(struct net_device *dev, void *p)
2499{
2500	struct rtl8169_private *tp = netdev_priv(dev);
2501	int ret;
2502
2503	ret = eth_mac_addr(dev, p);
2504	if (ret)
2505		return ret;
2506
2507	rtl_rar_set(tp, dev->dev_addr);
2508
2509	return 0;
2510}
2511
2512static void rtl_init_rxcfg(struct rtl8169_private *tp)
2513{
2514	switch (tp->mac_version) {
2515	case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
2516	case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17:
2517		RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
2518		break;
2519	case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
2520	case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36:
2521	case RTL_GIGA_MAC_VER_38:
2522		RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
2523		break;
2524	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53:
2525		RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
2526		break;
2527	case RTL_GIGA_MAC_VER_61:
2528		RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST);
2529		break;
2530	case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_66:
2531		RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST |
2532			RX_PAUSE_SLOT_ON);
2533		break;
2534	default:
2535		RTL_W32(tp, RxConfig, RX128_INT_EN | RX_DMA_BURST);
2536		break;
2537	}
2538}
2539
2540static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
2541{
2542	tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0;
2543}
2544
2545static void rtl_jumbo_config(struct rtl8169_private *tp)
2546{
2547	bool jumbo = tp->dev->mtu > ETH_DATA_LEN;
2548	int readrq = 4096;
2549
2550	if (jumbo && tp->mac_version >= RTL_GIGA_MAC_VER_17 &&
2551	    tp->mac_version <= RTL_GIGA_MAC_VER_26)
2552		readrq = 512;
2553
2554	rtl_unlock_config_regs(tp);
2555	switch (tp->mac_version) {
2556	case RTL_GIGA_MAC_VER_17:
2557		r8169_mod_reg8_cond(tp, Config4, BIT(0), jumbo);
2558		break;
2559	case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26:
2560		r8169_mod_reg8_cond(tp, Config3, Jumbo_En0, jumbo);
2561		r8169_mod_reg8_cond(tp, Config4, Jumbo_En1, jumbo);
2562		break;
2563	case RTL_GIGA_MAC_VER_28:
2564		r8169_mod_reg8_cond(tp, Config3, Jumbo_En0, jumbo);
2565		break;
2566	case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_33:
2567		RTL_W8(tp, MaxTxPacketSize, jumbo ? 0x24 : 0x3f);
2568		r8169_mod_reg8_cond(tp, Config3, Jumbo_En0, jumbo);
2569		r8169_mod_reg8_cond(tp, Config4, BIT(0), jumbo);
2570		break;
2571	default:
2572		break;
2573	}
2574	rtl_lock_config_regs(tp);
2575
2576	if (pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
2577		pcie_set_readrq(tp->pci_dev, readrq);
2578
2579	/* Chip doesn't support pause in jumbo mode */
2580	if (jumbo) {
2581		linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2582				   tp->phydev->advertising);
2583		linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2584				   tp->phydev->advertising);
2585		phy_start_aneg(tp->phydev);
2586	}
2587}
2588
2589DECLARE_RTL_COND(rtl_chipcmd_cond)
2590{
2591	return RTL_R8(tp, ChipCmd) & CmdReset;
2592}
2593
2594static void rtl_hw_reset(struct rtl8169_private *tp)
2595{
2596	RTL_W8(tp, ChipCmd, CmdReset);
2597
2598	rtl_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
2599}
2600
2601static void rtl_request_firmware(struct rtl8169_private *tp)
2602{
2603	struct rtl_fw *rtl_fw;
2604
2605	/* firmware loaded already or no firmware available */
2606	if (tp->rtl_fw || !tp->fw_name)
2607		return;
2608
2609	rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
2610	if (!rtl_fw)
2611		return;
2612
2613	rtl_fw->phy_write = rtl_writephy;
2614	rtl_fw->phy_read = rtl_readphy;
2615	rtl_fw->mac_mcu_write = mac_mcu_write;
2616	rtl_fw->mac_mcu_read = mac_mcu_read;
2617	rtl_fw->fw_name = tp->fw_name;
2618	rtl_fw->dev = tp_to_dev(tp);
2619
2620	if (rtl_fw_request_firmware(rtl_fw))
2621		kfree(rtl_fw);
2622	else
2623		tp->rtl_fw = rtl_fw;
2624}
2625
2626static void rtl_rx_close(struct rtl8169_private *tp)
2627{
2628	RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
2629}
2630
2631DECLARE_RTL_COND(rtl_npq_cond)
2632{
2633	return RTL_R8(tp, TxPoll) & NPQ;
2634}
2635
2636DECLARE_RTL_COND(rtl_txcfg_empty_cond)
2637{
2638	return RTL_R32(tp, TxConfig) & TXCFG_EMPTY;
2639}
2640
2641DECLARE_RTL_COND(rtl_rxtx_empty_cond)
2642{
2643	return (RTL_R8(tp, MCU) & RXTX_EMPTY) == RXTX_EMPTY;
2644}
2645
2646DECLARE_RTL_COND(rtl_rxtx_empty_cond_2)
2647{
2648	/* IntrMitigate has new functionality on RTL8125 */
2649	return (RTL_R16(tp, IntrMitigate) & 0x0103) == 0x0103;
2650}
2651
2652static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp)
2653{
2654	switch (tp->mac_version) {
2655	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53:
2656		rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42);
2657		rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
2658		break;
2659	case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_61:
2660		rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
2661		break;
2662	case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_66:
2663		RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
2664		rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
2665		rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond_2, 100, 42);
2666		break;
2667	default:
2668		break;
2669	}
2670}
2671
2672static void rtl_disable_rxdvgate(struct rtl8169_private *tp)
2673{
2674	RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
2675}
2676
2677static void rtl_enable_rxdvgate(struct rtl8169_private *tp)
2678{
2679	RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN);
2680	fsleep(2000);
2681	rtl_wait_txrx_fifo_empty(tp);
2682}
2683
2684static void rtl_wol_enable_rx(struct rtl8169_private *tp)
2685{
2686	if (tp->mac_version >= RTL_GIGA_MAC_VER_25)
2687		RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) |
2688			AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
2689
2690	if (tp->mac_version >= RTL_GIGA_MAC_VER_40)
2691		rtl_disable_rxdvgate(tp);
2692}
2693
2694static void rtl_prepare_power_down(struct rtl8169_private *tp)
2695{
2696	if (tp->dash_enabled)
2697		return;
2698
2699	if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
2700	    tp->mac_version == RTL_GIGA_MAC_VER_33)
2701		rtl_ephy_write(tp, 0x19, 0xff64);
2702
2703	if (device_may_wakeup(tp_to_dev(tp))) {
2704		phy_speed_down(tp->phydev, false);
2705		rtl_wol_enable_rx(tp);
2706	}
2707}
2708
2709static void rtl_set_tx_config_registers(struct rtl8169_private *tp)
2710{
2711	u32 val = TX_DMA_BURST << TxDMAShift |
2712		  InterFrameGap << TxInterFrameGapShift;
2713
2714	if (rtl_is_8168evl_up(tp))
2715		val |= TXCFG_AUTO_FIFO;
2716
2717	RTL_W32(tp, TxConfig, val);
2718}
2719
2720static void rtl_set_rx_max_size(struct rtl8169_private *tp)
2721{
2722	/* Low hurts. Let's disable the filtering. */
2723	RTL_W16(tp, RxMaxSize, R8169_RX_BUF_SIZE + 1);
2724}
2725
2726static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp)
2727{
2728	/*
2729	 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
2730	 * register to be written before TxDescAddrLow to work.
2731	 * Switching from MMIO to I/O access fixes the issue as well.
2732	 */
2733	RTL_W32(tp, TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
2734	RTL_W32(tp, TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32));
2735	RTL_W32(tp, RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
2736	RTL_W32(tp, RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
2737}
2738
2739static void rtl8169_set_magic_reg(struct rtl8169_private *tp)
2740{
2741	u32 val;
2742
2743	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
2744		val = 0x000fff00;
2745	else if (tp->mac_version == RTL_GIGA_MAC_VER_06)
2746		val = 0x00ffff00;
2747	else
2748		return;
2749
2750	if (RTL_R8(tp, Config2) & PCI_Clock_66MHz)
2751		val |= 0xff;
2752
2753	RTL_W32(tp, 0x7c, val);
2754}
2755
2756static void rtl_set_rx_mode(struct net_device *dev)
2757{
2758	u32 rx_mode = AcceptBroadcast | AcceptMyPhys | AcceptMulticast;
2759	/* Multicast hash filter */
2760	u32 mc_filter[2] = { 0xffffffff, 0xffffffff };
2761	struct rtl8169_private *tp = netdev_priv(dev);
2762	u32 tmp;
2763
2764	if (dev->flags & IFF_PROMISC) {
2765		rx_mode |= AcceptAllPhys;
2766	} else if (!(dev->flags & IFF_MULTICAST)) {
2767		rx_mode &= ~AcceptMulticast;
2768	} else if (dev->flags & IFF_ALLMULTI ||
2769		   tp->mac_version == RTL_GIGA_MAC_VER_35) {
2770		/* accept all multicasts */
2771	} else if (netdev_mc_empty(dev)) {
2772		rx_mode &= ~AcceptMulticast;
2773	} else {
2774		struct netdev_hw_addr *ha;
2775
2776		mc_filter[1] = mc_filter[0] = 0;
2777		netdev_for_each_mc_addr(ha, dev) {
2778			u32 bit_nr = eth_hw_addr_crc(ha) >> 26;
2779			mc_filter[bit_nr >> 5] |= BIT(bit_nr & 31);
2780		}
2781
2782		if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
2783			tmp = mc_filter[0];
2784			mc_filter[0] = swab32(mc_filter[1]);
2785			mc_filter[1] = swab32(tmp);
2786		}
2787	}
2788
2789	RTL_W32(tp, MAR0 + 4, mc_filter[1]);
2790	RTL_W32(tp, MAR0 + 0, mc_filter[0]);
2791
2792	tmp = RTL_R32(tp, RxConfig);
2793	RTL_W32(tp, RxConfig, (tmp & ~RX_CONFIG_ACCEPT_OK_MASK) | rx_mode);
2794}
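/*
 * Illustrative example: the multicast filter uses the top 6 bits of the
 * address CRC as an index into the 64-bit MAR hash. If a multicast address
 * hashes to bit_nr = 37, the loop above sets bit (37 & 31) = 5 in
 * mc_filter[37 >> 5] = mc_filter[1].
 */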
2795
2796DECLARE_RTL_COND(rtl_csiar_cond)
2797{
2798	return RTL_R32(tp, CSIAR) & CSIAR_FLAG;
2799}
2800
2801static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
2802{
2803	u32 func = PCI_FUNC(tp->pci_dev->devfn);
2804
2805	RTL_W32(tp, CSIDR, value);
2806	RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
2807		CSIAR_BYTE_ENABLE | func << 16);
2808
2809	rtl_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
2810}
2811
2812static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
2813{
2814	u32 func = PCI_FUNC(tp->pci_dev->devfn);
2815
2816	RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | func << 16 |
2817		CSIAR_BYTE_ENABLE);
2818
2819	return rtl_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
2820		RTL_R32(tp, CSIDR) : ~0;
2821}
2822
2823static void rtl_set_aspm_entry_latency(struct rtl8169_private *tp, u8 val)
2824{
2825	struct pci_dev *pdev = tp->pci_dev;
2826	u32 csi;
2827
2828	/* According to Realtek the value at config space address 0x070f
2829	 * controls the L0s/L1 entrance latency. We try standard ECAM access
2830	 * first and if it fails fall back to CSI.
2831	 * bit 0..2: L0: 0 = 1us, 1 = 2us .. 6 = 7us, 7 = 7us (no typo)
2832	 * bit 3..5: L1: 0 = 1us, 1 = 2us .. 6 = 64us, 7 = 64us
2833	 */
2834	if (pdev->cfg_size > 0x070f &&
2835	    pci_write_config_byte(pdev, 0x070f, val) == PCIBIOS_SUCCESSFUL)
2836		return;
2837
2838	netdev_notice_once(tp->dev,
2839		"No native access to PCI extended config space, falling back to CSI\n");
2840	csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
2841	rtl_csi_write(tp, 0x070c, csi | val << 24);
2842}
2843
2844static void rtl_set_def_aspm_entry_latency(struct rtl8169_private *tp)
2845{
2846	/* L0 7us, L1 16us */
2847	rtl_set_aspm_entry_latency(tp, 0x27);
2848}
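/*
 * Decoding the value above (illustrative): 0x27 is 0b100111, so bits[2:0]
 * = 7 select an L0s entry latency of 7us and bits[5:3] = 4 select an L1
 * entry latency of 16us, matching the "L0 7us, L1 16us" note.
 */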
2849
2850struct ephy_info {
2851	unsigned int offset;
2852	u16 mask;
2853	u16 bits;
2854};
2855
2856static void __rtl_ephy_init(struct rtl8169_private *tp,
2857			    const struct ephy_info *e, int len)
2858{
2859	u16 w;
2860
2861	while (len-- > 0) {
2862		w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits;
2863		rtl_ephy_write(tp, e->offset, w);
2864		e++;
2865	}
2866}
2867
2868#define rtl_ephy_init(tp, a) __rtl_ephy_init(tp, a, ARRAY_SIZE(a))
2869
2870static void rtl_disable_clock_request(struct rtl8169_private *tp)
2871{
2872	pcie_capability_clear_word(tp->pci_dev, PCI_EXP_LNKCTL,
2873				   PCI_EXP_LNKCTL_CLKREQ_EN);
2874}
2875
2876static void rtl_enable_clock_request(struct rtl8169_private *tp)
2877{
2878	pcie_capability_set_word(tp->pci_dev, PCI_EXP_LNKCTL,
2879				 PCI_EXP_LNKCTL_CLKREQ_EN);
2880}
2881
2882static void rtl_pcie_state_l2l3_disable(struct rtl8169_private *tp)
2883{
2884	/* work around an issue when PCI reset occurs during L2/L3 state */
2885	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Rdy_to_L23);
2886}
2887
2888static void rtl_enable_exit_l1(struct rtl8169_private *tp)
2889{
2890	/* Bits control which events trigger ASPM L1 exit:
2891	 * Bit 12: rxdv
2892	 * Bit 11: ltr_msg
2893	 * Bit 10: txdma_poll
2894	 * Bit  9: xadm
2895	 * Bit  8: pktavi
2896	 * Bit  7: txpla
2897	 */
2898	switch (tp->mac_version) {
2899	case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36:
2900		rtl_eri_set_bits(tp, 0xd4, 0x1f00);
2901		break;
2902	case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_38:
2903		rtl_eri_set_bits(tp, 0xd4, 0x0c00);
2904		break;
2905	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66:
2906		r8168_mac_ocp_modify(tp, 0xc0ac, 0, 0x1f80);
2907		break;
2908	default:
2909		break;
2910	}
2911}
2912
2913static void rtl_disable_exit_l1(struct rtl8169_private *tp)
2914{
2915	switch (tp->mac_version) {
2916	case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
2917		rtl_eri_clear_bits(tp, 0xd4, 0x1f00);
2918		break;
2919	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66:
2920		r8168_mac_ocp_modify(tp, 0xc0ac, 0x1f80, 0);
2921		break;
2922	default:
2923		break;
2924	}
2925}
2926
2927static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
2928{
2929	u8 val8;
2930
2931	if (tp->mac_version < RTL_GIGA_MAC_VER_32)
2932		return;
2933
2934	/* Don't enable ASPM in the chip if OS can't control ASPM */
2935	if (enable && tp->aspm_manageable) {
2936		/* On these chip versions ASPM can even harm
2937		 * bus communication of other PCI devices.
2938		 */
2939		if (tp->mac_version == RTL_GIGA_MAC_VER_42 ||
2940		    tp->mac_version == RTL_GIGA_MAC_VER_43)
2941			return;
2942
2943		rtl_mod_config5(tp, 0, ASPM_en);
2944		switch (tp->mac_version) {
2945		case RTL_GIGA_MAC_VER_65:
2946		case RTL_GIGA_MAC_VER_66:
2947			val8 = RTL_R8(tp, INT_CFG0_8125) | INT_CFG0_CLKREQEN;
2948			RTL_W8(tp, INT_CFG0_8125, val8);
2949			break;
2950		default:
2951			rtl_mod_config2(tp, 0, ClkReqEn);
2952			break;
2953		}
2954
2955		switch (tp->mac_version) {
2956		case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
2957		case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
2958			/* reset ephy tx/rx disable timer */
2959			r8168_mac_ocp_modify(tp, 0xe094, 0xff00, 0);
2960			/* chip can trigger L1.2 */
2961			r8168_mac_ocp_modify(tp, 0xe092, 0x00ff, BIT(2));
2962			break;
2963		default:
2964			break;
2965		}
2966	} else {
2967		switch (tp->mac_version) {
2968		case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48:
2969		case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
2970			r8168_mac_ocp_modify(tp, 0xe092, 0x00ff, 0);
2971			break;
2972		default:
2973			break;
2974		}
2975
2976		switch (tp->mac_version) {
2977		case RTL_GIGA_MAC_VER_65:
2978		case RTL_GIGA_MAC_VER_66:
2979			val8 = RTL_R8(tp, INT_CFG0_8125) & ~INT_CFG0_CLKREQEN;
2980			RTL_W8(tp, INT_CFG0_8125, val8);
2981			break;
2982		default:
2983			rtl_mod_config2(tp, ClkReqEn, 0);
2984			break;
2985		}
2986		rtl_mod_config5(tp, ASPM_en, 0);
2987	}
2988}
2989
2990static void rtl_set_fifo_size(struct rtl8169_private *tp, u16 rx_stat,
2991			      u16 tx_stat, u16 rx_dyn, u16 tx_dyn)
2992{
2993	/* Usage of dynamic vs. static FIFO is controlled by bit
2994	 * TXCFG_AUTO_FIFO. Exact meaning of FIFO values isn't known.
2995	 */
2996	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, (rx_stat << 16) | rx_dyn);
2997	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, (tx_stat << 16) | tx_dyn);
2998}
2999
3000static void rtl8168g_set_pause_thresholds(struct rtl8169_private *tp,
3001					  u8 low, u8 high)
3002{
3003	/* FIFO thresholds for pause flow control */
3004	rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, low);
3005	rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, high);
3006}
3007
3008static void rtl_hw_start_8168b(struct rtl8169_private *tp)
3009{
3010	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
3011}
3012
3013static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
3014{
3015	RTL_W8(tp, Config1, RTL_R8(tp, Config1) | Speed_down);
3016
3017	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
3018
3019	rtl_disable_clock_request(tp);
3020}
3021
3022static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
3023{
3024	static const struct ephy_info e_info_8168cp[] = {
3025		{ 0x01, 0,	0x0001 },
3026		{ 0x02, 0x0800,	0x1000 },
3027		{ 0x03, 0,	0x0042 },
3028		{ 0x06, 0x0080,	0x0000 },
3029		{ 0x07, 0,	0x2000 }
3030	};
3031
3032	rtl_set_def_aspm_entry_latency(tp);
3033
3034	rtl_ephy_init(tp, e_info_8168cp);
3035
3036	__rtl_hw_start_8168cp(tp);
3037}
3038
3039static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
3040{
3041	rtl_set_def_aspm_entry_latency(tp);
3042
3043	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
3044}
3045
3046static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
3047{
3048	rtl_set_def_aspm_entry_latency(tp);
3049
3050	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
3051
3052	/* Magic. */
3053	RTL_W8(tp, DBG_REG, 0x20);
3054}
3055
3056static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
3057{
3058	static const struct ephy_info e_info_8168c_1[] = {
3059		{ 0x02, 0x0800,	0x1000 },
3060		{ 0x03, 0,	0x0002 },
3061		{ 0x06, 0x0080,	0x0000 }
3062	};
3063
3064	rtl_set_def_aspm_entry_latency(tp);
3065
3066	RTL_W8(tp, DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
3067
3068	rtl_ephy_init(tp, e_info_8168c_1);
3069
3070	__rtl_hw_start_8168cp(tp);
3071}
3072
3073static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
3074{
3075	static const struct ephy_info e_info_8168c_2[] = {
3076		{ 0x01, 0,	0x0001 },
3077		{ 0x03, 0x0400,	0x0020 }
3078	};
3079
3080	rtl_set_def_aspm_entry_latency(tp);
3081
3082	rtl_ephy_init(tp, e_info_8168c_2);
3083
3084	__rtl_hw_start_8168cp(tp);
3085}
3086
3087static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
3088{
3089	rtl_set_def_aspm_entry_latency(tp);
3090
3091	__rtl_hw_start_8168cp(tp);
3092}
3093
3094static void rtl_hw_start_8168d(struct rtl8169_private *tp)
3095{
3096	rtl_set_def_aspm_entry_latency(tp);
3097
3098	rtl_disable_clock_request(tp);
3099}
3100
3101static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
3102{
3103	static const struct ephy_info e_info_8168d_4[] = {
3104		{ 0x0b, 0x0000,	0x0048 },
3105		{ 0x19, 0x0020,	0x0050 },
3106		{ 0x0c, 0x0100,	0x0020 },
3107		{ 0x10, 0x0004,	0x0000 },
3108	};
3109
3110	rtl_set_def_aspm_entry_latency(tp);
3111
3112	rtl_ephy_init(tp, e_info_8168d_4);
3113
3114	rtl_enable_clock_request(tp);
3115}
3116
3117static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
3118{
3119	static const struct ephy_info e_info_8168e_1[] = {
3120		{ 0x00, 0x0200,	0x0100 },
3121		{ 0x00, 0x0000,	0x0004 },
3122		{ 0x06, 0x0002,	0x0001 },
3123		{ 0x06, 0x0000,	0x0030 },
3124		{ 0x07, 0x0000,	0x2000 },
3125		{ 0x00, 0x0000,	0x0020 },
3126		{ 0x03, 0x5800,	0x2000 },
3127		{ 0x03, 0x0000,	0x0001 },
3128		{ 0x01, 0x0800,	0x1000 },
3129		{ 0x07, 0x0000,	0x4000 },
3130		{ 0x1e, 0x0000,	0x2000 },
3131		{ 0x19, 0xffff,	0xfe6c },
3132		{ 0x0a, 0x0000,	0x0040 }
3133	};
3134
3135	rtl_set_def_aspm_entry_latency(tp);
3136
3137	rtl_ephy_init(tp, e_info_8168e_1);
3138
3139	rtl_disable_clock_request(tp);
3140
3141	/* Reset tx FIFO pointer */
3142	RTL_W32(tp, MISC, RTL_R32(tp, MISC) | TXPLA_RST);
3143	RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~TXPLA_RST);
3144
3145	rtl_mod_config5(tp, Spi_en, 0);
3146}
3147
3148static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
3149{
3150	static const struct ephy_info e_info_8168e_2[] = {
3151		{ 0x09, 0x0000,	0x0080 },
3152		{ 0x19, 0x0000,	0x0224 },
3153		{ 0x00, 0x0000,	0x0004 },
3154		{ 0x0c, 0x3df0,	0x0200 },
3155	};
3156
3157	rtl_set_def_aspm_entry_latency(tp);
3158
3159	rtl_ephy_init(tp, e_info_8168e_2);
3160
3161	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
3162	rtl_eri_write(tp, 0xb8, ERIAR_MASK_1111, 0x0000);
3163	rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06);
3164	rtl_eri_set_bits(tp, 0x1d0, BIT(1));
3165	rtl_reset_packet_filter(tp);
3166	rtl_eri_set_bits(tp, 0x1b0, BIT(4));
3167	rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050);
3168	rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060);
3169
3170	rtl_disable_clock_request(tp);
3171
3172	RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
3173
3174	rtl8168_config_eee_mac(tp);
3175
3176	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
3177	RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
3178	rtl_mod_config5(tp, Spi_en, 0);
3179}
3180
3181static void rtl_hw_start_8168f(struct rtl8169_private *tp)
3182{
3183	rtl_set_def_aspm_entry_latency(tp);
3184
3185	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
3186	rtl_eri_write(tp, 0xb8, ERIAR_MASK_1111, 0x0000);
3187	rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06);
3188	rtl_reset_packet_filter(tp);
3189	rtl_eri_set_bits(tp, 0x1b0, BIT(4));
3190	rtl_eri_set_bits(tp, 0x1d0, BIT(4) | BIT(1));
3191	rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050);
3192	rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060);
3193
3194	rtl_disable_clock_request(tp);
3195
3196	RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
3197	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
3198	RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
3199	rtl_mod_config5(tp, Spi_en, 0);
3200
3201	rtl8168_config_eee_mac(tp);
3202}
3203
3204static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
3205{
3206	static const struct ephy_info e_info_8168f_1[] = {
3207		{ 0x06, 0x00c0,	0x0020 },
3208		{ 0x08, 0x0001,	0x0002 },
3209		{ 0x09, 0x0000,	0x0080 },
3210		{ 0x19, 0x0000,	0x0224 },
3211		{ 0x00, 0x0000,	0x0008 },
3212		{ 0x0c, 0x3df0,	0x0200 },
3213	};
3214
3215	rtl_hw_start_8168f(tp);
3216
3217	rtl_ephy_init(tp, e_info_8168f_1);
3218}
3219
3220static void rtl_hw_start_8411(struct rtl8169_private *tp)
3221{
3222	static const struct ephy_info e_info_8168f_1[] = {
3223		{ 0x06, 0x00c0,	0x0020 },
3224		{ 0x0f, 0xffff,	0x5200 },
3225		{ 0x19, 0x0000,	0x0224 },
3226		{ 0x00, 0x0000,	0x0008 },
3227		{ 0x0c, 0x3df0,	0x0200 },
3228	};
3229
3230	rtl_hw_start_8168f(tp);
3231	rtl_pcie_state_l2l3_disable(tp);
3232
3233	rtl_ephy_init(tp, e_info_8168f_1);
3234}
3235
3236static void rtl_hw_start_8168g(struct rtl8169_private *tp)
3237{
3238	rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06);
3239	rtl8168g_set_pause_thresholds(tp, 0x38, 0x48);
3240
3241	rtl_set_def_aspm_entry_latency(tp);
3242
3243	rtl_reset_packet_filter(tp);
3244	rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f);
3245
3246	rtl_disable_rxdvgate(tp);
3247
3248	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
3249	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
3250
3251	rtl8168_config_eee_mac(tp);
3252
3253	rtl_w0w1_eri(tp, 0x2fc, 0x01, 0x06);
3254	rtl_eri_clear_bits(tp, 0x1b0, BIT(12));
3255
3256	rtl_pcie_state_l2l3_disable(tp);
3257}
3258
3259static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
3260{
3261	static const struct ephy_info e_info_8168g_1[] = {
3262		{ 0x00, 0x0008,	0x0000 },
3263		{ 0x0c, 0x3ff0,	0x0820 },
3264		{ 0x1e, 0x0000,	0x0001 },
3265		{ 0x19, 0x8000,	0x0000 }
3266	};
3267
3268	rtl_hw_start_8168g(tp);
3269	rtl_ephy_init(tp, e_info_8168g_1);
3270}
3271
3272static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
3273{
3274	static const struct ephy_info e_info_8168g_2[] = {
3275		{ 0x00, 0x0008,	0x0000 },
3276		{ 0x0c, 0x3ff0,	0x0820 },
3277		{ 0x19, 0xffff,	0x7c00 },
3278		{ 0x1e, 0xffff,	0x20eb },
3279		{ 0x0d, 0xffff,	0x1666 },
3280		{ 0x00, 0xffff,	0x10a3 },
3281		{ 0x06, 0xffff,	0xf050 },
3282		{ 0x04, 0x0000,	0x0010 },
3283		{ 0x1d, 0x4000,	0x0000 },
3284	};
3285
3286	rtl_hw_start_8168g(tp);
3287	rtl_ephy_init(tp, e_info_8168g_2);
3288}
3289
3290static void rtl8411b_fix_phy_down(struct rtl8169_private *tp)
3291{
3292	static const u16 fix_data[] = {
3293/* 0xf800 */ 0xe008, 0xe00a, 0xe00c, 0xe00e, 0xe027, 0xe04f, 0xe05e, 0xe065,
3294/* 0xf810 */ 0xc602, 0xbe00, 0x0000, 0xc502, 0xbd00, 0x074c, 0xc302, 0xbb00,
3295/* 0xf820 */ 0x080a, 0x6420, 0x48c2, 0x8c20, 0xc516, 0x64a4, 0x49c0, 0xf009,
3296/* 0xf830 */ 0x74a2, 0x8ca5, 0x74a0, 0xc50e, 0x9ca2, 0x1c11, 0x9ca0, 0xe006,
3297/* 0xf840 */ 0x74f8, 0x48c4, 0x8cf8, 0xc404, 0xbc00, 0xc403, 0xbc00, 0x0bf2,
3298/* 0xf850 */ 0x0c0a, 0xe434, 0xd3c0, 0x49d9, 0xf01f, 0xc526, 0x64a5, 0x1400,
3299/* 0xf860 */ 0xf007, 0x0c01, 0x8ca5, 0x1c15, 0xc51b, 0x9ca0, 0xe013, 0xc519,
3300/* 0xf870 */ 0x74a0, 0x48c4, 0x8ca0, 0xc516, 0x74a4, 0x48c8, 0x48ca, 0x9ca4,
3301/* 0xf880 */ 0xc512, 0x1b00, 0x9ba0, 0x1b1c, 0x483f, 0x9ba2, 0x1b04, 0xc508,
3302/* 0xf890 */ 0x9ba0, 0xc505, 0xbd00, 0xc502, 0xbd00, 0x0300, 0x051e, 0xe434,
3303/* 0xf8a0 */ 0xe018, 0xe092, 0xde20, 0xd3c0, 0xc50f, 0x76a4, 0x49e3, 0xf007,
3304/* 0xf8b0 */ 0x49c0, 0xf103, 0xc607, 0xbe00, 0xc606, 0xbe00, 0xc602, 0xbe00,
3305/* 0xf8c0 */ 0x0c4c, 0x0c28, 0x0c2c, 0xdc00, 0xc707, 0x1d00, 0x8de2, 0x48c1,
3306/* 0xf8d0 */ 0xc502, 0xbd00, 0x00aa, 0xe0c0, 0xc502, 0xbd00, 0x0132
3307	};
3308	unsigned long flags;
3309	int i;
3310
3311	raw_spin_lock_irqsave(&tp->mac_ocp_lock, flags);
3312	for (i = 0; i < ARRAY_SIZE(fix_data); i++)
3313		__r8168_mac_ocp_write(tp, 0xf800 + 2 * i, fix_data[i]);
3314	raw_spin_unlock_irqrestore(&tp->mac_ocp_lock, flags);
3315}
3316
3317static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
3318{
3319	static const struct ephy_info e_info_8411_2[] = {
3320		{ 0x00, 0x0008,	0x0000 },
3321		{ 0x0c, 0x37d0,	0x0820 },
3322		{ 0x1e, 0x0000,	0x0001 },
3323		{ 0x19, 0x8021,	0x0000 },
3324		{ 0x1e, 0x0000,	0x2000 },
3325		{ 0x0d, 0x0100,	0x0200 },
3326		{ 0x00, 0x0000,	0x0080 },
3327		{ 0x06, 0x0000,	0x0010 },
3328		{ 0x04, 0x0000,	0x0010 },
3329		{ 0x1d, 0x0000,	0x4000 },
3330	};
3331
3332	rtl_hw_start_8168g(tp);
3333
3334	rtl_ephy_init(tp, e_info_8411_2);
3335
3336	/* The following Realtek-provided magic fixes an issue with the RX unit
3337	 * getting confused after the PHY has been powered down.
3338	 */
3339	r8168_mac_ocp_write(tp, 0xFC28, 0x0000);
3340	r8168_mac_ocp_write(tp, 0xFC2A, 0x0000);
3341	r8168_mac_ocp_write(tp, 0xFC2C, 0x0000);
3342	r8168_mac_ocp_write(tp, 0xFC2E, 0x0000);
3343	r8168_mac_ocp_write(tp, 0xFC30, 0x0000);
3344	r8168_mac_ocp_write(tp, 0xFC32, 0x0000);
3345	r8168_mac_ocp_write(tp, 0xFC34, 0x0000);
3346	r8168_mac_ocp_write(tp, 0xFC36, 0x0000);
3347	mdelay(3);
3348	r8168_mac_ocp_write(tp, 0xFC26, 0x0000);
3349
3350	rtl8411b_fix_phy_down(tp);
3351
3352	r8168_mac_ocp_write(tp, 0xFC26, 0x8000);
3353
3354	r8168_mac_ocp_write(tp, 0xFC2A, 0x0743);
3355	r8168_mac_ocp_write(tp, 0xFC2C, 0x0801);
3356	r8168_mac_ocp_write(tp, 0xFC2E, 0x0BE9);
3357	r8168_mac_ocp_write(tp, 0xFC30, 0x02FD);
3358	r8168_mac_ocp_write(tp, 0xFC32, 0x0C25);
3359	r8168_mac_ocp_write(tp, 0xFC34, 0x00A9);
3360	r8168_mac_ocp_write(tp, 0xFC36, 0x012D);
3361}
3362
3363static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
3364{
3365	static const struct ephy_info e_info_8168h_1[] = {
3366		{ 0x1e, 0x0800,	0x0001 },
3367		{ 0x1d, 0x0000,	0x0800 },
3368		{ 0x05, 0xffff,	0x2089 },
3369		{ 0x06, 0xffff,	0x5881 },
3370		{ 0x04, 0xffff,	0x854a },
3371		{ 0x01, 0xffff,	0x068b }
3372	};
3373	int rg_saw_cnt;
3374
3375	rtl_ephy_init(tp, e_info_8168h_1);
3376
3377	rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06);
3378	rtl8168g_set_pause_thresholds(tp, 0x38, 0x48);
3379
3380	rtl_set_def_aspm_entry_latency(tp);
3381
3382	rtl_reset_packet_filter(tp);
3383
3384	rtl_eri_set_bits(tp, 0xdc, 0x001c);
3385
3386	rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);
3387
3388	rtl_disable_rxdvgate(tp);
3389
3390	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
3391	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
3392
3393	rtl8168_config_eee_mac(tp);
3394
3395	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
3396	RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
3397
3398	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);
3399
3400	rtl_eri_clear_bits(tp, 0x1b0, BIT(12));
3401
3402	rtl_pcie_state_l2l3_disable(tp);
3403
3404	rg_saw_cnt = phy_read_paged(tp->phydev, 0x0c42, 0x13) & 0x3fff;
3405	if (rg_saw_cnt > 0) {
3406		u16 sw_cnt_1ms_ini;
3407
3408		sw_cnt_1ms_ini = 16000000 / rg_saw_cnt;
3409		sw_cnt_1ms_ini &= 0x0fff;
3410		r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini);
3411	}
3412
3413	r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070);
3414	r8168_mac_ocp_modify(tp, 0xe052, 0x6000, 0x8008);
3415	r8168_mac_ocp_modify(tp, 0xe0d6, 0x01ff, 0x017f);
3416	r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f);
3417
3418	r8168_mac_ocp_write(tp, 0xe63e, 0x0001);
3419	r8168_mac_ocp_write(tp, 0xe63e, 0x0000);
3420	r8168_mac_ocp_write(tp, 0xc094, 0x0000);
3421	r8168_mac_ocp_write(tp, 0xc09e, 0x0000);
3422}
3423
3424static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
3425{
3426	rtl8168ep_stop_cmac(tp);
3427
3428	rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06);
3429	rtl8168g_set_pause_thresholds(tp, 0x2f, 0x5f);
3430
3431	rtl_set_def_aspm_entry_latency(tp);
3432
3433	rtl_reset_packet_filter(tp);
3434
3435	rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);
3436
3437	rtl_disable_rxdvgate(tp);
3438
3439	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
3440	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
3441
3442	rtl8168_config_eee_mac(tp);
3443
3444	rtl_w0w1_eri(tp, 0x2fc, 0x01, 0x06);
3445
3446	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);
3447
3448	rtl_pcie_state_l2l3_disable(tp);
3449}
3450
3451static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
3452{
3453	static const struct ephy_info e_info_8168ep_3[] = {
3454		{ 0x00, 0x0000,	0x0080 },
3455		{ 0x0d, 0x0100,	0x0200 },
3456		{ 0x19, 0x8021,	0x0000 },
3457		{ 0x1e, 0x0000,	0x2000 },
3458	};
3459
3460	rtl_ephy_init(tp, e_info_8168ep_3);
3461
3462	rtl_hw_start_8168ep(tp);
3463
3464	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
3465	RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
3466
3467	r8168_mac_ocp_modify(tp, 0xd3e2, 0x0fff, 0x0271);
3468	r8168_mac_ocp_modify(tp, 0xd3e4, 0x00ff, 0x0000);
3469	r8168_mac_ocp_modify(tp, 0xe860, 0x0000, 0x0080);
3470}
3471
3472static void rtl_hw_start_8117(struct rtl8169_private *tp)
3473{
3474	static const struct ephy_info e_info_8117[] = {
3475		{ 0x19, 0x0040,	0x1100 },
3476		{ 0x59, 0x0040,	0x1100 },
3477	};
3478	int rg_saw_cnt;
3479
3480	rtl8168ep_stop_cmac(tp);
3481	rtl_ephy_init(tp, e_info_8117);
3482
3483	rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06);
3484	rtl8168g_set_pause_thresholds(tp, 0x2f, 0x5f);
3485
3486	rtl_set_def_aspm_entry_latency(tp);
3487
3488	rtl_reset_packet_filter(tp);
3489
3490	rtl_eri_set_bits(tp, 0xd4, 0x0010);
3491
3492	rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);
3493
3494	rtl_disable_rxdvgate(tp);
3495
3496	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
3497	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
3498
3499	rtl8168_config_eee_mac(tp);
3500
3501	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
3502	RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
3503
3504	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);
3505
3506	rtl_eri_clear_bits(tp, 0x1b0, BIT(12));
3507
3508	rtl_pcie_state_l2l3_disable(tp);
3509
3510	rg_saw_cnt = phy_read_paged(tp->phydev, 0x0c42, 0x13) & 0x3fff;
3511	if (rg_saw_cnt > 0) {
3512		u16 sw_cnt_1ms_ini;
3513
3514		sw_cnt_1ms_ini = (16000000 / rg_saw_cnt) & 0x0fff;
3515		r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini);
3516	}
3517
3518	r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070);
3519	r8168_mac_ocp_write(tp, 0xea80, 0x0003);
3520	r8168_mac_ocp_modify(tp, 0xe052, 0x0000, 0x0009);
3521	r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f);
3522
3523	r8168_mac_ocp_write(tp, 0xe63e, 0x0001);
3524	r8168_mac_ocp_write(tp, 0xe63e, 0x0000);
3525	r8168_mac_ocp_write(tp, 0xc094, 0x0000);
3526	r8168_mac_ocp_write(tp, 0xc09e, 0x0000);
3527
3528	/* firmware is for MAC only */
3529	r8169_apply_firmware(tp);
3530}
3531
3532static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
3533{
3534	static const struct ephy_info e_info_8102e_1[] = {
3535		{ 0x01,	0, 0x6e65 },
3536		{ 0x02,	0, 0x091f },
3537		{ 0x03,	0, 0xc2f9 },
3538		{ 0x06,	0, 0xafb5 },
3539		{ 0x07,	0, 0x0e00 },
3540		{ 0x19,	0, 0xec80 },
3541		{ 0x01,	0, 0x2e65 },
3542		{ 0x01,	0, 0x6e65 }
3543	};
3544	u8 cfg1;
3545
3546	rtl_set_def_aspm_entry_latency(tp);
3547
3548	RTL_W8(tp, DBG_REG, FIX_NAK_1);
3549
3550	RTL_W8(tp, Config1,
3551	       LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
3552	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
3553
3554	cfg1 = RTL_R8(tp, Config1);
3555	if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
3556		RTL_W8(tp, Config1, cfg1 & ~LEDS0);
3557
3558	rtl_ephy_init(tp, e_info_8102e_1);
3559}
3560
3561static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
3562{
3563	rtl_set_def_aspm_entry_latency(tp);
3564
3565	RTL_W8(tp, Config1, MEMMAP | IOMAP | VPD | PMEnable);
3566	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
3567}
3568
3569static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
3570{
3571	rtl_hw_start_8102e_2(tp);
3572
3573	rtl_ephy_write(tp, 0x03, 0xc2f9);
3574}
3575
3576static void rtl_hw_start_8401(struct rtl8169_private *tp)
3577{
3578	static const struct ephy_info e_info_8401[] = {
3579		{ 0x01,	0xffff, 0x6fe5 },
3580		{ 0x03,	0xffff, 0x0599 },
3581		{ 0x06,	0xffff, 0xaf25 },
3582		{ 0x07,	0xffff, 0x8e68 },
3583	};
3584
3585	rtl_ephy_init(tp, e_info_8401);
3586	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
3587}
3588
3589static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
3590{
3591	static const struct ephy_info e_info_8105e_1[] = {
3592		{ 0x07,	0, 0x4000 },
3593		{ 0x19,	0, 0x0200 },
3594		{ 0x19,	0, 0x0020 },
3595		{ 0x1e,	0, 0x2000 },
3596		{ 0x03,	0, 0x0001 },
3597		{ 0x19,	0, 0x0100 },
3598		{ 0x19,	0, 0x0004 },
3599		{ 0x0a,	0, 0x0020 }
3600	};
3601
3602	/* Force LAN exit from ASPM if Rx/Tx are not idle */
3603	RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
3604
3605	/* Disable Early Tally Counter */
3606	RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) & ~0x010000);
3607
3608	RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET);
3609	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
3610
3611	rtl_ephy_init(tp, e_info_8105e_1);
3612
3613	rtl_pcie_state_l2l3_disable(tp);
3614}
3615
3616static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
3617{
3618	rtl_hw_start_8105e_1(tp);
3619	rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000);
3620}
3621
3622static void rtl_hw_start_8402(struct rtl8169_private *tp)
3623{
3624	static const struct ephy_info e_info_8402[] = {
3625		{ 0x19,	0xffff, 0xff64 },
3626		{ 0x1e,	0, 0x4000 }
3627	};
3628
3629	rtl_set_def_aspm_entry_latency(tp);
3630
3631	/* Force LAN exit from ASPM if Rx/Tx are not idle */
3632	RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
3633
3634	RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
3635
3636	rtl_ephy_init(tp, e_info_8402);
3637
3638	rtl_set_fifo_size(tp, 0x00, 0x00, 0x02, 0x06);
3639	rtl_reset_packet_filter(tp);
3640	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
3641	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
3642	rtl_w0w1_eri(tp, 0x0d4, 0x0e00, 0xff00);
3643
3644	/* disable EEE */
3645	rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);
3646
3647	rtl_pcie_state_l2l3_disable(tp);
3648}
3649
3650static void rtl_hw_start_8106(struct rtl8169_private *tp)
3651{
3652	/* Force LAN exit from ASPM if Rx/Tx are not idle */
3653	RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
3654
3655	RTL_W32(tp, MISC, (RTL_R32(tp, MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
3656	RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET);
3657	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
3658
3659	/* L0 7us, L1 32us - needed to avoid issues with link-up detection */
3660	rtl_set_aspm_entry_latency(tp, 0x2f);
3661
3662	rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000);
3663
3664	/* disable EEE */
3665	rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);
3666
3667	rtl_pcie_state_l2l3_disable(tp);
 
3668}
3669
3670DECLARE_RTL_COND(rtl_mac_ocp_e00e_cond)
3671{
3672	return r8168_mac_ocp_read(tp, 0xe00e) & BIT(13);
3673}
3674
3675static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
3676{
3677	rtl_pcie_state_l2l3_disable(tp);
3678
3679	RTL_W16(tp, 0x382, 0x221b);
3680	RTL_W32(tp, RSS_CTRL_8125, 0);
3681	RTL_W16(tp, Q_NUM_CTRL_8125, 0);
3682
3683	/* disable UPS */
3684	r8168_mac_ocp_modify(tp, 0xd40a, 0x0010, 0x0000);
3685
3686	RTL_W8(tp, Config1, RTL_R8(tp, Config1) & ~0x10);
3687
3688	r8168_mac_ocp_write(tp, 0xc140, 0xffff);
3689	r8168_mac_ocp_write(tp, 0xc142, 0xffff);
3690
3691	r8168_mac_ocp_modify(tp, 0xd3e2, 0x0fff, 0x03a9);
3692	r8168_mac_ocp_modify(tp, 0xd3e4, 0x00ff, 0x0000);
3693	r8168_mac_ocp_modify(tp, 0xe860, 0x0000, 0x0080);
3694
3695	/* disable new tx descriptor format */
3696	r8168_mac_ocp_modify(tp, 0xeb58, 0x0001, 0x0000);
3697
3698	if (tp->mac_version == RTL_GIGA_MAC_VER_65 ||
3699	    tp->mac_version == RTL_GIGA_MAC_VER_66)
3700		RTL_W8(tp, 0xD8, RTL_R8(tp, 0xD8) & ~0x02);
3701
3702	if (tp->mac_version == RTL_GIGA_MAC_VER_65 ||
3703	    tp->mac_version == RTL_GIGA_MAC_VER_66)
3704		r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400);
3705	else if (tp->mac_version == RTL_GIGA_MAC_VER_63)
3706		r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0200);
3707	else
3708		r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0300);
3709
3710	if (tp->mac_version == RTL_GIGA_MAC_VER_63)
3711		r8168_mac_ocp_modify(tp, 0xe63e, 0x0c30, 0x0000);
3712	else
3713		r8168_mac_ocp_modify(tp, 0xe63e, 0x0c30, 0x0020);
3714
3715	r8168_mac_ocp_modify(tp, 0xc0b4, 0x0000, 0x000c);
3716	r8168_mac_ocp_modify(tp, 0xeb6a, 0x00ff, 0x0033);
3717	r8168_mac_ocp_modify(tp, 0xeb50, 0x03e0, 0x0040);
3718	r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0030);
3719	r8168_mac_ocp_modify(tp, 0xe040, 0x1000, 0x0000);
3720	r8168_mac_ocp_modify(tp, 0xea1c, 0x0003, 0x0001);
3721	if (tp->mac_version == RTL_GIGA_MAC_VER_65 ||
3722	    tp->mac_version == RTL_GIGA_MAC_VER_66)
3723		r8168_mac_ocp_modify(tp, 0xea1c, 0x0300, 0x0000);
3724	else
3725		r8168_mac_ocp_modify(tp, 0xea1c, 0x0004, 0x0000);
3726	r8168_mac_ocp_modify(tp, 0xe0c0, 0x4f0f, 0x4403);
3727	r8168_mac_ocp_modify(tp, 0xe052, 0x0080, 0x0068);
 
3728	r8168_mac_ocp_modify(tp, 0xd430, 0x0fff, 0x047f);
3729
3730	r8168_mac_ocp_modify(tp, 0xea1c, 0x0004, 0x0000);
3731	r8168_mac_ocp_modify(tp, 0xeb54, 0x0000, 0x0001);
3732	udelay(1);
3733	r8168_mac_ocp_modify(tp, 0xeb54, 0x0001, 0x0000);
3734	RTL_W16(tp, 0x1880, RTL_R16(tp, 0x1880) & ~0x0030);
3735
3736	r8168_mac_ocp_write(tp, 0xe098, 0xc302);
3737
3738	rtl_loop_wait_low(tp, &rtl_mac_ocp_e00e_cond, 1000, 10);
3739
3740	if (tp->mac_version == RTL_GIGA_MAC_VER_61)
3741		rtl8125a_config_eee_mac(tp);
3742	else
3743		rtl8125b_config_eee_mac(tp);
3744
3745	rtl_disable_rxdvgate(tp);
3746}
3747
3748static void rtl_hw_start_8125a_2(struct rtl8169_private *tp)
3749{
3750	static const struct ephy_info e_info_8125a_2[] = {
3751		{ 0x04, 0xffff, 0xd000 },
3752		{ 0x0a, 0xffff, 0x8653 },
3753		{ 0x23, 0xffff, 0xab66 },
3754		{ 0x20, 0xffff, 0x9455 },
3755		{ 0x21, 0xffff, 0x99ff },
3756		{ 0x29, 0xffff, 0xfe04 },
3757
3758		{ 0x44, 0xffff, 0xd000 },
3759		{ 0x4a, 0xffff, 0x8653 },
3760		{ 0x63, 0xffff, 0xab66 },
3761		{ 0x60, 0xffff, 0x9455 },
3762		{ 0x61, 0xffff, 0x99ff },
3763		{ 0x69, 0xffff, 0xfe04 },
3764	};
3765
3766	rtl_set_def_aspm_entry_latency(tp);
3767	rtl_ephy_init(tp, e_info_8125a_2);
 
3768	rtl_hw_start_8125_common(tp);
 
3769}
3770
3771static void rtl_hw_start_8125b(struct rtl8169_private *tp)
3772{
3773	static const struct ephy_info e_info_8125b[] = {
3774		{ 0x0b, 0xffff, 0xa908 },
3775		{ 0x1e, 0xffff, 0x20eb },
3776		{ 0x4b, 0xffff, 0xa908 },
3777		{ 0x5e, 0xffff, 0x20eb },
3778		{ 0x22, 0x0030, 0x0020 },
3779		{ 0x62, 0x0030, 0x0020 },
3780	};
3781
3782	rtl_set_def_aspm_entry_latency(tp);
3783	rtl_ephy_init(tp, e_info_8125b);
3784	rtl_hw_start_8125_common(tp);
3785}
3786
3787static void rtl_hw_start_8125d(struct rtl8169_private *tp)
3788{
3789	rtl_set_def_aspm_entry_latency(tp);
3790	rtl_hw_start_8125_common(tp);
3791}
3792
3793static void rtl_hw_start_8126a(struct rtl8169_private *tp)
3794{
3795	rtl_set_def_aspm_entry_latency(tp);
3796	rtl_hw_start_8125_common(tp);
3797}
3798
3799static void rtl_hw_config(struct rtl8169_private *tp)
3800{
3801	static const rtl_generic_fct hw_configs[] = {
3802		[RTL_GIGA_MAC_VER_07] = rtl_hw_start_8102e_1,
3803		[RTL_GIGA_MAC_VER_08] = rtl_hw_start_8102e_3,
3804		[RTL_GIGA_MAC_VER_09] = rtl_hw_start_8102e_2,
3805		[RTL_GIGA_MAC_VER_10] = NULL,
3806		[RTL_GIGA_MAC_VER_11] = rtl_hw_start_8168b,
3807		[RTL_GIGA_MAC_VER_14] = rtl_hw_start_8401,
 
3808		[RTL_GIGA_MAC_VER_17] = rtl_hw_start_8168b,
3809		[RTL_GIGA_MAC_VER_18] = rtl_hw_start_8168cp_1,
3810		[RTL_GIGA_MAC_VER_19] = rtl_hw_start_8168c_1,
3811		[RTL_GIGA_MAC_VER_20] = rtl_hw_start_8168c_2,
3812		[RTL_GIGA_MAC_VER_21] = rtl_hw_start_8168c_2,
3813		[RTL_GIGA_MAC_VER_22] = rtl_hw_start_8168c_4,
3814		[RTL_GIGA_MAC_VER_23] = rtl_hw_start_8168cp_2,
3815		[RTL_GIGA_MAC_VER_24] = rtl_hw_start_8168cp_3,
3816		[RTL_GIGA_MAC_VER_25] = rtl_hw_start_8168d,
3817		[RTL_GIGA_MAC_VER_26] = rtl_hw_start_8168d,
 
3818		[RTL_GIGA_MAC_VER_28] = rtl_hw_start_8168d_4,
3819		[RTL_GIGA_MAC_VER_29] = rtl_hw_start_8105e_1,
3820		[RTL_GIGA_MAC_VER_30] = rtl_hw_start_8105e_2,
3821		[RTL_GIGA_MAC_VER_31] = rtl_hw_start_8168d,
3822		[RTL_GIGA_MAC_VER_32] = rtl_hw_start_8168e_1,
3823		[RTL_GIGA_MAC_VER_33] = rtl_hw_start_8168e_1,
3824		[RTL_GIGA_MAC_VER_34] = rtl_hw_start_8168e_2,
3825		[RTL_GIGA_MAC_VER_35] = rtl_hw_start_8168f_1,
3826		[RTL_GIGA_MAC_VER_36] = rtl_hw_start_8168f_1,
3827		[RTL_GIGA_MAC_VER_37] = rtl_hw_start_8402,
3828		[RTL_GIGA_MAC_VER_38] = rtl_hw_start_8411,
3829		[RTL_GIGA_MAC_VER_39] = rtl_hw_start_8106,
3830		[RTL_GIGA_MAC_VER_40] = rtl_hw_start_8168g_1,
 
3831		[RTL_GIGA_MAC_VER_42] = rtl_hw_start_8168g_2,
3832		[RTL_GIGA_MAC_VER_43] = rtl_hw_start_8168g_2,
3833		[RTL_GIGA_MAC_VER_44] = rtl_hw_start_8411_2,
 
3834		[RTL_GIGA_MAC_VER_46] = rtl_hw_start_8168h_1,
 
3835		[RTL_GIGA_MAC_VER_48] = rtl_hw_start_8168h_1,
3836		[RTL_GIGA_MAC_VER_51] = rtl_hw_start_8168ep_3,
3837		[RTL_GIGA_MAC_VER_52] = rtl_hw_start_8117,
3838		[RTL_GIGA_MAC_VER_53] = rtl_hw_start_8117,
3839		[RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125a_2,
3840		[RTL_GIGA_MAC_VER_63] = rtl_hw_start_8125b,
3841		[RTL_GIGA_MAC_VER_64] = rtl_hw_start_8125d,
3842		[RTL_GIGA_MAC_VER_65] = rtl_hw_start_8126a,
3843		[RTL_GIGA_MAC_VER_66] = rtl_hw_start_8126a,
3844	};
3845
3846	if (hw_configs[tp->mac_version])
3847		hw_configs[tp->mac_version](tp);
3848}
3849
3850static void rtl_hw_start_8125(struct rtl8169_private *tp)
3851{
3852	int i;
3853
3854	RTL_W8(tp, INT_CFG0_8125, 0x00);
3855
3856	/* disable interrupt coalescing */
3857	switch (tp->mac_version) {
3858	case RTL_GIGA_MAC_VER_61:
3859	case RTL_GIGA_MAC_VER_64:
3860		for (i = 0xa00; i < 0xb00; i += 4)
3861			RTL_W32(tp, i, 0);
3862		break;
3863	case RTL_GIGA_MAC_VER_63:
3864	case RTL_GIGA_MAC_VER_65:
3865	case RTL_GIGA_MAC_VER_66:
3866		for (i = 0xa00; i < 0xa80; i += 4)
3867			RTL_W32(tp, i, 0);
3868		RTL_W16(tp, INT_CFG1_8125, 0x0000);
3869		break;
3870	default:
3871		break;
3872	}
3873
3874	/* enable extended tally counter */
3875	r8168_mac_ocp_modify(tp, 0xea84, 0, BIT(1) | BIT(0));
3876
3877	rtl_hw_config(tp);
3878}
3879
3880static void rtl_hw_start_8168(struct rtl8169_private *tp)
3881{
3882	if (rtl_is_8168evl_up(tp))
3883		RTL_W8(tp, MaxTxPacketSize, EarlySize);
3884	else
3885		RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
3886
3887	rtl_hw_config(tp);
3888
3889	/* disable interrupt coalescing */
3890	RTL_W16(tp, IntrMitigate, 0x0000);
3891}
3892
3893static void rtl_hw_start_8169(struct rtl8169_private *tp)
3894{
3895	RTL_W8(tp, EarlyTxThres, NoEarlyTx);
3896
3897	tp->cp_cmd |= PCIMulRW;
3898
3899	if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
3900	    tp->mac_version == RTL_GIGA_MAC_VER_03)
3901		tp->cp_cmd |= EnAnaPLL;
3902
3903	RTL_W16(tp, CPlusCmd, tp->cp_cmd);
3904
3905	rtl8169_set_magic_reg(tp);
3906
3907	/* disable interrupt coalescing */
3908	RTL_W16(tp, IntrMitigate, 0x0000);
3909}
3910
3911static void rtl_hw_start(struct  rtl8169_private *tp)
3912{
3913	rtl_unlock_config_regs(tp);
3914	/* disable aspm and clock request before ephy access */
3915	rtl_hw_aspm_clkreq_enable(tp, false);
3916	RTL_W16(tp, CPlusCmd, tp->cp_cmd);
3917
3918	rtl_set_eee_txidle_timer(tp);
3919
3920	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
3921		rtl_hw_start_8169(tp);
3922	else if (rtl_is_8125(tp))
3923		rtl_hw_start_8125(tp);
3924	else
3925		rtl_hw_start_8168(tp);
3926
3927	rtl_enable_exit_l1(tp);
3928	rtl_hw_aspm_clkreq_enable(tp, true);
3929	rtl_set_rx_max_size(tp);
3930	rtl_set_rx_tx_desc_registers(tp);
3931	rtl_lock_config_regs(tp);
3932
3933	rtl_jumbo_config(tp);
3934
3935	/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
3936	rtl_pci_commit(tp);
3937
3938	RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
3939	rtl_init_rxcfg(tp);
3940	rtl_set_tx_config_registers(tp);
3941	rtl_set_rx_config_features(tp, tp->dev->features);
3942	rtl_set_rx_mode(tp->dev);
3943	rtl_irq_enable(tp);
3944}
3945
3946static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
3947{
3948	struct rtl8169_private *tp = netdev_priv(dev);
3949
3950	WRITE_ONCE(dev->mtu, new_mtu);
3951	netdev_update_features(dev);
3952	rtl_jumbo_config(tp);
3953	rtl_set_eee_txidle_timer(tp);
3954
3955	return 0;
3956}
3957
3958static void rtl8169_mark_to_asic(struct RxDesc *desc)
3959{
3960	u32 eor = le32_to_cpu(desc->opts1) & RingEnd;
3961
3962	desc->opts2 = 0;
3963	/* Force memory writes to complete before releasing descriptor */
3964	dma_wmb();
3965	WRITE_ONCE(desc->opts1, cpu_to_le32(DescOwn | eor | R8169_RX_BUF_SIZE));
3966}
3967
3968static struct page *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
3969					  struct RxDesc *desc)
3970{
3971	struct device *d = tp_to_dev(tp);
3972	int node = dev_to_node(d);
3973	dma_addr_t mapping;
3974	struct page *data;
3975
3976	data = alloc_pages_node(node, GFP_KERNEL, get_order(R8169_RX_BUF_SIZE));
3977	if (!data)
3978		return NULL;
3979
3980	mapping = dma_map_page(d, data, 0, R8169_RX_BUF_SIZE, DMA_FROM_DEVICE);
3981	if (unlikely(dma_mapping_error(d, mapping))) {
3982		netdev_err(tp->dev, "Failed to map RX DMA!\n");
3983		__free_pages(data, get_order(R8169_RX_BUF_SIZE));
3984		return NULL;
3985	}
3986
3987	desc->addr = cpu_to_le64(mapping);
3988	rtl8169_mark_to_asic(desc);
3989
3990	return data;
3991}
3992
3993static void rtl8169_rx_clear(struct rtl8169_private *tp)
3994{
3995	int i;
3996
3997	for (i = 0; i < NUM_RX_DESC && tp->Rx_databuff[i]; i++) {
3998		dma_unmap_page(tp_to_dev(tp),
3999			       le64_to_cpu(tp->RxDescArray[i].addr),
4000			       R8169_RX_BUF_SIZE, DMA_FROM_DEVICE);
4001		__free_pages(tp->Rx_databuff[i], get_order(R8169_RX_BUF_SIZE));
4002		tp->Rx_databuff[i] = NULL;
4003		tp->RxDescArray[i].addr = 0;
4004		tp->RxDescArray[i].opts1 = 0;
4005	}
4006}
4007
4008static int rtl8169_rx_fill(struct rtl8169_private *tp)
4009{
4010	int i;
4011
4012	for (i = 0; i < NUM_RX_DESC; i++) {
4013		struct page *data;
4014
4015		data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
4016		if (!data) {
4017			rtl8169_rx_clear(tp);
4018			return -ENOMEM;
4019		}
4020		tp->Rx_databuff[i] = data;
4021	}
4022
4023	/* mark as last descriptor in the ring */
4024	tp->RxDescArray[NUM_RX_DESC - 1].opts1 |= cpu_to_le32(RingEnd);
4025
4026	return 0;
4027}
4028
4029static int rtl8169_init_ring(struct rtl8169_private *tp)
4030{
4031	rtl8169_init_ring_indexes(tp);
4032
4033	memset(tp->tx_skb, 0, sizeof(tp->tx_skb));
4034	memset(tp->Rx_databuff, 0, sizeof(tp->Rx_databuff));
4035
4036	return rtl8169_rx_fill(tp);
4037}
4038
4039static void rtl8169_unmap_tx_skb(struct rtl8169_private *tp, unsigned int entry)
4040{
4041	struct ring_info *tx_skb = tp->tx_skb + entry;
4042	struct TxDesc *desc = tp->TxDescArray + entry;
4043
4044	dma_unmap_single(tp_to_dev(tp), le64_to_cpu(desc->addr), tx_skb->len,
4045			 DMA_TO_DEVICE);
4046	memset(desc, 0, sizeof(*desc));
4047	memset(tx_skb, 0, sizeof(*tx_skb));
4048}
4049
4050static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
4051				   unsigned int n)
4052{
4053	unsigned int i;
4054
4055	for (i = 0; i < n; i++) {
4056		unsigned int entry = (start + i) % NUM_TX_DESC;
4057		struct ring_info *tx_skb = tp->tx_skb + entry;
4058		unsigned int len = tx_skb->len;
4059
4060		if (len) {
4061			struct sk_buff *skb = tx_skb->skb;
4062
4063			rtl8169_unmap_tx_skb(tp, entry);
4064			if (skb)
4065				dev_consume_skb_any(skb);
4066		}
4067	}
4068}
4069
4070static void rtl8169_tx_clear(struct rtl8169_private *tp)
4071{
4072	rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
4073	netdev_reset_queue(tp->dev);
4074}
4075
4076static void rtl8169_cleanup(struct rtl8169_private *tp)
4077{
4078	napi_disable(&tp->napi);
4079
4080	/* Give a racing hard_start_xmit a few cycles to complete. */
4081	synchronize_net();
4082
4083	/* Disable interrupts */
4084	rtl8169_irq_mask_and_ack(tp);
4085
4086	rtl_rx_close(tp);
4087
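	/* Quiesce TX/RX DMA in a chip-specific way before issuing the
	 * hardware reset below.
	 */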
4088	switch (tp->mac_version) {
 
4089	case RTL_GIGA_MAC_VER_28:
4090	case RTL_GIGA_MAC_VER_31:
4091		rtl_loop_wait_low(tp, &rtl_npq_cond, 20, 2000);
4092		break;
4093	case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
4094		RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
4095		rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
4096		break;
4097	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_66:
4098		rtl_enable_rxdvgate(tp);
4099		fsleep(2000);
4100		break;
4101	default:
4102		RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
4103		fsleep(100);
4104		break;
4105	}
4106
4107	rtl_hw_reset(tp);
4108
4109	rtl8169_tx_clear(tp);
4110	rtl8169_init_ring_indexes(tp);
4111}
4112
4113static void rtl_reset_work(struct rtl8169_private *tp)
4114{
4115	int i;
4116
4117	netif_stop_queue(tp->dev);
4118
4119	rtl8169_cleanup(tp);
4120
4121	for (i = 0; i < NUM_RX_DESC; i++)
4122		rtl8169_mark_to_asic(tp->RxDescArray + i);
4123
4124	napi_enable(&tp->napi);
4125	rtl_hw_start(tp);
4126}
4127
4128static void rtl8169_tx_timeout(struct net_device *dev, unsigned int txqueue)
4129{
4130	struct rtl8169_private *tp = netdev_priv(dev);
4131
4132	rtl_schedule_task(tp, RTL_FLAG_TASK_TX_TIMEOUT);
4133}
4134
4135static int rtl8169_tx_map(struct rtl8169_private *tp, const u32 *opts, u32 len,
4136			  void *addr, unsigned int entry, bool desc_own)
4137{
4138	struct TxDesc *txd = tp->TxDescArray + entry;
4139	struct device *d = tp_to_dev(tp);
4140	dma_addr_t mapping;
4141	u32 opts1;
4142	int ret;
4143
4144	mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
4145	ret = dma_mapping_error(d, mapping);
4146	if (unlikely(ret)) {
4147		if (net_ratelimit())
4148			netdev_err(tp->dev, "Failed to map TX data!\n");
4149		return ret;
4150	}
4151
4152	txd->addr = cpu_to_le64(mapping);
4153	txd->opts2 = cpu_to_le32(opts[1]);
4154
4155	opts1 = opts[0] | len;
4156	if (entry == NUM_TX_DESC - 1)
4157		opts1 |= RingEnd;
4158	if (desc_own)
4159		opts1 |= DescOwn;
4160	txd->opts1 = cpu_to_le32(opts1);
4161
4162	tp->tx_skb[entry].len = len;
4163
4164	return 0;
4165}
4166
4167static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
4168			      const u32 *opts, unsigned int entry)
4169{
4170	struct skb_shared_info *info = skb_shinfo(skb);
4171	unsigned int cur_frag;
4172
4173	for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
4174		const skb_frag_t *frag = info->frags + cur_frag;
4175		void *addr = skb_frag_address(frag);
4176		u32 len = skb_frag_size(frag);
4177
4178		entry = (entry + 1) % NUM_TX_DESC;
4179
4180		if (unlikely(rtl8169_tx_map(tp, opts, len, addr, entry, true)))
4181			goto err_out;
4182	}
4183
4184	return 0;
4185
4186err_out:
4187	rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag);
4188	return -EIO;
4189}
4190
4191static bool rtl_skb_is_udp(struct sk_buff *skb)
4192{
4193	int no = skb_network_offset(skb);
4194	struct ipv6hdr *i6h, _i6h;
4195	struct iphdr *ih, _ih;
4196
4197	switch (vlan_get_protocol(skb)) {
4198	case htons(ETH_P_IP):
4199		ih = skb_header_pointer(skb, no, sizeof(_ih), &_ih);
4200		return ih && ih->protocol == IPPROTO_UDP;
4201	case htons(ETH_P_IPV6):
4202		i6h = skb_header_pointer(skb, no, sizeof(_i6h), &_i6h);
4203		return i6h && i6h->nexthdr == IPPROTO_UDP;
4204	default:
4205		return false;
4206	}
4207}
4208
4209#define RTL_MIN_PATCH_LEN	47
4210
4211/* see rtl8125_get_patch_pad_len() in r8125 vendor driver */
4212static unsigned int rtl8125_quirk_udp_padto(struct rtl8169_private *tp,
4213					    struct sk_buff *skb)
4214{
4215	unsigned int padto = 0, len = skb->len;
4216
4217	if (len < 128 + RTL_MIN_PATCH_LEN && rtl_skb_is_udp(skb) &&
4218	    skb_transport_header_was_set(skb)) {
4219		unsigned int trans_data_len = skb_tail_pointer(skb) -
4220					      skb_transport_header(skb);
4221
4222		if (trans_data_len >= offsetof(struct udphdr, len) &&
4223		    trans_data_len < RTL_MIN_PATCH_LEN) {
4224			u16 dest = ntohs(udp_hdr(skb)->dest);
4225
4226			/* dest is a standard PTP port */
4227			if (dest == 319 || dest == 320)
4228				padto = len + RTL_MIN_PATCH_LEN - trans_data_len;
4229		}
4230
4231		if (trans_data_len < sizeof(struct udphdr))
4232			padto = max_t(unsigned int, padto,
4233				      len + sizeof(struct udphdr) - trans_data_len);
4234	}
4235
4236	return padto;
4237}
4238
4239static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp,
4240					   struct sk_buff *skb)
4241{
4242	unsigned int padto = 0;
4243
4244	switch (tp->mac_version) {
4245	case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63:
4246		padto = rtl8125_quirk_udp_padto(tp, skb);
4247		break;
4248	default:
4249		break;
4250	}
4251
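	/* These chip versions additionally need padding to the minimum
	 * Ethernet frame size (ETH_ZLEN); see also the short-packet
	 * checksum workaround in rtl8169_features_check().
	 */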
4252	switch (tp->mac_version) {
4253	case RTL_GIGA_MAC_VER_34:
4254	case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
4255		padto = max_t(unsigned int, padto, ETH_ZLEN);
4256		break;
4257	default:
4258		break;
4259	}
4260
4261	return padto;
4262}
4263
4264static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts)
4265{
4266	u32 mss = skb_shinfo(skb)->gso_size;
4267
4268	if (mss) {
4269		opts[0] |= TD_LSO;
4270		opts[0] |= mss << TD0_MSS_SHIFT;
4271	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
4272		const struct iphdr *ip = ip_hdr(skb);
4273
4274		if (ip->protocol == IPPROTO_TCP)
4275			opts[0] |= TD0_IP_CS | TD0_TCP_CS;
4276		else if (ip->protocol == IPPROTO_UDP)
4277			opts[0] |= TD0_IP_CS | TD0_UDP_CS;
4278		else
4279			WARN_ON_ONCE(1);
4280	}
4281}
4282
4283static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
4284				struct sk_buff *skb, u32 *opts)
4285{
 
4286	struct skb_shared_info *shinfo = skb_shinfo(skb);
4287	u32 mss = shinfo->gso_size;
4288
4289	if (mss) {
4290		if (shinfo->gso_type & SKB_GSO_TCPV4) {
4291			opts[0] |= TD1_GTSENV4;
4292		} else if (shinfo->gso_type & SKB_GSO_TCPV6) {
4293			if (skb_cow_head(skb, 0))
4294				return false;
4295
4296			tcp_v6_gso_csum_prep(skb);
4297			opts[0] |= TD1_GTSENV6;
4298		} else {
4299			WARN_ON_ONCE(1);
4300		}
4301
4302		opts[0] |= skb_transport_offset(skb) << GTTCPHO_SHIFT;
4303		opts[1] |= mss << TD1_MSS_SHIFT;
4304	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
4305		u8 ip_protocol;
4306
4307		switch (vlan_get_protocol(skb)) {
4308		case htons(ETH_P_IP):
4309			opts[1] |= TD1_IPv4_CS;
4310			ip_protocol = ip_hdr(skb)->protocol;
4311			break;
4312
4313		case htons(ETH_P_IPV6):
4314			opts[1] |= TD1_IPv6_CS;
4315			ip_protocol = ipv6_hdr(skb)->nexthdr;
4316			break;
4317
4318		default:
4319			ip_protocol = IPPROTO_RAW;
4320			break;
4321		}
4322
4323		if (ip_protocol == IPPROTO_TCP)
4324			opts[1] |= TD1_TCP_CS;
4325		else if (ip_protocol == IPPROTO_UDP)
4326			opts[1] |= TD1_UDP_CS;
4327		else
4328			WARN_ON_ONCE(1);
4329
4330		opts[1] |= skb_transport_offset(skb) << TCPHO_SHIFT;
4331	} else {
4332		unsigned int padto = rtl_quirk_packet_padto(tp, skb);
4333
4334		/* skb_padto would free the skb on error */
4335		return !__skb_put_padto(skb, padto, false);
4336	}
4337
4338	return true;
4339}
4340
4341static unsigned int rtl_tx_slots_avail(struct rtl8169_private *tp)
 
4342{
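	/* dirty_tx and cur_tx are free-running counters; unsigned wrap-around
	 * keeps the difference correct. The result is the number of
	 * descriptors currently available for new frames.
	 */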
4343	return READ_ONCE(tp->dirty_tx) + NUM_TX_DESC - READ_ONCE(tp->cur_tx);
4344}
4345
4346/* RTL8102e and chip versions from RTL8168c onwards support csum_v2 */
4347static bool rtl_chip_supports_csum_v2(struct rtl8169_private *tp)
4348{
4349	switch (tp->mac_version) {
4350	case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
4351	case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17:
4352		return false;
4353	default:
4354		return true;
4355	}
4356}
4357
4358static void rtl8169_doorbell(struct rtl8169_private *tp)
4359{
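	/* RTL8125 uses a dedicated 16-bit TxPoll register; older chips kick
	 * the normal priority queue via the legacy 8-bit TxPoll register.
	 */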
4360	if (rtl_is_8125(tp))
4361		RTL_W16(tp, TxPoll_8125, BIT(0));
4362	else
4363		RTL_W8(tp, TxPoll, NPQ);
4364}
4365
4366static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
4367				      struct net_device *dev)
4368{
 
4369	struct rtl8169_private *tp = netdev_priv(dev);
4370	unsigned int entry = tp->cur_tx % NUM_TX_DESC;
4371	struct TxDesc *txd_first, *txd_last;
4372	bool stop_queue, door_bell;
4373	unsigned int frags;
4374	u32 opts[2];
4375
4376	if (unlikely(!rtl_tx_slots_avail(tp))) {
4377		if (net_ratelimit())
4378			netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
4379		netif_stop_queue(dev);
4380		return NETDEV_TX_BUSY;
4381	}
4382
4383	opts[1] = rtl8169_tx_vlan_tag(skb);
4384	opts[0] = 0;
4385
4386	if (!rtl_chip_supports_csum_v2(tp))
4387		rtl8169_tso_csum_v1(skb, opts);
4388	else if (!rtl8169_tso_csum_v2(tp, skb, opts))
4389		goto err_dma_0;
4390
4391	if (unlikely(rtl8169_tx_map(tp, opts, skb_headlen(skb), skb->data,
4392				    entry, false)))
4393		goto err_dma_0;
4394
4395	txd_first = tp->TxDescArray + entry;
4396
4397	frags = skb_shinfo(skb)->nr_frags;
4398	if (frags) {
4399		if (rtl8169_xmit_frags(tp, skb, opts, entry))
4400			goto err_dma_1;
4401		entry = (entry + frags) % NUM_TX_DESC;
4402	}
4403
4404	txd_last = tp->TxDescArray + entry;
4405	txd_last->opts1 |= cpu_to_le32(LastFrag);
4406	tp->tx_skb[entry].skb = skb;
4407
4408	skb_tx_timestamp(skb);
4409
4410	/* Force memory writes to complete before releasing descriptor */
4411	dma_wmb();
4412
4413	door_bell = __netdev_sent_queue(dev, skb->len, netdev_xmit_more());
4414
4415	txd_first->opts1 |= cpu_to_le32(DescOwn | FirstFrag);
4416
4417	/* rtl_tx needs to see descriptor changes before updated tp->cur_tx */
4418	smp_wmb();
4419
4420	WRITE_ONCE(tp->cur_tx, tp->cur_tx + frags + 1);
4421
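	/* Stop the queue when a further worst-case (maximally fragmented)
	 * frame may no longer fit; rtl_tx() restarts it once enough
	 * descriptors have been reclaimed. Ring the doorbell unless more
	 * frames are about to follow (xmit_more), but always do so when
	 * stopping the queue.
	 */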
4422	stop_queue = !netif_subqueue_maybe_stop(dev, 0, rtl_tx_slots_avail(tp),
4423						R8169_TX_STOP_THRS,
4424						R8169_TX_START_THRS);
4425	if (door_bell || stop_queue)
4426		rtl8169_doorbell(tp);
4427
4428	return NETDEV_TX_OK;
4429
4430err_dma_1:
4431	rtl8169_unmap_tx_skb(tp, entry);
4432err_dma_0:
4433	dev_kfree_skb_any(skb);
4434	dev->stats.tx_dropped++;
4435	return NETDEV_TX_OK;
4436}
4437
4438static unsigned int rtl_last_frag_len(struct sk_buff *skb)
4439{
4440	struct skb_shared_info *info = skb_shinfo(skb);
4441	unsigned int nr_frags = info->nr_frags;
4442
4443	if (!nr_frags)
4444		return UINT_MAX;
4445
4446	return skb_frag_size(info->frags + nr_frags - 1);
4447}
4448
4449/* Workaround for hw issues with TSO on RTL8168evl */
4450static netdev_features_t rtl8168evl_fix_tso(struct sk_buff *skb,
4451					    netdev_features_t features)
4452{
4453	/* IPv4 header has options field */
4454	if (vlan_get_protocol(skb) == htons(ETH_P_IP) &&
4455	    ip_hdrlen(skb) > sizeof(struct iphdr))
4456		features &= ~NETIF_F_ALL_TSO;
4457
4458	/* IPv4 TCP header has options field */
4459	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 &&
4460		 tcp_hdrlen(skb) > sizeof(struct tcphdr))
4461		features &= ~NETIF_F_ALL_TSO;
4462
4463	else if (rtl_last_frag_len(skb) <= 6)
4464		features &= ~NETIF_F_ALL_TSO;
4465
4466	return features;
4467}
4468
4469static netdev_features_t rtl8169_features_check(struct sk_buff *skb,
4470						struct net_device *dev,
4471						netdev_features_t features)
4472{
 
4473	struct rtl8169_private *tp = netdev_priv(dev);
4474
4475	if (skb_is_gso(skb)) {
4476		if (tp->mac_version == RTL_GIGA_MAC_VER_34)
4477			features = rtl8168evl_fix_tso(skb, features);
4478
4479		if (skb_transport_offset(skb) > GTTCPHO_MAX &&
4480		    rtl_chip_supports_csum_v2(tp))
4481			features &= ~NETIF_F_ALL_TSO;
4482	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
4483		/* work around hw bug on some chip versions */
4484		if (skb->len < ETH_ZLEN)
4485			features &= ~NETIF_F_CSUM_MASK;
4486
4487		if (rtl_quirk_packet_padto(tp, skb))
4488			features &= ~NETIF_F_CSUM_MASK;
4489
4490		if (skb_transport_offset(skb) > TCPHO_MAX &&
4491		    rtl_chip_supports_csum_v2(tp))
4492			features &= ~NETIF_F_CSUM_MASK;
4493	}
4494
4495	return vlan_features_check(skb, features);
4496}
4497
4498static void rtl8169_pcierr_interrupt(struct net_device *dev)
4499{
4500	struct rtl8169_private *tp = netdev_priv(dev);
4501	struct pci_dev *pdev = tp->pci_dev;
4502	int pci_status_errs;
4503	u16 pci_cmd;
4504
4505	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
4506
4507	pci_status_errs = pci_status_get_and_clear_errors(pdev);
4508
4509	if (net_ratelimit())
4510		netdev_err(dev, "PCI error (cmd = 0x%04x, status_errs = 0x%04x)\n",
4511			   pci_cmd, pci_status_errs);
4512
4513	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
4514}
4515
4516static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
4517		   int budget)
4518{
4519	unsigned int dirty_tx, bytes_compl = 0, pkts_compl = 0;
4520	struct sk_buff *skb;
4521
4522	dirty_tx = tp->dirty_tx;
 
4523
4524	while (READ_ONCE(tp->cur_tx) != dirty_tx) {
4525		unsigned int entry = dirty_tx % NUM_TX_DESC;
 
4526		u32 status;
4527
4528		status = le32_to_cpu(READ_ONCE(tp->TxDescArray[entry].opts1));
4529		if (status & DescOwn)
4530			break;
4531
4532		skb = tp->tx_skb[entry].skb;
4533		rtl8169_unmap_tx_skb(tp, entry);
4534
4535		if (skb) {
4536			pkts_compl++;
4537			bytes_compl += skb->len;
4538			napi_consume_skb(skb, budget);
4539		}
4540		dirty_tx++;
4541	}
4542
4543	if (tp->dirty_tx != dirty_tx) {
4544		dev_sw_netstats_tx_add(dev, pkts_compl, bytes_compl);
4545		WRITE_ONCE(tp->dirty_tx, dirty_tx);
4546
4547		netif_subqueue_completed_wake(dev, 0, pkts_compl, bytes_compl,
4548					      rtl_tx_slots_avail(tp),
4549					      R8169_TX_START_THRS);
4550		/*
4551		 * 8168 hack: TxPoll requests are lost when the Tx packets are
4552		 * too close. Let's kick an extra TxPoll request when a burst
4553		 * of start_xmit activity is detected (if it is not detected,
4554		 * it is slow enough). -- FR
4555		 * If skb is NULL then we come here again once a tx irq is
4556		 * triggered after the last fragment is marked transmitted.
4557		 */
4558		if (READ_ONCE(tp->cur_tx) != dirty_tx && skb)
4559			rtl8169_doorbell(tp);
4560	}
4561}
4562
4563static inline int rtl8169_fragmented_frame(u32 status)
4564{
4565	return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
4566}
4567
4568static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
4569{
4570	u32 status = opts1 & (RxProtoMask | RxCSFailMask);
4571
4572	if (status == RxProtoTCP || status == RxProtoUDP)
 
4573		skb->ip_summed = CHECKSUM_UNNECESSARY;
4574	else
4575		skb_checksum_none_assert(skb);
4576}
4577
4578static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, int budget)
4579{
 
4580	struct device *d = tp_to_dev(tp);
4581	int count;
4582
4583	for (count = 0; count < budget; count++, tp->cur_rx++) {
4584		unsigned int pkt_size, entry = tp->cur_rx % NUM_RX_DESC;
4585		struct RxDesc *desc = tp->RxDescArray + entry;
4586		struct sk_buff *skb;
4587		const void *rx_buf;
4588		dma_addr_t addr;
4589		u32 status;
4590
4591		status = le32_to_cpu(READ_ONCE(desc->opts1));
4592		if (status & DescOwn)
4593			break;
4594
4595		/* This barrier is needed to keep us from reading
4596		 * any other fields out of the Rx descriptor until
4597		 * we know the status of DescOwn
4598		 */
4599		dma_rmb();
4600
4601		if (unlikely(status & RxRES)) {
4602			if (net_ratelimit())
4603				netdev_warn(dev, "Rx ERROR. status = %08x\n",
4604					    status);
4605			dev->stats.rx_errors++;
4606			if (status & (RxRWT | RxRUNT))
4607				dev->stats.rx_length_errors++;
4608			if (status & RxCRC)
4609				dev->stats.rx_crc_errors++;
4610
4611			if (!(dev->features & NETIF_F_RXALL))
4612				goto release_descriptor;
4613			else if (status & RxRWT || !(status & (RxRUNT | RxCRC)))
4614				goto release_descriptor;
4615		}
4616
4617		pkt_size = status & GENMASK(13, 0);
4618		if (likely(!(dev->features & NETIF_F_RXFCS)))
4619			pkt_size -= ETH_FCS_LEN;
4620
4621		/* The driver does not support incoming fragmented frames.
4622		 * They are seen as a symptom of over-mtu sized frames.
4623		 */
4624		if (unlikely(rtl8169_fragmented_frame(status))) {
4625			dev->stats.rx_dropped++;
4626			dev->stats.rx_length_errors++;
4627			goto release_descriptor;
4628		}
4629
4630		skb = napi_alloc_skb(&tp->napi, pkt_size);
4631		if (unlikely(!skb)) {
4632			dev->stats.rx_dropped++;
4633			goto release_descriptor;
4634		}
4635
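		/* Copy the frame out of the persistent page buffer into the
		 * freshly allocated skb; the RX buffer itself stays mapped
		 * and is handed back to the chip at release_descriptor.
		 */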
4636		addr = le64_to_cpu(desc->addr);
4637		rx_buf = page_address(tp->Rx_databuff[entry]);
4638
4639		dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
4640		prefetch(rx_buf);
4641		skb_copy_to_linear_data(skb, rx_buf, pkt_size);
4642		skb->tail += pkt_size;
4643		skb->len = pkt_size;
4644		dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
4645
4646		rtl8169_rx_csum(skb, status);
4647		skb->protocol = eth_type_trans(skb, dev);
4648
4649		rtl8169_rx_vlan_tag(desc, skb);
4650
4651		if (skb->pkt_type == PACKET_MULTICAST)
4652			dev->stats.multicast++;
4653
4654		napi_gro_receive(&tp->napi, skb);
4655
4656		dev_sw_netstats_rx_add(dev, pkt_size);
4657release_descriptor:
4658		rtl8169_mark_to_asic(desc);
4659	}
4660
4661	return count;
4662}
4663
4664static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
4665{
4666	struct rtl8169_private *tp = dev_instance;
4667	u32 status = rtl_get_events(tp);
4668
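	/* Reading all-ones typically means the chip is no longer accessible
	 * (e.g. surprise removal); also bail out if none of the enabled
	 * events fired, e.g. on a shared interrupt line.
	 */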
4669	if ((status & 0xffff) == 0xffff || !(status & tp->irq_mask))
 
4670		return IRQ_NONE;
4671
4672	/* At least RTL8168fp may unexpectedly set the SYSErr bit */
4673	if (unlikely(status & SYSErr &&
4674	    tp->mac_version <= RTL_GIGA_MAC_VER_06)) {
4675		rtl8169_pcierr_interrupt(tp->dev);
4676		goto out;
4677	}
4678
4679	if (status & LinkChg)
4680		phy_mac_interrupt(tp->phydev);
4681
4682	if (unlikely(status & RxFIFOOver &&
4683	    tp->mac_version == RTL_GIGA_MAC_VER_11)) {
4684		netif_stop_queue(tp->dev);
4685		rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
4686	}
4687
4688	rtl_irq_disable(tp);
4689	napi_schedule(&tp->napi);
4690out:
4691	rtl_ack_events(tp, status);
4692
4693	return IRQ_HANDLED;
4694}
4695
4696static void rtl_task(struct work_struct *work)
4697{
4698	struct rtl8169_private *tp =
4699		container_of(work, struct rtl8169_private, wk.work);
4700	int ret;
4701
4702	if (test_and_clear_bit(RTL_FLAG_TASK_TX_TIMEOUT, tp->wk.flags)) {
4703		/* if chip isn't accessible, reset bus to revive it */
4704		if (RTL_R32(tp, TxConfig) == ~0) {
4705			ret = pci_reset_bus(tp->pci_dev);
4706			if (ret < 0) {
4707				netdev_err(tp->dev, "Can't reset secondary PCI bus, detach NIC\n");
4708				netif_device_detach(tp->dev);
4709				return;
4710			}
4711		}
4712
4713		/* ASPM compatibility issues are a typical reason for tx timeouts */
4714		ret = pci_disable_link_state(tp->pci_dev, PCIE_LINK_STATE_L1 |
4715							  PCIE_LINK_STATE_L0S);
4716		if (!ret)
4717			netdev_warn_once(tp->dev, "ASPM disabled on Tx timeout\n");
4718		goto reset;
4719	}
4720
4721	if (test_and_clear_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags)) {
4722reset:
4723		rtl_reset_work(tp);
4724		netif_wake_queue(tp->dev);
4725	} else if (test_and_clear_bit(RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE, tp->wk.flags)) {
4726		rtl_reset_work(tp);
4727	}
4728}
4729
4730static int rtl8169_poll(struct napi_struct *napi, int budget)
4731{
4732	struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
4733	struct net_device *dev = tp->dev;
4734	int work_done;
4735
4736	rtl_tx(dev, tp, budget);
4737
4738	work_done = rtl_rx(dev, tp, budget);
4739
4740	if (work_done < budget && napi_complete_done(napi, work_done))
 
4741		rtl_irq_enable(tp);
 
4742
4743	return work_done;
4744}
4745
4746static void r8169_phylink_handler(struct net_device *ndev)
4747{
4748	struct rtl8169_private *tp = netdev_priv(ndev);
4749	struct device *d = tp_to_dev(tp);
4750
4751	if (netif_carrier_ok(ndev)) {
4752		rtl_link_chg_patch(tp);
4753		pm_request_resume(d);
4754	} else {
4755		pm_runtime_idle(d);
4756	}
4757
4758	phy_print_status(tp->phydev);
 
4759}
4760
4761static int r8169_phy_connect(struct rtl8169_private *tp)
4762{
4763	struct phy_device *phydev = tp->phydev;
4764	phy_interface_t phy_mode;
4765	int ret;
4766
4767	phy_mode = tp->supports_gmii ? PHY_INTERFACE_MODE_GMII :
4768		   PHY_INTERFACE_MODE_MII;
4769
4770	ret = phy_connect_direct(tp->dev, phydev, r8169_phylink_handler,
4771				 phy_mode);
4772	if (ret)
4773		return ret;
4774
4775	if (!tp->supports_gmii)
4776		phy_set_max_speed(phydev, SPEED_100);
4777
4778	phy_attached_info(phydev);
4779
4780	return 0;
4781}
4782
4783static void rtl8169_down(struct rtl8169_private *tp)
4784{
4785	disable_work_sync(&tp->wk.work);
4786	/* Clear all task flags */
4787	bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
4788
4789	phy_stop(tp->phydev);
4790
4791	rtl8169_update_counters(tp);
4792
4793	pci_clear_master(tp->pci_dev);
4794	rtl_pci_commit(tp);
4795
4796	rtl8169_cleanup(tp);
4797	rtl_disable_exit_l1(tp);
4798	rtl_prepare_power_down(tp);
4799
4800	if (tp->dash_type != RTL_DASH_NONE)
4801		rtl8168_driver_stop(tp);
4802}
4803
4804static void rtl8169_up(struct rtl8169_private *tp)
4805{
4806	if (tp->dash_type != RTL_DASH_NONE)
4807		rtl8168_driver_start(tp);
4808
4809	pci_set_master(tp->pci_dev);
4810	phy_init_hw(tp->phydev);
4811	phy_resume(tp->phydev);
4812	rtl8169_init_phy(tp);
4813	napi_enable(&tp->napi);
4814	enable_work(&tp->wk.work);
4815	rtl_reset_work(tp);
4816
4817	phy_start(tp->phydev);
4818}
4819
4820static int rtl8169_close(struct net_device *dev)
4821{
4822	struct rtl8169_private *tp = netdev_priv(dev);
4823	struct pci_dev *pdev = tp->pci_dev;
4824
4825	pm_runtime_get_sync(&pdev->dev);
4826
4827	netif_stop_queue(dev);
4828	rtl8169_down(tp);
4829	rtl8169_rx_clear(tp);
4830
4831	free_irq(tp->irq, tp);
4832
4833	phy_disconnect(tp->phydev);
4834
4835	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
4836			  tp->RxPhyAddr);
4837	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
4838			  tp->TxPhyAddr);
4839	tp->TxDescArray = NULL;
4840	tp->RxDescArray = NULL;
4841
4842	pm_runtime_put_sync(&pdev->dev);
4843
4844	return 0;
4845}
4846
4847#ifdef CONFIG_NET_POLL_CONTROLLER
4848static void rtl8169_netpoll(struct net_device *dev)
4849{
4850	struct rtl8169_private *tp = netdev_priv(dev);
4851
4852	rtl8169_interrupt(tp->irq, tp);
4853}
4854#endif
4855
4856static int rtl_open(struct net_device *dev)
4857{
4858	struct rtl8169_private *tp = netdev_priv(dev);
4859	struct pci_dev *pdev = tp->pci_dev;
4860	unsigned long irqflags;
4861	int retval = -ENOMEM;
4862
4863	pm_runtime_get_sync(&pdev->dev);
4864
4865	/*
4866	 * Rx and Tx descriptors need 256-byte alignment.
4867	 * dma_alloc_coherent provides more.
4868	 */
4869	tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
4870					     &tp->TxPhyAddr, GFP_KERNEL);
4871	if (!tp->TxDescArray)
4872		goto out;
4873
4874	tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
4875					     &tp->RxPhyAddr, GFP_KERNEL);
4876	if (!tp->RxDescArray)
4877		goto err_free_tx_0;
4878
4879	retval = rtl8169_init_ring(tp);
4880	if (retval < 0)
4881		goto err_free_rx_1;
4882
4883	rtl_request_firmware(tp);
4884
4885	irqflags = pci_dev_msi_enabled(pdev) ? IRQF_NO_THREAD : IRQF_SHARED;
4886	retval = request_irq(tp->irq, rtl8169_interrupt, irqflags, dev->name, tp);
4887	if (retval < 0)
4888		goto err_release_fw_2;
4889
4890	retval = r8169_phy_connect(tp);
4891	if (retval)
4892		goto err_free_irq;
4893
4894	rtl8169_up(tp);
4895	rtl8169_init_counter_offsets(tp);
4896	netif_start_queue(dev);
4897out:
4898	pm_runtime_put_sync(&pdev->dev);
4899
4900	return retval;
4901
4902err_free_irq:
4903	free_irq(tp->irq, tp);
4904err_release_fw_2:
4905	rtl_release_firmware(tp);
4906	rtl8169_rx_clear(tp);
4907err_free_rx_1:
4908	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
4909			  tp->RxPhyAddr);
4910	tp->RxDescArray = NULL;
4911err_free_tx_0:
4912	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
4913			  tp->TxPhyAddr);
4914	tp->TxDescArray = NULL;
4915	goto out;
4916}
4917
4918static void
4919rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
4920{
4921	struct rtl8169_private *tp = netdev_priv(dev);
4922	struct pci_dev *pdev = tp->pci_dev;
4923	struct rtl8169_counters *counters = tp->counters;
 
4924
4925	pm_runtime_get_noresume(&pdev->dev);
4926
4927	netdev_stats_to_stats64(stats, &dev->stats);
4928	dev_fetch_sw_netstats(stats, dev->tstats);
4929
4930	/*
4931	 * Fetch, from the hardware tally counters, additional counter values
4932	 * missing in the stats collected by the driver.
4933	 */
4934	if (pm_runtime_active(&pdev->dev))
4935		rtl8169_update_counters(tp);
4936
4937	/*
4938	 * Subtract the values fetched during initialization.
4939	 * See rtl8169_init_counter_offsets for a description of why we do that.
4940	 */
4941	stats->tx_errors = le64_to_cpu(counters->tx_errors) -
4942		le64_to_cpu(tp->tc_offset.tx_errors);
4943	stats->collisions = le32_to_cpu(counters->tx_multi_collision) -
4944		le32_to_cpu(tp->tc_offset.tx_multi_collision);
4945	stats->tx_aborted_errors = le16_to_cpu(counters->tx_aborted) -
4946		le16_to_cpu(tp->tc_offset.tx_aborted);
4947	stats->rx_missed_errors = le16_to_cpu(counters->rx_missed) -
4948		le16_to_cpu(tp->tc_offset.rx_missed);
4949
4950	pm_runtime_put_noidle(&pdev->dev);
4951}
4952
4953static void rtl8169_net_suspend(struct rtl8169_private *tp)
4954{
4955	netif_device_detach(tp->dev);
4956
4957	if (netif_running(tp->dev))
4958		rtl8169_down(tp);
4959}
4960
4961static int rtl8169_runtime_resume(struct device *dev)
4962{
4963	struct rtl8169_private *tp = dev_get_drvdata(dev);
4964
4965	rtl_rar_set(tp, tp->dev->dev_addr);
4966	__rtl8169_set_wol(tp, tp->saved_wolopts);
4967
4968	if (tp->TxDescArray)
4969		rtl8169_up(tp);
4970
4971	netif_device_attach(tp->dev);
4972
4973	return 0;
4974}
4975
4976static int rtl8169_suspend(struct device *device)
4977{
4978	struct rtl8169_private *tp = dev_get_drvdata(device);
4979
4980	rtnl_lock();
4981	rtl8169_net_suspend(tp);
4982	if (!device_may_wakeup(tp_to_dev(tp)))
4983		clk_disable_unprepare(tp->clk);
4984	rtnl_unlock();
4985
4986	return 0;
4987}
4988
4989static int rtl8169_resume(struct device *device)
4990{
4991	struct rtl8169_private *tp = dev_get_drvdata(device);
4992
4993	if (!device_may_wakeup(tp_to_dev(tp)))
4994		clk_prepare_enable(tp->clk);
4995
4996	/* Reportedly at least Asus X453MA truncates packets otherwise */
4997	if (tp->mac_version == RTL_GIGA_MAC_VER_37)
4998		rtl_init_rxcfg(tp);
4999
5000	return rtl8169_runtime_resume(device);
5001}
5002
5003static int rtl8169_runtime_suspend(struct device *device)
5004{
5005	struct rtl8169_private *tp = dev_get_drvdata(device);
5006
5007	if (!tp->TxDescArray) {
5008		netif_device_detach(tp->dev);
5009		return 0;
5010	}
5011
5012	rtnl_lock();
5013	__rtl8169_set_wol(tp, WAKE_PHY);
5014	rtl8169_net_suspend(tp);
5015	rtnl_unlock();
5016
5017	return 0;
5018}
5019
5020static int rtl8169_runtime_idle(struct device *device)
5021{
5022	struct rtl8169_private *tp = dev_get_drvdata(device);
5023
5024	if (tp->dash_enabled)
5025		return -EBUSY;
5026
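	/* Never suspend synchronously from the idle callback; instead
	 * schedule a delayed (10s) runtime suspend when the interface is
	 * down or has no link.
	 */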
5027	if (!netif_running(tp->dev) || !netif_carrier_ok(tp->dev))
5028		pm_schedule_suspend(device, 10000);
5029
5030	return -EBUSY;
5031}
5032
5033static const struct dev_pm_ops rtl8169_pm_ops = {
5034	SYSTEM_SLEEP_PM_OPS(rtl8169_suspend, rtl8169_resume)
5035	RUNTIME_PM_OPS(rtl8169_runtime_suspend, rtl8169_runtime_resume,
5036		       rtl8169_runtime_idle)
5037};
5038
5039static void rtl_shutdown(struct pci_dev *pdev)
5040{
5041	struct rtl8169_private *tp = pci_get_drvdata(pdev);
5042
5043	rtnl_lock();
5044	rtl8169_net_suspend(tp);
5045	rtnl_unlock();
5046
5047	/* Restore original MAC address */
5048	rtl_rar_set(tp, tp->dev->perm_addr);
5049
5050	if (system_state == SYSTEM_POWER_OFF && !tp->dash_enabled) {
5051		pci_wake_from_d3(pdev, tp->saved_wolopts);
5052		pci_set_power_state(pdev, PCI_D3hot);
5053	}
5054}
5055
5056static void rtl_remove_one(struct pci_dev *pdev)
5057{
5058	struct rtl8169_private *tp = pci_get_drvdata(pdev);
5059
5060	if (pci_dev_run_wake(pdev))
5061		pm_runtime_get_noresume(&pdev->dev);
5062
5063	disable_work_sync(&tp->wk.work);
5064
5065	if (IS_ENABLED(CONFIG_R8169_LEDS))
5066		r8169_remove_leds(tp->leds);
5067
5068	unregister_netdev(tp->dev);
5069
5070	if (tp->dash_type != RTL_DASH_NONE)
5071		rtl8168_driver_stop(tp);
5072
5073	rtl_release_firmware(tp);
5074
5075	/* restore original MAC address */
5076	rtl_rar_set(tp, tp->dev->perm_addr);
5077}
5078
5079static const struct net_device_ops rtl_netdev_ops = {
5080	.ndo_open		= rtl_open,
5081	.ndo_stop		= rtl8169_close,
5082	.ndo_get_stats64	= rtl8169_get_stats64,
5083	.ndo_start_xmit		= rtl8169_start_xmit,
5084	.ndo_features_check	= rtl8169_features_check,
5085	.ndo_tx_timeout		= rtl8169_tx_timeout,
5086	.ndo_validate_addr	= eth_validate_addr,
5087	.ndo_change_mtu		= rtl8169_change_mtu,
5088	.ndo_fix_features	= rtl8169_fix_features,
5089	.ndo_set_features	= rtl8169_set_features,
5090	.ndo_set_mac_address	= rtl_set_mac_address,
5091	.ndo_eth_ioctl		= phy_do_ioctl_running,
5092	.ndo_set_rx_mode	= rtl_set_rx_mode,
5093#ifdef CONFIG_NET_POLL_CONTROLLER
5094	.ndo_poll_controller	= rtl8169_netpoll,
5095#endif
5096
5097};
5098
5099static void rtl_set_irq_mask(struct rtl8169_private *tp)
5100{
5101	tp->irq_mask = RxOK | RxErr | TxOK | TxErr | LinkChg;
5102
5103	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
5104		tp->irq_mask |= SYSErr | RxFIFOOver;
5105	else if (tp->mac_version == RTL_GIGA_MAC_VER_11)
5106		/* special workaround needed */
5107		tp->irq_mask |= RxFIFOOver;
5108}
5109
5110static int rtl_alloc_irq(struct rtl8169_private *tp)
5111{
5112	unsigned int flags;
5113
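	/* The oldest chips get MSI explicitly disabled and, like all chips up
	 * to RTL_GIGA_MAC_VER_17, are restricted to legacy INTx; newer ones
	 * may use any interrupt type.
	 */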
5114	switch (tp->mac_version) {
5115	case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
5116		rtl_unlock_config_regs(tp);
5117		RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
5118		rtl_lock_config_regs(tp);
5119		fallthrough;
5120	case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_17:
5121		flags = PCI_IRQ_INTX;
5122		break;
5123	default:
5124		flags = PCI_IRQ_ALL_TYPES;
5125		break;
5126	}
5127
5128	return pci_alloc_irq_vectors(tp->pci_dev, 1, 1, flags);
5129}
5130
5131static void rtl_read_mac_address(struct rtl8169_private *tp,
5132				 u8 mac_addr[ETH_ALEN])
5133{
5134	/* Get MAC address */
5135	if (rtl_is_8168evl_up(tp) && tp->mac_version != RTL_GIGA_MAC_VER_34) {
5136		u32 value;
5137
5138		value = rtl_eri_read(tp, 0xe0);
5139		put_unaligned_le32(value, mac_addr);
5140		value = rtl_eri_read(tp, 0xe4);
5141		put_unaligned_le16(value, mac_addr + 4);
 
5142	} else if (rtl_is_8125(tp)) {
5143		rtl_read_mac_from_reg(tp, mac_addr, MAC0_BKP);
5144	}
5145}
5146
5147DECLARE_RTL_COND(rtl_link_list_ready_cond)
5148{
5149	return RTL_R8(tp, MCU) & LINK_LIST_RDY;
5150}
5151
5152static void r8168g_wait_ll_share_fifo_ready(struct rtl8169_private *tp)
5153{
5154	rtl_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42);
5155}
5156
5157static int r8169_mdio_read_reg(struct mii_bus *mii_bus, int phyaddr, int phyreg)
5158{
5159	struct rtl8169_private *tp = mii_bus->priv;
5160
5161	if (phyaddr > 0)
5162		return -ENODEV;
5163
5164	return rtl_readphy(tp, phyreg);
5165}
5166
5167static int r8169_mdio_write_reg(struct mii_bus *mii_bus, int phyaddr,
5168				int phyreg, u16 val)
5169{
5170	struct rtl8169_private *tp = mii_bus->priv;
5171
5172	if (phyaddr > 0)
5173		return -ENODEV;
5174
5175	rtl_writephy(tp, phyreg, val);
5176
5177	return 0;
5178}
5179
5180static int r8169_mdio_register(struct rtl8169_private *tp)
5181{
5182	struct pci_dev *pdev = tp->pci_dev;
5183	struct mii_bus *new_bus;
5184	int ret;
5185
5186	/* On some boards with this chip version the BIOS is buggy and fails
5187	 * to reset the PHY page selector. This results in the PHY ID read
5188	 * accessing registers on a different page, returning a more or
5189	 * less random value. Fix this by resetting the page selector first.
5190	 */
5191	if (tp->mac_version == RTL_GIGA_MAC_VER_25 ||
5192	    tp->mac_version == RTL_GIGA_MAC_VER_26)
5193		r8169_mdio_write(tp, 0x1f, 0);
5194
5195	new_bus = devm_mdiobus_alloc(&pdev->dev);
5196	if (!new_bus)
5197		return -ENOMEM;
5198
5199	new_bus->name = "r8169";
5200	new_bus->priv = tp;
5201	new_bus->parent = &pdev->dev;
5202	new_bus->irq[0] = PHY_MAC_INTERRUPT;
5203	snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x-%x",
5204		 pci_domain_nr(pdev->bus), pci_dev_id(pdev));
5205
5206	new_bus->read = r8169_mdio_read_reg;
5207	new_bus->write = r8169_mdio_write_reg;
5208
5209	ret = devm_mdiobus_register(&pdev->dev, new_bus);
5210	if (ret)
5211		return ret;
5212
5213	tp->phydev = mdiobus_get_phy(new_bus, 0);
5214	if (!tp->phydev) {
5215		return -ENODEV;
5216	} else if (!tp->phydev->drv) {
5217		/* Most chip versions fail with the genphy driver.
5218		 * Therefore ensure that the dedicated PHY driver is loaded.
5219		 */
5220		dev_err(&pdev->dev, "no dedicated PHY driver found for PHY ID 0x%08x, maybe realtek.ko needs to be added to initramfs?\n",
5221			tp->phydev->phy_id);
5222		return -EUNATCH;
5223	}
5224
5225	tp->phydev->mac_managed_pm = true;
5226	if (rtl_supports_eee(tp))
5227		phy_support_eee(tp->phydev);
5228	phy_support_asym_pause(tp->phydev);
5229
5230	/* mimic behavior of r8125/r8126 vendor drivers */
5231	if (tp->mac_version == RTL_GIGA_MAC_VER_61)
5232		phy_set_eee_broken(tp->phydev,
5233				   ETHTOOL_LINK_MODE_2500baseT_Full_BIT);
5234	phy_set_eee_broken(tp->phydev, ETHTOOL_LINK_MODE_5000baseT_Full_BIT);
5235
5236	/* PHY will be woken up in rtl_open() */
5237	phy_suspend(tp->phydev);
5238
5239	return 0;
5240}
5241
5242static void rtl_hw_init_8168g(struct rtl8169_private *tp)
5243{
5244	rtl_enable_rxdvgate(tp);
5245
5246	RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
5247	msleep(1);
5248	RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
5249
5250	r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0);
5251	r8168g_wait_ll_share_fifo_ready(tp);
5252
5253	r8168_mac_ocp_modify(tp, 0xe8de, 0, BIT(15));
5254	r8168g_wait_ll_share_fifo_ready(tp);
5255}
5256
5257static void rtl_hw_init_8125(struct rtl8169_private *tp)
5258{
5259	rtl_enable_rxdvgate(tp);
5260
5261	RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
5262	msleep(1);
5263	RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
5264
5265	r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0);
5266	r8168g_wait_ll_share_fifo_ready(tp);
5267
5268	r8168_mac_ocp_write(tp, 0xc0aa, 0x07d0);
5269	r8168_mac_ocp_write(tp, 0xc0a6, 0x0150);
5270	r8168_mac_ocp_write(tp, 0xc01e, 0x5555);
5271	r8168g_wait_ll_share_fifo_ready(tp);
5272}
5273
5274static void rtl_hw_initialize(struct rtl8169_private *tp)
5275{
5276	switch (tp->mac_version) {
5277	case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_53:
5278		rtl8168ep_stop_cmac(tp);
5279		fallthrough;
5280	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48:
5281		rtl_hw_init_8168g(tp);
5282		break;
5283	case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_66:
5284		rtl_hw_init_8125(tp);
5285		break;
5286	default:
5287		break;
5288	}
5289}
5290
5291static int rtl_jumbo_max(struct rtl8169_private *tp)
5292{
5293	/* Non-GBit versions don't support jumbo frames */
5294	if (!tp->supports_gmii)
5295		return 0;
5296
5297	switch (tp->mac_version) {
5298	/* RTL8169 */
5299	case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
5300		return JUMBO_7K;
5301	/* RTL8168b */
5302	case RTL_GIGA_MAC_VER_11:
 
5303	case RTL_GIGA_MAC_VER_17:
5304		return JUMBO_4K;
5305	/* RTL8168c */
5306	case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
5307		return JUMBO_6K;
5308	default:
5309		return JUMBO_9K;
5310	}
5311}
5312
5313static void rtl_init_mac_address(struct rtl8169_private *tp)
5314{
5315	u8 mac_addr[ETH_ALEN] __aligned(2) = {};
5316	struct net_device *dev = tp->dev;
 
5317	int rc;
5318
5319	rc = eth_platform_get_mac_address(tp_to_dev(tp), mac_addr);
5320	if (!rc)
5321		goto done;
5322
5323	rtl_read_mac_address(tp, mac_addr);
5324	if (is_valid_ether_addr(mac_addr))
5325		goto done;
5326
5327	rtl_read_mac_from_reg(tp, mac_addr, MAC0);
5328	if (is_valid_ether_addr(mac_addr))
5329		goto done;
5330
5331	eth_random_addr(mac_addr);
5332	dev->addr_assign_type = NET_ADDR_RANDOM;
5333	dev_warn(tp_to_dev(tp), "can't read MAC address, setting random one\n");
5334done:
5335	eth_hw_addr_set(dev, mac_addr);
5336	rtl_rar_set(tp, mac_addr);
5337}
5338
5339/* register is set if system vendor successfully tested ASPM 1.2 */
5340static bool rtl_aspm_is_safe(struct rtl8169_private *tp)
5341{
5342	if (tp->mac_version >= RTL_GIGA_MAC_VER_61 &&
5343	    r8168_mac_ocp_read(tp, 0xc0b2) & 0xf)
5344		return true;
5345
5346	return false;
5347}
5348
5349static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5350{
5351	struct rtl8169_private *tp;
5352	int jumbo_max, region, rc;
5353	enum mac_version chipset;
5354	struct net_device *dev;
5355	u32 txconfig;
5356	u16 xid;
5357
5358	dev = devm_alloc_etherdev(&pdev->dev, sizeof(*tp));
5359	if (!dev)
5360		return -ENOMEM;
5361
5362	SET_NETDEV_DEV(dev, &pdev->dev);
5363	dev->netdev_ops = &rtl_netdev_ops;
5364	tp = netdev_priv(dev);
5365	tp->dev = dev;
5366	tp->pci_dev = pdev;
5367	tp->supports_gmii = ent->driver_data == RTL_CFG_NO_GBIT ? 0 : 1;
 
5368	tp->ocp_base = OCP_STD_PHY_BASE;
5369
5370	raw_spin_lock_init(&tp->mac_ocp_lock);
5371	mutex_init(&tp->led_lock);
5372
5373	/* Get the *optional* external "ether_clk" used on some boards */
5374	tp->clk = devm_clk_get_optional_enabled(&pdev->dev, "ether_clk");
5375	if (IS_ERR(tp->clk))
5376		return dev_err_probe(&pdev->dev, PTR_ERR(tp->clk), "failed to get ether_clk\n");
5377
5378	/* enable device (incl. PCI PM wakeup and hotplug setup) */
5379	rc = pcim_enable_device(pdev);
5380	if (rc < 0)
5381		return dev_err_probe(&pdev->dev, rc, "enable failure\n");
5382
5383	if (pcim_set_mwi(pdev) < 0)
5384		dev_info(&pdev->dev, "Mem-Wr-Inval unavailable\n");
5385
5386	/* use first MMIO region */
5387	region = ffs(pci_select_bars(pdev, IORESOURCE_MEM)) - 1;
5388	if (region < 0)
5389		return dev_err_probe(&pdev->dev, -ENODEV, "no MMIO resource found\n");
5390
5391	rc = pcim_iomap_regions(pdev, BIT(region), KBUILD_MODNAME);
5392	if (rc < 0)
5393		return dev_err_probe(&pdev->dev, rc, "cannot remap MMIO, aborting\n");
5394
5395	tp->mmio_addr = pcim_iomap_table(pdev)[region];
5396
5397	txconfig = RTL_R32(tp, TxConfig);
5398	if (txconfig == ~0U)
5399		return dev_err_probe(&pdev->dev, -EIO, "PCI read failed\n");
5400
5401	xid = (txconfig >> 20) & 0xfcf;
5402
5403	/* Identify chip attached to board */
5404	chipset = rtl8169_get_mac_version(xid, tp->supports_gmii);
5405	if (chipset == RTL_GIGA_MAC_NONE)
5406		return dev_err_probe(&pdev->dev, -ENODEV,
5407				     "unknown chip XID %03x, contact r8169 maintainers (see MAINTAINERS file)\n",
5408				     xid);
5409	tp->mac_version = chipset;
5410
5411	/* Disable ASPM L1, as it causes devices to randomly stop working as
5412	 * well as full system hangs for some PCIe device users.
5413	 */
5414	if (rtl_aspm_is_safe(tp))
5415		rc = 0;
5416	else
5417		rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
5418	tp->aspm_manageable = !rc;
5419
5420	tp->dash_type = rtl_get_dash_type(tp);
5421	tp->dash_enabled = rtl_dash_is_enabled(tp);
5422
5423	tp->cp_cmd = RTL_R16(tp, CPlusCmd) & CPCMD_MASK;
5424
5425	if (sizeof(dma_addr_t) > 4 && tp->mac_version >= RTL_GIGA_MAC_VER_18 &&
5426	    !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
5427		dev->features |= NETIF_F_HIGHDMA;
5428
5429	rtl_init_rxcfg(tp);
5430
5431	rtl8169_irq_mask_and_ack(tp);
5432
5433	rtl_hw_initialize(tp);
5434
5435	rtl_hw_reset(tp);
5436
5437	rc = rtl_alloc_irq(tp);
5438	if (rc < 0)
5439		return dev_err_probe(&pdev->dev, rc, "Can't allocate interrupt\n");
5440
5441	tp->irq = pci_irq_vector(pdev, 0);
5442
5443	INIT_WORK(&tp->wk.work, rtl_task);
5444	disable_work(&tp->wk.work);
 
5445
5446	rtl_init_mac_address(tp);
5447
5448	dev->ethtool_ops = &rtl8169_ethtool_ops;
5449
5450	netif_napi_add(dev, &tp->napi, rtl8169_poll);
5451
5452	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
5453			   NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
5454	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
5455	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
5456
5457	/*
5458	 * Pretend we are using VLANs; this bypasses a nasty bug where
5459	 * interrupts stop flowing under high load on 8110SCd controllers.
5460	 */
5461	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
5462		/* Disallow toggling */
5463		dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
5464
5465	if (rtl_chip_supports_csum_v2(tp))
5466		dev->hw_features |= NETIF_F_IPV6_CSUM;
5467
5468	dev->features |= dev->hw_features;
5469
5470	if (rtl_chip_supports_csum_v2(tp)) {
5471		dev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
5472		netif_set_tso_max_size(dev, RTL_GSO_MAX_SIZE_V2);
5473		netif_set_tso_max_segs(dev, RTL_GSO_MAX_SEGS_V2);
5474	} else {
5475		dev->hw_features |= NETIF_F_SG | NETIF_F_TSO;
5476		netif_set_tso_max_size(dev, RTL_GSO_MAX_SIZE_V1);
5477		netif_set_tso_max_segs(dev, RTL_GSO_MAX_SEGS_V1);
5478	}
5479
5480	/* There have been a number of reports that using SG/TSO results in
5481	 * tx timeouts. However, for a lot of people SG/TSO works fine.
5482	 * It's not fully clear which chip versions are affected. Vendor
5483	 * drivers enable SG/TSO for certain chip versions by default,
5484	 * so let's mimic this here. On other chip versions users can
5485	 * enable SG/TSO via ethtool, at their own risk!
5486	 */
5487	if (tp->mac_version >= RTL_GIGA_MAC_VER_46 &&
5488	    tp->mac_version != RTL_GIGA_MAC_VER_61)
5489		dev->features |= dev->hw_features;
5490
5491	dev->hw_features |= NETIF_F_RXALL;
5492	dev->hw_features |= NETIF_F_RXFCS;
5493
5494	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
5495
5496	netdev_sw_irq_coalesce_default_on(dev);
5497
5498	/* configure chip for default features */
5499	rtl8169_set_features(dev, dev->features);
5500
5501	if (!tp->dash_enabled) {
5502		rtl_set_d3_pll_down(tp, true);
5503	} else {
5504		rtl_set_d3_pll_down(tp, false);
5505		dev->ethtool->wol_enabled = 1;
5506	}
5507
5508	jumbo_max = rtl_jumbo_max(tp);
5509	if (jumbo_max)
5510		dev->max_mtu = jumbo_max;
5511
5512	rtl_set_irq_mask(tp);
5513
5514	tp->fw_name = rtl_chip_infos[chipset].fw_name;
5515
5516	tp->counters = dmam_alloc_coherent(&pdev->dev, sizeof(*tp->counters),
5517					    &tp->counters_phys_addr,
5518					    GFP_KERNEL);
5519	if (!tp->counters)
5520		return -ENOMEM;
5521
5522	pci_set_drvdata(pdev, tp);
5523
5524	rc = r8169_mdio_register(tp);
5525	if (rc)
5526		return rc;
5527
5528	rc = register_netdev(dev);
5529	if (rc)
5530		return rc;
5531
5532	if (IS_ENABLED(CONFIG_R8169_LEDS)) {
5533		if (rtl_is_8125(tp))
5534			tp->leds = rtl8125_init_leds(dev);
5535		else if (tp->mac_version > RTL_GIGA_MAC_VER_06)
5536			tp->leds = rtl8168_init_leds(dev);
5537	}
5538
5539	netdev_info(dev, "%s, %pM, XID %03x, IRQ %d\n",
5540		    rtl_chip_infos[chipset].name, dev->dev_addr, xid, tp->irq);
 
5541
5542	if (jumbo_max)
5543		netdev_info(dev, "jumbo features [frames: %d bytes, tx checksumming: %s]\n",
5544			    jumbo_max, tp->mac_version <= RTL_GIGA_MAC_VER_06 ?
5545			    "ok" : "ko");
5546
5547	if (tp->dash_type != RTL_DASH_NONE) {
5548		netdev_info(dev, "DASH %s\n",
5549			    tp->dash_enabled ? "enabled" : "disabled");
5550		rtl8168_driver_start(tp);
5551	}
5552
5553	if (pci_dev_run_wake(pdev))
5554		pm_runtime_put_sync(&pdev->dev);
5555
5556	return 0;
5557}
5558
5559static struct pci_driver rtl8169_pci_driver = {
5560	.name		= KBUILD_MODNAME,
5561	.id_table	= rtl8169_pci_tbl,
5562	.probe		= rtl_init_one,
5563	.remove		= rtl_remove_one,
5564	.shutdown	= rtl_shutdown,
5565	.driver.pm	= pm_ptr(&rtl8169_pm_ops),
5566};
5567
5568module_pci_driver(rtl8169_pci_driver);
v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * r8169.c: RealTek 8169/8168/8101 ethernet driver.
   4 *
   5 * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
   6 * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
   7 * Copyright (c) a lot of people too. Please respect their work.
   8 *
   9 * See MAINTAINERS file for support contact information.
  10 */
  11
  12#include <linux/module.h>
  13#include <linux/pci.h>
  14#include <linux/netdevice.h>
  15#include <linux/etherdevice.h>
  16#include <linux/clk.h>
  17#include <linux/delay.h>
  18#include <linux/ethtool.h>
  19#include <linux/phy.h>
  20#include <linux/if_vlan.h>
  21#include <linux/in.h>
  22#include <linux/io.h>
  23#include <linux/ip.h>
  24#include <linux/tcp.h>
  25#include <linux/interrupt.h>
  26#include <linux/dma-mapping.h>
  27#include <linux/pm_runtime.h>
  28#include <linux/bitfield.h>
  29#include <linux/prefetch.h>
  30#include <linux/ipv6.h>
 
  31#include <net/ip6_checksum.h>
 
  32
  33#include "r8169.h"
  34#include "r8169_firmware.h"
  35
  36#define MODULENAME "r8169"
  37
  38#define FIRMWARE_8168D_1	"rtl_nic/rtl8168d-1.fw"
  39#define FIRMWARE_8168D_2	"rtl_nic/rtl8168d-2.fw"
  40#define FIRMWARE_8168E_1	"rtl_nic/rtl8168e-1.fw"
  41#define FIRMWARE_8168E_2	"rtl_nic/rtl8168e-2.fw"
  42#define FIRMWARE_8168E_3	"rtl_nic/rtl8168e-3.fw"
  43#define FIRMWARE_8168F_1	"rtl_nic/rtl8168f-1.fw"
  44#define FIRMWARE_8168F_2	"rtl_nic/rtl8168f-2.fw"
  45#define FIRMWARE_8105E_1	"rtl_nic/rtl8105e-1.fw"
  46#define FIRMWARE_8402_1		"rtl_nic/rtl8402-1.fw"
  47#define FIRMWARE_8411_1		"rtl_nic/rtl8411-1.fw"
  48#define FIRMWARE_8411_2		"rtl_nic/rtl8411-2.fw"
  49#define FIRMWARE_8106E_1	"rtl_nic/rtl8106e-1.fw"
  50#define FIRMWARE_8106E_2	"rtl_nic/rtl8106e-2.fw"
  51#define FIRMWARE_8168G_2	"rtl_nic/rtl8168g-2.fw"
  52#define FIRMWARE_8168G_3	"rtl_nic/rtl8168g-3.fw"
  53#define FIRMWARE_8168H_1	"rtl_nic/rtl8168h-1.fw"
  54#define FIRMWARE_8168H_2	"rtl_nic/rtl8168h-2.fw"
  55#define FIRMWARE_8168FP_3	"rtl_nic/rtl8168fp-3.fw"
  56#define FIRMWARE_8107E_1	"rtl_nic/rtl8107e-1.fw"
  57#define FIRMWARE_8107E_2	"rtl_nic/rtl8107e-2.fw"
  58#define FIRMWARE_8125A_3	"rtl_nic/rtl8125a-3.fw"
  59#define FIRMWARE_8125B_2	"rtl_nic/rtl8125b-2.fw"
  60
  61/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
  62   The RTL chips use a 64 element hash table based on the Ethernet CRC. */
  63#define	MC_FILTER_LIMIT	32
  64
  65#define TX_DMA_BURST	7	/* Maximum PCI burst, '7' is unlimited */
  66#define InterFrameGap	0x03	/* 3 means InterFrameGap = the shortest one */
  67
  68#define R8169_REGS_SIZE		256
  69#define R8169_RX_BUF_SIZE	(SZ_16K - 1)
  70#define NUM_TX_DESC	64	/* Number of Tx descriptor registers */
  71#define NUM_RX_DESC	256U	/* Number of Rx descriptor registers */
  72#define R8169_TX_RING_BYTES	(NUM_TX_DESC * sizeof(struct TxDesc))
  73#define R8169_RX_RING_BYTES	(NUM_RX_DESC * sizeof(struct RxDesc))
  74
  75#define OCP_STD_PHY_BASE	0xa400
  76
  77#define RTL_CFG_NO_GBIT	1
  78
  79/* write/read MMIO register */
  80#define RTL_W8(tp, reg, val8)	writeb((val8), tp->mmio_addr + (reg))
  81#define RTL_W16(tp, reg, val16)	writew((val16), tp->mmio_addr + (reg))
  82#define RTL_W32(tp, reg, val32)	writel((val32), tp->mmio_addr + (reg))
  83#define RTL_R8(tp, reg)		readb(tp->mmio_addr + (reg))
  84#define RTL_R16(tp, reg)		readw(tp->mmio_addr + (reg))
  85#define RTL_R32(tp, reg)		readl(tp->mmio_addr + (reg))
  86
  87#define JUMBO_4K	(4 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
  88#define JUMBO_6K	(6 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
  89#define JUMBO_7K	(7 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
  90#define JUMBO_9K	(9 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN)
  91
  92static const struct {
  93	const char *name;
  94	const char *fw_name;
  95} rtl_chip_infos[] = {
  96	/* PCI devices. */
  97	[RTL_GIGA_MAC_VER_02] = {"RTL8169s"				},
  98	[RTL_GIGA_MAC_VER_03] = {"RTL8110s"				},
  99	[RTL_GIGA_MAC_VER_04] = {"RTL8169sb/8110sb"			},
 100	[RTL_GIGA_MAC_VER_05] = {"RTL8169sc/8110sc"			},
 101	[RTL_GIGA_MAC_VER_06] = {"RTL8169sc/8110sc"			},
 102	/* PCI-E devices. */
 103	[RTL_GIGA_MAC_VER_07] = {"RTL8102e"				},
 104	[RTL_GIGA_MAC_VER_08] = {"RTL8102e"				},
 105	[RTL_GIGA_MAC_VER_09] = {"RTL8102e/RTL8103e"			},
 106	[RTL_GIGA_MAC_VER_10] = {"RTL8101e"				},
 107	[RTL_GIGA_MAC_VER_11] = {"RTL8168b/8111b"			},
 108	[RTL_GIGA_MAC_VER_12] = {"RTL8168b/8111b"			},
 109	[RTL_GIGA_MAC_VER_13] = {"RTL8101e/RTL8100e"			},
 110	[RTL_GIGA_MAC_VER_14] = {"RTL8401"				},
 111	[RTL_GIGA_MAC_VER_16] = {"RTL8101e"				},
 112	[RTL_GIGA_MAC_VER_17] = {"RTL8168b/8111b"			},
 113	[RTL_GIGA_MAC_VER_18] = {"RTL8168cp/8111cp"			},
 114	[RTL_GIGA_MAC_VER_19] = {"RTL8168c/8111c"			},
 115	[RTL_GIGA_MAC_VER_20] = {"RTL8168c/8111c"			},
 116	[RTL_GIGA_MAC_VER_21] = {"RTL8168c/8111c"			},
 117	[RTL_GIGA_MAC_VER_22] = {"RTL8168c/8111c"			},
 118	[RTL_GIGA_MAC_VER_23] = {"RTL8168cp/8111cp"			},
 119	[RTL_GIGA_MAC_VER_24] = {"RTL8168cp/8111cp"			},
 120	[RTL_GIGA_MAC_VER_25] = {"RTL8168d/8111d",	FIRMWARE_8168D_1},
 121	[RTL_GIGA_MAC_VER_26] = {"RTL8168d/8111d",	FIRMWARE_8168D_2},
 122	[RTL_GIGA_MAC_VER_27] = {"RTL8168dp/8111dp"			},
 123	[RTL_GIGA_MAC_VER_28] = {"RTL8168dp/8111dp"			},
 124	[RTL_GIGA_MAC_VER_29] = {"RTL8105e",		FIRMWARE_8105E_1},
 125	[RTL_GIGA_MAC_VER_30] = {"RTL8105e",		FIRMWARE_8105E_1},
 126	[RTL_GIGA_MAC_VER_31] = {"RTL8168dp/8111dp"			},
 127	[RTL_GIGA_MAC_VER_32] = {"RTL8168e/8111e",	FIRMWARE_8168E_1},
 128	[RTL_GIGA_MAC_VER_33] = {"RTL8168e/8111e",	FIRMWARE_8168E_2},
 129	[RTL_GIGA_MAC_VER_34] = {"RTL8168evl/8111evl",	FIRMWARE_8168E_3},
 130	[RTL_GIGA_MAC_VER_35] = {"RTL8168f/8111f",	FIRMWARE_8168F_1},
 131	[RTL_GIGA_MAC_VER_36] = {"RTL8168f/8111f",	FIRMWARE_8168F_2},
 132	[RTL_GIGA_MAC_VER_37] = {"RTL8402",		FIRMWARE_8402_1 },
 133	[RTL_GIGA_MAC_VER_38] = {"RTL8411",		FIRMWARE_8411_1 },
 134	[RTL_GIGA_MAC_VER_39] = {"RTL8106e",		FIRMWARE_8106E_1},
 135	[RTL_GIGA_MAC_VER_40] = {"RTL8168g/8111g",	FIRMWARE_8168G_2},
 136	[RTL_GIGA_MAC_VER_41] = {"RTL8168g/8111g"			},
 137	[RTL_GIGA_MAC_VER_42] = {"RTL8168gu/8111gu",	FIRMWARE_8168G_3},
 138	[RTL_GIGA_MAC_VER_43] = {"RTL8106eus",		FIRMWARE_8106E_2},
 139	[RTL_GIGA_MAC_VER_44] = {"RTL8411b",		FIRMWARE_8411_2 },
 140	[RTL_GIGA_MAC_VER_45] = {"RTL8168h/8111h",	FIRMWARE_8168H_1},
 141	[RTL_GIGA_MAC_VER_46] = {"RTL8168h/8111h",	FIRMWARE_8168H_2},
 142	[RTL_GIGA_MAC_VER_47] = {"RTL8107e",		FIRMWARE_8107E_1},
 143	[RTL_GIGA_MAC_VER_48] = {"RTL8107e",		FIRMWARE_8107E_2},
 144	[RTL_GIGA_MAC_VER_49] = {"RTL8168ep/8111ep"			},
 145	[RTL_GIGA_MAC_VER_50] = {"RTL8168ep/8111ep"			},
 146	[RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep"			},
 147	[RTL_GIGA_MAC_VER_52] = {"RTL8168fp/RTL8117",  FIRMWARE_8168FP_3},
 148	[RTL_GIGA_MAC_VER_60] = {"RTL8125A"				},
 149	[RTL_GIGA_MAC_VER_61] = {"RTL8125A",		FIRMWARE_8125A_3},
 150	/* reserve 62 for CFG_METHOD_4 in the vendor driver */
 151	[RTL_GIGA_MAC_VER_63] = {"RTL8125B",		FIRMWARE_8125B_2},
 152};
 153
 154static const struct pci_device_id rtl8169_pci_tbl[] = {
 155	{ PCI_VDEVICE(REALTEK,	0x2502) },
 156	{ PCI_VDEVICE(REALTEK,	0x2600) },
 157	{ PCI_VDEVICE(REALTEK,	0x8129) },
 158	{ PCI_VDEVICE(REALTEK,	0x8136), RTL_CFG_NO_GBIT },
 159	{ PCI_VDEVICE(REALTEK,	0x8161) },
 
 160	{ PCI_VDEVICE(REALTEK,	0x8167) },
 161	{ PCI_VDEVICE(REALTEK,	0x8168) },
 162	{ PCI_VDEVICE(NCUBE,	0x8168) },
 163	{ PCI_VDEVICE(REALTEK,	0x8169) },
 164	{ PCI_VENDOR_ID_DLINK,	0x4300,
 165		PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0 },
 166	{ PCI_VDEVICE(DLINK,	0x4300) },
 167	{ PCI_VDEVICE(DLINK,	0x4302) },
 168	{ PCI_VDEVICE(AT,	0xc107) },
 169	{ PCI_VDEVICE(USR,	0x0116) },
 170	{ PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0024 },
 171	{ 0x0001, 0x8168, PCI_ANY_ID, 0x2410 },
 172	{ PCI_VDEVICE(REALTEK,	0x8125) },
 
 173	{ PCI_VDEVICE(REALTEK,	0x3000) },
 174	{}
 175};
 176
 177MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
 178
 179enum rtl_registers {
 180	MAC0		= 0,	/* Ethernet hardware address. */
 181	MAC4		= 4,
 182	MAR0		= 8,	/* Multicast filter. */
 183	CounterAddrLow		= 0x10,
 184	CounterAddrHigh		= 0x14,
 185	TxDescStartAddrLow	= 0x20,
 186	TxDescStartAddrHigh	= 0x24,
 187	TxHDescStartAddrLow	= 0x28,
 188	TxHDescStartAddrHigh	= 0x2c,
 189	FLASH		= 0x30,
 190	ERSR		= 0x36,
 191	ChipCmd		= 0x37,
 192	TxPoll		= 0x38,
 193	IntrMask	= 0x3c,
 194	IntrStatus	= 0x3e,
 195
 196	TxConfig	= 0x40,
 197#define	TXCFG_AUTO_FIFO			(1 << 7)	/* 8111e-vl */
 198#define	TXCFG_EMPTY			(1 << 11)	/* 8111e-vl */
 199
 200	RxConfig	= 0x44,
 201#define	RX128_INT_EN			(1 << 15)	/* 8111c and later */
 202#define	RX_MULTI_EN			(1 << 14)	/* 8111c only */
 203#define	RXCFG_FIFO_SHIFT		13
 204					/* No threshold before first PCI xfer */
 205#define	RX_FIFO_THRESH			(7 << RXCFG_FIFO_SHIFT)
 206#define	RX_EARLY_OFF			(1 << 11)
 
 207#define	RXCFG_DMA_SHIFT			8
 208					/* Unlimited maximum PCI burst. */
 209#define	RX_DMA_BURST			(7 << RXCFG_DMA_SHIFT)
 210
 211	Cfg9346		= 0x50,
 212	Config0		= 0x51,
 213	Config1		= 0x52,
 214	Config2		= 0x53,
 215#define PME_SIGNAL			(1 << 5)	/* 8168c and later */
 216
 217	Config3		= 0x54,
 218	Config4		= 0x55,
 219	Config5		= 0x56,
 220	PHYAR		= 0x60,
 221	PHYstatus	= 0x6c,
 222	RxMaxSize	= 0xda,
 223	CPlusCmd	= 0xe0,
 224	IntrMitigate	= 0xe2,
 225
 226#define RTL_COALESCE_TX_USECS	GENMASK(15, 12)
 227#define RTL_COALESCE_TX_FRAMES	GENMASK(11, 8)
 228#define RTL_COALESCE_RX_USECS	GENMASK(7, 4)
 229#define RTL_COALESCE_RX_FRAMES	GENMASK(3, 0)
 230
 231#define RTL_COALESCE_T_MAX	0x0fU
 232#define RTL_COALESCE_FRAME_MAX	(RTL_COALESCE_T_MAX * 4)
 233
 234	RxDescAddrLow	= 0xe4,
 235	RxDescAddrHigh	= 0xe8,
 236	EarlyTxThres	= 0xec,	/* 8169. Unit of 32 bytes. */
 237
 238#define NoEarlyTx	0x3f	/* Max value : no early transmit. */
 239
 240	MaxTxPacketSize	= 0xec,	/* 8101/8168. Unit of 128 bytes. */
 241
 242#define TxPacketMax	(8064 >> 7)
 243#define EarlySize	0x27
 244
 245	FuncEvent	= 0xf0,
 246	FuncEventMask	= 0xf4,
 247	FuncPresetState	= 0xf8,
 248	IBCR0           = 0xf8,
 249	IBCR2           = 0xf9,
 250	IBIMR0          = 0xfa,
 251	IBISR0          = 0xfb,
 252	FuncForceEvent	= 0xfc,
 253};
 254
 255enum rtl8168_8101_registers {
 256	CSIDR			= 0x64,
 257	CSIAR			= 0x68,
 258#define	CSIAR_FLAG			0x80000000
 259#define	CSIAR_WRITE_CMD			0x80000000
 260#define	CSIAR_BYTE_ENABLE		0x0000f000
 261#define	CSIAR_ADDR_MASK			0x00000fff
 262	PMCH			= 0x6f,
 263	EPHYAR			= 0x80,
 264#define	EPHYAR_FLAG			0x80000000
 265#define	EPHYAR_WRITE_CMD		0x80000000
 266#define	EPHYAR_REG_MASK			0x1f
 267#define	EPHYAR_REG_SHIFT		16
 268#define	EPHYAR_DATA_MASK		0xffff
 269	DLLPR			= 0xd0,
 270#define	PFM_EN				(1 << 6)
 271#define	TX_10M_PS_EN			(1 << 7)
 272	DBG_REG			= 0xd1,
 273#define	FIX_NAK_1			(1 << 4)
 274#define	FIX_NAK_2			(1 << 3)
 275	TWSI			= 0xd2,
 276	MCU			= 0xd3,
 277#define	NOW_IS_OOB			(1 << 7)
 278#define	TX_EMPTY			(1 << 5)
 279#define	RX_EMPTY			(1 << 4)
 280#define	RXTX_EMPTY			(TX_EMPTY | RX_EMPTY)
 281#define	EN_NDP				(1 << 3)
 282#define	EN_OOB_RESET			(1 << 2)
 283#define	LINK_LIST_RDY			(1 << 1)
 284	EFUSEAR			= 0xdc,
 285#define	EFUSEAR_FLAG			0x80000000
 286#define	EFUSEAR_WRITE_CMD		0x80000000
 287#define	EFUSEAR_READ_CMD		0x00000000
 288#define	EFUSEAR_REG_MASK		0x03ff
 289#define	EFUSEAR_REG_SHIFT		8
 290#define	EFUSEAR_DATA_MASK		0xff
 291	MISC_1			= 0xf2,
 292#define	PFM_D3COLD_EN			(1 << 6)
 293};
 294
 295enum rtl8168_registers {
 
 296	LED_FREQ		= 0x1a,
 297	EEE_LED			= 0x1b,
 298	ERIDR			= 0x70,
 299	ERIAR			= 0x74,
 300#define ERIAR_FLAG			0x80000000
 301#define ERIAR_WRITE_CMD			0x80000000
 302#define ERIAR_READ_CMD			0x00000000
 303#define ERIAR_ADDR_BYTE_ALIGN		4
 304#define ERIAR_TYPE_SHIFT		16
 305#define ERIAR_EXGMAC			(0x00 << ERIAR_TYPE_SHIFT)
 306#define ERIAR_MSIX			(0x01 << ERIAR_TYPE_SHIFT)
 307#define ERIAR_ASF			(0x02 << ERIAR_TYPE_SHIFT)
 308#define ERIAR_OOB			(0x02 << ERIAR_TYPE_SHIFT)
 309#define ERIAR_MASK_SHIFT		12
 310#define ERIAR_MASK_0001			(0x1 << ERIAR_MASK_SHIFT)
 311#define ERIAR_MASK_0011			(0x3 << ERIAR_MASK_SHIFT)
 312#define ERIAR_MASK_0100			(0x4 << ERIAR_MASK_SHIFT)
 313#define ERIAR_MASK_0101			(0x5 << ERIAR_MASK_SHIFT)
 314#define ERIAR_MASK_1111			(0xf << ERIAR_MASK_SHIFT)
 315	EPHY_RXER_NUM		= 0x7c,
 316	OCPDR			= 0xb0,	/* OCP GPHY access */
 317#define OCPDR_WRITE_CMD			0x80000000
 318#define OCPDR_READ_CMD			0x00000000
 319#define OCPDR_REG_MASK			0x7f
 320#define OCPDR_GPHY_REG_SHIFT		16
 321#define OCPDR_DATA_MASK			0xffff
 322	OCPAR			= 0xb4,
 323#define OCPAR_FLAG			0x80000000
 324#define OCPAR_GPHY_WRITE_CMD		0x8000f060
 325#define OCPAR_GPHY_READ_CMD		0x0000f060
 326	GPHY_OCP		= 0xb8,
 327	RDSAR1			= 0xd0,	/* 8168c only. Undocumented on 8168dp */
 328	MISC			= 0xf0,	/* 8168e only. */
 329#define TXPLA_RST			(1 << 29)
 330#define DISABLE_LAN_EN			(1 << 23) /* Enable GPIO pin */
 331#define PWM_EN				(1 << 22)
 332#define RXDV_GATED_EN			(1 << 19)
 333#define EARLY_TALLY_EN			(1 << 16)
 334};
 335
 336enum rtl8125_registers {
 337	IntrMask_8125		= 0x38,
 338	IntrStatus_8125		= 0x3c,
 339	TxPoll_8125		= 0x90,
 
 340	MAC0_BKP		= 0x19e0,
 341	EEE_TXIDLE_TIMER_8125	= 0x6048,
 342};
 343
 344#define RX_VLAN_INNER_8125	BIT(22)
 345#define RX_VLAN_OUTER_8125	BIT(23)
 346#define RX_VLAN_8125		(RX_VLAN_INNER_8125 | RX_VLAN_OUTER_8125)
 347
 348#define RX_FETCH_DFLT_8125	(8 << 27)
 349
 350enum rtl_register_content {
 351	/* InterruptStatusBits */
 352	SYSErr		= 0x8000,
 353	PCSTimeout	= 0x4000,
 354	SWInt		= 0x0100,
 355	TxDescUnavail	= 0x0080,
 356	RxFIFOOver	= 0x0040,
 357	LinkChg		= 0x0020,
 358	RxOverflow	= 0x0010,
 359	TxErr		= 0x0008,
 360	TxOK		= 0x0004,
 361	RxErr		= 0x0002,
 362	RxOK		= 0x0001,
 363
 364	/* RxStatusDesc */
 365	RxRWT	= (1 << 22),
 366	RxRES	= (1 << 21),
 367	RxRUNT	= (1 << 20),
 368	RxCRC	= (1 << 19),
 369
 370	/* ChipCmdBits */
 371	StopReq		= 0x80,
 372	CmdReset	= 0x10,
 373	CmdRxEnb	= 0x08,
 374	CmdTxEnb	= 0x04,
 375	RxBufEmpty	= 0x01,
 376
 377	/* TXPoll register p.5 */
 378	HPQ		= 0x80,		/* Poll cmd on the high prio queue */
 379	NPQ		= 0x40,		/* Poll cmd on the low prio queue */
 380	FSWInt		= 0x01,		/* Forced software interrupt */
 381
 382	/* Cfg9346Bits */
 383	Cfg9346_Lock	= 0x00,
 384	Cfg9346_Unlock	= 0xc0,
 385
 386	/* rx_mode_bits */
 387	AcceptErr	= 0x20,
 388	AcceptRunt	= 0x10,
 389#define RX_CONFIG_ACCEPT_ERR_MASK	0x30
 390	AcceptBroadcast	= 0x08,
 391	AcceptMulticast	= 0x04,
 392	AcceptMyPhys	= 0x02,
 393	AcceptAllPhys	= 0x01,
 394#define RX_CONFIG_ACCEPT_OK_MASK	0x0f
 395#define RX_CONFIG_ACCEPT_MASK		0x3f
 396
 397	/* TxConfigBits */
 398	TxInterFrameGapShift = 24,
 399	TxDMAShift = 8,	/* DMA burst value (0-7) is shift this many bits */
 400
 401	/* Config1 register p.24 */
 402	LEDS1		= (1 << 7),
 403	LEDS0		= (1 << 6),
 404	Speed_down	= (1 << 4),
 405	MEMMAP		= (1 << 3),
 406	IOMAP		= (1 << 2),
 407	VPD		= (1 << 1),
 408	PMEnable	= (1 << 0),	/* Power Management Enable */
 409
 410	/* Config2 register p. 25 */
 411	ClkReqEn	= (1 << 7),	/* Clock Request Enable */
 412	MSIEnable	= (1 << 5),	/* 8169 only. Reserved in the 8168. */
 413	PCI_Clock_66MHz = 0x01,
 414	PCI_Clock_33MHz = 0x00,
 415
 416	/* Config3 register p.25 */
 417	MagicPacket	= (1 << 5),	/* Wake up when receives a Magic Packet */
 418	LinkUp		= (1 << 4),	/* Wake up when the cable connection is re-established */
 419	Jumbo_En0	= (1 << 2),	/* 8168 only. Reserved in the 8168b */
 420	Rdy_to_L23	= (1 << 1),	/* L23 Enable */
 421	Beacon_en	= (1 << 0),	/* 8168 only. Reserved in the 8168b */
 422
 423	/* Config4 register */
 424	Jumbo_En1	= (1 << 1),	/* 8168 only. Reserved in the 8168b */
 425
 426	/* Config5 register p.27 */
 427	BWF		= (1 << 6),	/* Accept Broadcast wakeup frame */
 428	MWF		= (1 << 5),	/* Accept Multicast wakeup frame */
 429	UWF		= (1 << 4),	/* Accept Unicast wakeup frame */
 430	Spi_en		= (1 << 3),
 431	LanWake		= (1 << 1),	/* LanWake enable/disable */
 432	PMEStatus	= (1 << 0),	/* PME status can be reset by PCI RST# */
 433	ASPM_en		= (1 << 0),	/* ASPM enable */
 434
 435	/* CPlusCmd p.31 */
 436	EnableBist	= (1 << 15),	// 8168 8101
 437	Mac_dbgo_oe	= (1 << 14),	// 8168 8101
 438	EnAnaPLL	= (1 << 14),	// 8169
 439	Normal_mode	= (1 << 13),	// unused
 440	Force_half_dup	= (1 << 12),	// 8168 8101
 441	Force_rxflow_en	= (1 << 11),	// 8168 8101
 442	Force_txflow_en	= (1 << 10),	// 8168 8101
 443	Cxpl_dbg_sel	= (1 << 9),	// 8168 8101
 444	ASF		= (1 << 8),	// 8168 8101
 445	PktCntrDisable	= (1 << 7),	// 8168 8101
 446	Mac_dbgo_sel	= 0x001c,	// 8168
 447	RxVlan		= (1 << 6),
 448	RxChkSum	= (1 << 5),
 449	PCIDAC		= (1 << 4),
 450	PCIMulRW	= (1 << 3),
 451#define INTT_MASK	GENMASK(1, 0)
 452#define CPCMD_MASK	(Normal_mode | RxVlan | RxChkSum | INTT_MASK)
 453
 454	/* rtl8169_PHYstatus */
 455	TBI_Enable	= 0x80,
 456	TxFlowCtrl	= 0x40,
 457	RxFlowCtrl	= 0x20,
 458	_1000bpsF	= 0x10,
 459	_100bps		= 0x08,
 460	_10bps		= 0x04,
 461	LinkStatus	= 0x02,
 462	FullDup		= 0x01,
 463
 464	/* ResetCounterCommand */
 465	CounterReset	= 0x1,
 466
 467	/* DumpCounterCommand */
 468	CounterDump	= 0x8,
 469
 470	/* magic enable v2 */
 471	MagicPacket_v2	= (1 << 16),	/* Wake up when receives a Magic Packet */
 472};
 473
 474enum rtl_desc_bit {
 475	/* First doubleword. */
 476	DescOwn		= (1 << 31), /* Descriptor is owned by NIC */
 477	RingEnd		= (1 << 30), /* End of descriptor ring */
 478	FirstFrag	= (1 << 29), /* First segment of a packet */
 479	LastFrag	= (1 << 28), /* Final segment of a packet */
 480};
 481
 482/* Generic case. */
 483enum rtl_tx_desc_bit {
 484	/* First doubleword. */
 485	TD_LSO		= (1 << 27),		/* Large Send Offload */
 486#define TD_MSS_MAX			0x07ffu	/* MSS value */
 487
 488	/* Second doubleword. */
 489	TxVlanTag	= (1 << 17),		/* Add VLAN tag */
 490};
 491
 492/* 8169, 8168b and 810x except 8102e. */
 493enum rtl_tx_desc_bit_0 {
 494	/* First doubleword. */
 495#define TD0_MSS_SHIFT			16	/* MSS position (11 bits) */
 496	TD0_TCP_CS	= (1 << 16),		/* Calculate TCP/IP checksum */
 497	TD0_UDP_CS	= (1 << 17),		/* Calculate UDP/IP checksum */
 498	TD0_IP_CS	= (1 << 18),		/* Calculate IP checksum */
 499};
 500
 501/* 8102e, 8168c and beyond. */
 502enum rtl_tx_desc_bit_1 {
 503	/* First doubleword. */
 504	TD1_GTSENV4	= (1 << 26),		/* Giant Send for IPv4 */
 505	TD1_GTSENV6	= (1 << 25),		/* Giant Send for IPv6 */
 506#define GTTCPHO_SHIFT			18
 507#define GTTCPHO_MAX			0x7f
 508
 509	/* Second doubleword. */
 510#define TCPHO_SHIFT			18
 511#define TCPHO_MAX			0x3ff
 512#define TD1_MSS_SHIFT			18	/* MSS position (11 bits) */
 513	TD1_IPv6_CS	= (1 << 28),		/* Calculate IPv6 checksum */
 514	TD1_IPv4_CS	= (1 << 29),		/* Calculate IPv4 checksum */
 515	TD1_TCP_CS	= (1 << 30),		/* Calculate TCP/IP checksum */
 516	TD1_UDP_CS	= (1 << 31),		/* Calculate UDP/IP checksum */
 517};
 518
 519enum rtl_rx_desc_bit {
 520	/* Rx private */
 521	PID1		= (1 << 18), /* Protocol ID bit 1/2 */
 522	PID0		= (1 << 17), /* Protocol ID bit 0/2 */
 523
 524#define RxProtoUDP	(PID1)
 525#define RxProtoTCP	(PID0)
 526#define RxProtoIP	(PID1 | PID0)
 527#define RxProtoMask	RxProtoIP
 528
 529	IPFail		= (1 << 16), /* IP checksum failed */
 530	UDPFail		= (1 << 15), /* UDP/IP checksum failed */
 531	TCPFail		= (1 << 14), /* TCP/IP checksum failed */
 532	RxVlanTag	= (1 << 16), /* VLAN tag available */
 533};
 534
 535#define RTL_GSO_MAX_SIZE_V1	32000
 536#define RTL_GSO_MAX_SEGS_V1	24
 537#define RTL_GSO_MAX_SIZE_V2	64000
 538#define RTL_GSO_MAX_SEGS_V2	64
 539
 540struct TxDesc {
 541	__le32 opts1;
 542	__le32 opts2;
 543	__le64 addr;
 544};
 545
 546struct RxDesc {
 547	__le32 opts1;
 548	__le32 opts2;
 549	__le64 addr;
 550};
 551
 552struct ring_info {
 553	struct sk_buff	*skb;
 554	u32		len;
 555};
 556
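/* Hardware tally counter block, dumped into this buffer by the chip via DMA
 * (see rtl8169_do_counters); the field order and little-endian layout are
 * dictated by the hardware.
 */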
 557struct rtl8169_counters {
 558	__le64	tx_packets;
 559	__le64	rx_packets;
 560	__le64	tx_errors;
 561	__le32	rx_errors;
 562	__le16	rx_missed;
 563	__le16	align_errors;
 564	__le32	tx_one_collision;
 565	__le32	tx_multi_collision;
 566	__le64	rx_unicast;
 567	__le64	rx_broadcast;
 568	__le32	rx_multicast;
 569	__le16	tx_aborted;
 570	__le16	tx_underun;
 571};
 572
 573struct rtl8169_tc_offsets {
 574	bool	inited;
 575	__le64	tx_errors;
 576	__le32	tx_multi_collision;
 577	__le16	tx_aborted;
 578	__le16	rx_missed;
 579};
 580
 581enum rtl_flag {
 582	RTL_FLAG_TASK_ENABLED = 0,
 583	RTL_FLAG_TASK_RESET_PENDING,
 584	RTL_FLAG_MAX
 585};
 586
 587struct rtl8169_stats {
 588	u64			packets;
 589	u64			bytes;
 590	struct u64_stats_sync	syncp;
 591};
 592
 593struct rtl8169_private {
 594	void __iomem *mmio_addr;	/* memory map physical address */
 595	struct pci_dev *pci_dev;
 596	struct net_device *dev;
 597	struct phy_device *phydev;
 598	struct napi_struct napi;
 599	enum mac_version mac_version;
 
 600	u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
 601	u32 cur_tx; /* Index into the Tx descriptor buffer of next Tx pkt. */
 602	u32 dirty_tx;
 603	struct rtl8169_stats rx_stats;
 604	struct rtl8169_stats tx_stats;
 605	struct TxDesc *TxDescArray;	/* 256-aligned Tx descriptor ring */
 606	struct RxDesc *RxDescArray;	/* 256-aligned Rx descriptor ring */
 607	dma_addr_t TxPhyAddr;
 608	dma_addr_t RxPhyAddr;
 609	struct page *Rx_databuff[NUM_RX_DESC];	/* Rx data buffers */
 610	struct ring_info tx_skb[NUM_TX_DESC];	/* Tx data buffers */
 611	u16 cp_cmd;
 
 612	u32 irq_mask;
 
 613	struct clk *clk;
 614
 615	struct {
 616		DECLARE_BITMAP(flags, RTL_FLAG_MAX);
 617		struct work_struct work;
 618	} wk;
 619
 620	unsigned irq_enabled:1;
 621	unsigned supports_gmii:1;
 622	unsigned aspm_manageable:1;
 
 623	dma_addr_t counters_phys_addr;
 624	struct rtl8169_counters *counters;
 625	struct rtl8169_tc_offsets tc_offset;
 626	u32 saved_wolopts;
 627	int eee_adv;
 628
 629	const char *fw_name;
 630	struct rtl_fw *rtl_fw;
 631
 632	u32 ocp_base;
 633};
 634
 635typedef void (*rtl_generic_fct)(struct rtl8169_private *tp);
 636
 637MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
 638MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
 639MODULE_SOFTDEP("pre: realtek");
 640MODULE_LICENSE("GPL");
 641MODULE_FIRMWARE(FIRMWARE_8168D_1);
 642MODULE_FIRMWARE(FIRMWARE_8168D_2);
 643MODULE_FIRMWARE(FIRMWARE_8168E_1);
 644MODULE_FIRMWARE(FIRMWARE_8168E_2);
 645MODULE_FIRMWARE(FIRMWARE_8168E_3);
 646MODULE_FIRMWARE(FIRMWARE_8105E_1);
 647MODULE_FIRMWARE(FIRMWARE_8168F_1);
 648MODULE_FIRMWARE(FIRMWARE_8168F_2);
 649MODULE_FIRMWARE(FIRMWARE_8402_1);
 650MODULE_FIRMWARE(FIRMWARE_8411_1);
 651MODULE_FIRMWARE(FIRMWARE_8411_2);
 652MODULE_FIRMWARE(FIRMWARE_8106E_1);
 653MODULE_FIRMWARE(FIRMWARE_8106E_2);
 654MODULE_FIRMWARE(FIRMWARE_8168G_2);
 655MODULE_FIRMWARE(FIRMWARE_8168G_3);
 656MODULE_FIRMWARE(FIRMWARE_8168H_1);
 657MODULE_FIRMWARE(FIRMWARE_8168H_2);
 658MODULE_FIRMWARE(FIRMWARE_8168FP_3);
 659MODULE_FIRMWARE(FIRMWARE_8107E_1);
 660MODULE_FIRMWARE(FIRMWARE_8107E_2);
 661MODULE_FIRMWARE(FIRMWARE_8125A_3);
 662MODULE_FIRMWARE(FIRMWARE_8125B_2);
 663
 664static inline struct device *tp_to_dev(struct rtl8169_private *tp)
 665{
 666	return &tp->pci_dev->dev;
 667}
 668
 669static void rtl_lock_config_regs(struct rtl8169_private *tp)
 670{
 671	RTL_W8(tp, Cfg9346, Cfg9346_Lock);
 672}
 673
 674static void rtl_unlock_config_regs(struct rtl8169_private *tp)
 675{
 676	RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
 677}
 678
 679static void rtl_pci_commit(struct rtl8169_private *tp)
 680{
 681	/* Read an arbitrary register to commit a preceding PCI write */
 682	RTL_R8(tp, ChipCmd);
 683}
 684
 685static bool rtl_is_8125(struct rtl8169_private *tp)
 686{
 687	return tp->mac_version >= RTL_GIGA_MAC_VER_60;
 688}
 689
 690static bool rtl_is_8168evl_up(struct rtl8169_private *tp)
 691{
 692	return tp->mac_version >= RTL_GIGA_MAC_VER_34 &&
 693	       tp->mac_version != RTL_GIGA_MAC_VER_39 &&
 694	       tp->mac_version <= RTL_GIGA_MAC_VER_52;
 695}
 696
 697static bool rtl_supports_eee(struct rtl8169_private *tp)
 698{
 699	return tp->mac_version >= RTL_GIGA_MAC_VER_34 &&
 700	       tp->mac_version != RTL_GIGA_MAC_VER_37 &&
 701	       tp->mac_version != RTL_GIGA_MAC_VER_39;
 702}
 703
 704static void rtl_read_mac_from_reg(struct rtl8169_private *tp, u8 *mac, int reg)
 705{
 706	int i;
 707
 708	for (i = 0; i < ETH_ALEN; i++)
 709		mac[i] = RTL_R8(tp, reg + i);
 710}
 711
 712struct rtl_cond {
 713	bool (*check)(struct rtl8169_private *);
 714	const char *msg;
 715};
 716
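/* Poll c->check() up to n times, sleeping 'usecs' microseconds between
 * attempts. Returns true once the condition reaches the requested level
 * ('high'), false (with a rate-limited error message) on timeout.
 */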
 717static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c,
 718			  unsigned long usecs, int n, bool high)
 719{
 720	int i;
 721
 722	for (i = 0; i < n; i++) {
 723		if (c->check(tp) == high)
 724			return true;
 725		fsleep(usecs);
 726	}
 727
 728	if (net_ratelimit())
 729		netdev_err(tp->dev, "%s == %d (loop: %d, delay: %lu).\n",
 730			   c->msg, !high, n, usecs);
 731	return false;
 732}
 733
 734static bool rtl_loop_wait_high(struct rtl8169_private *tp,
 735			       const struct rtl_cond *c,
 736			       unsigned long d, int n)
 737{
 738	return rtl_loop_wait(tp, c, d, n, true);
 739}
 740
 741static bool rtl_loop_wait_low(struct rtl8169_private *tp,
 742			      const struct rtl_cond *c,
 743			      unsigned long d, int n)
 744{
 745	return rtl_loop_wait(tp, c, d, n, false);
 746}
 747
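/* Declares a struct rtl_cond named <name> together with the prototype of its
 * <name>_check() callback; the macro invocation is directly followed by the
 * callback body.
 */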
 748#define DECLARE_RTL_COND(name)				\
 749static bool name ## _check(struct rtl8169_private *);	\
 750							\
 751static const struct rtl_cond name = {			\
 752	.check	= name ## _check,			\
 753	.msg	= #name					\
 754};							\
 755							\
 756static bool name ## _check(struct rtl8169_private *tp)
 757
 758static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg)
 759{
 760	if (reg & 0xffff0001) {
 761		if (net_ratelimit())
 762			netdev_err(tp->dev, "Invalid ocp reg %x!\n", reg);
 763		return true;
 764	}
 765	return false;
 766}
 767
 768DECLARE_RTL_COND(rtl_ocp_gphy_cond)
 769{
 770	return RTL_R32(tp, GPHY_OCP) & OCPAR_FLAG;
 771}
 772
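/* GPHY OCP accessors: the 16-bit, even OCP register address is packed into
 * the upper half of GPHY_OCP (reg << 15), the data goes into the lower
 * 16 bits; bit 31 (OCPAR_FLAG) distinguishes writes from reads and is polled
 * below to detect completion.
 */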
 773static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
 774{
 775	if (rtl_ocp_reg_failure(tp, reg))
 776		return;
 777
 778	RTL_W32(tp, GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);
 779
 780	rtl_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
 781}
 782
 783static int r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
 784{
 785	if (rtl_ocp_reg_failure(tp, reg))
 786		return 0;
 787
 788	RTL_W32(tp, GPHY_OCP, reg << 15);
 789
 790	return rtl_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
 791		(RTL_R32(tp, GPHY_OCP) & 0xffff) : -ETIMEDOUT;
 792}
 793
 794static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
 795{
 796	if (rtl_ocp_reg_failure(tp, reg))
 797		return;
 798
 799	RTL_W32(tp, OCPDR, OCPAR_FLAG | (reg << 15) | data);
 800}
 801
 802static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
 803{
 804	if (rtl_ocp_reg_failure(tp, reg))
 805		return 0;
 806
 807	RTL_W32(tp, OCPDR, reg << 15);
 808
 809	return RTL_R32(tp, OCPDR);
 810}
 811
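/* Read-modify-write helper for MAC OCP registers: bits in 'mask' are
 * cleared, bits in 'set' are set.
 */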
 812static void r8168_mac_ocp_modify(struct rtl8169_private *tp, u32 reg, u16 mask,
 813				 u16 set)
 814{
 815	u16 data = r8168_mac_ocp_read(tp, reg);
 
 816
 817	r8168_mac_ocp_write(tp, reg, (data & ~mask) | set);
 818}
 819
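/* MDIO ops for RTL8168G and later map the MII registers onto the OCP PHY
 * address space: a write to register 0x1f selects the OCP page (ocp_base),
 * register indices on non-standard pages are rebased by 0x10, and each MII
 * register occupies two bytes of OCP address space.
 */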
 820static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
 821{
 822	if (reg == 0x1f) {
 823		tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE;
 824		return;
 825	}
 826
 827	if (tp->ocp_base != OCP_STD_PHY_BASE)
 828		reg -= 0x10;
 829
 830	r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value);
 831}
 832
 833static int r8168g_mdio_read(struct rtl8169_private *tp, int reg)
 834{
 835	if (reg == 0x1f)
 836		return tp->ocp_base == OCP_STD_PHY_BASE ? 0 : tp->ocp_base >> 4;
 837
 838	if (tp->ocp_base != OCP_STD_PHY_BASE)
 839		reg -= 0x10;
 840
 841	return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2);
 842}
 843
 844static void mac_mcu_write(struct rtl8169_private *tp, int reg, int value)
 845{
 846	if (reg == 0x1f) {
 847		tp->ocp_base = value << 4;
 848		return;
 849	}
 850
 851	r8168_mac_ocp_write(tp, tp->ocp_base + reg, value);
 852}
 853
 854static int mac_mcu_read(struct rtl8169_private *tp, int reg)
 855{
 856	return r8168_mac_ocp_read(tp, tp->ocp_base + reg);
 857}
 858
 859DECLARE_RTL_COND(rtl_phyar_cond)
 860{
 861	return RTL_R32(tp, PHYAR) & 0x80000000;
 862}
 863
 864static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value)
 865{
 866	RTL_W32(tp, PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff));
 867
 868	rtl_loop_wait_low(tp, &rtl_phyar_cond, 25, 20);
 869	/*
 870	 * According to hardware specs a 20us delay is required after write
 871	 * complete indication, but before sending next command.
 872	 */
 873	udelay(20);
 874}
 875
 876static int r8169_mdio_read(struct rtl8169_private *tp, int reg)
 877{
 878	int value;
 879
 880	RTL_W32(tp, PHYAR, 0x0 | (reg & 0x1f) << 16);
 881
 882	value = rtl_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ?
 883		RTL_R32(tp, PHYAR) & 0xffff : -ETIMEDOUT;
 884
 885	/*
 886	 * According to hardware specs a 20us delay is required after read
 887	 * complete indication, but before sending next command.
 888	 */
 889	udelay(20);
 890
 891	return value;
 892}
 893
 894DECLARE_RTL_COND(rtl_ocpar_cond)
 895{
 896	return RTL_R32(tp, OCPAR) & OCPAR_FLAG;
 897}
 898
 899static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data)
 900{
 901	RTL_W32(tp, OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
 902	RTL_W32(tp, OCPAR, OCPAR_GPHY_WRITE_CMD);
 903	RTL_W32(tp, EPHY_RXER_NUM, 0);
 904
 905	rtl_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
 906}
 907
 908static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value)
 909{
 910	r8168dp_1_mdio_access(tp, reg,
 911			      OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK));
 912}
 913
 914static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg)
 915{
 916	r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD);
 917
 918	mdelay(1);
 919	RTL_W32(tp, OCPAR, OCPAR_GPHY_READ_CMD);
 920	RTL_W32(tp, EPHY_RXER_NUM, 0);
 921
 922	return rtl_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
 923		RTL_R32(tp, OCPDR) & OCPDR_DATA_MASK : -ETIMEDOUT;
 924}
 925
 926#define R8168DP_1_MDIO_ACCESS_BIT	0x00020000
 927
 928static void r8168dp_2_mdio_start(struct rtl8169_private *tp)
 929{
 930	RTL_W32(tp, 0xd0, RTL_R32(tp, 0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT);
 931}
 932
 933static void r8168dp_2_mdio_stop(struct rtl8169_private *tp)
 934{
 935	RTL_W32(tp, 0xd0, RTL_R32(tp, 0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
 936}
 937
 938static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value)
 939{
 940	r8168dp_2_mdio_start(tp);
 941
 942	r8169_mdio_write(tp, reg, value);
 943
 944	r8168dp_2_mdio_stop(tp);
 945}
 946
 947static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
 948{
 949	int value;
 950
 951	/* Work around issue with chip reporting wrong PHY ID */
 952	if (reg == MII_PHYSID2)
 953		return 0xc912;
 954
 955	r8168dp_2_mdio_start(tp);
 956
 957	value = r8169_mdio_read(tp, reg);
 958
 959	r8168dp_2_mdio_stop(tp);
 960
 961	return value;
 962}
 963
 964static void rtl_writephy(struct rtl8169_private *tp, int location, int val)
 965{
 966	switch (tp->mac_version) {
 967	case RTL_GIGA_MAC_VER_27:
 968		r8168dp_1_mdio_write(tp, location, val);
 969		break;
 970	case RTL_GIGA_MAC_VER_28:
 971	case RTL_GIGA_MAC_VER_31:
 972		r8168dp_2_mdio_write(tp, location, val);
 973		break;
 974	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63:
 975		r8168g_mdio_write(tp, location, val);
 976		break;
 977	default:
 978		r8169_mdio_write(tp, location, val);
 979		break;
 980	}
 981}
 982
 983static int rtl_readphy(struct rtl8169_private *tp, int location)
 984{
 985	switch (tp->mac_version) {
 986	case RTL_GIGA_MAC_VER_27:
 987		return r8168dp_1_mdio_read(tp, location);
 988	case RTL_GIGA_MAC_VER_28:
 989	case RTL_GIGA_MAC_VER_31:
 990		return r8168dp_2_mdio_read(tp, location);
 991	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63:
 992		return r8168g_mdio_read(tp, location);
 993	default:
 994		return r8169_mdio_read(tp, location);
 995	}
 996}
 997
 998DECLARE_RTL_COND(rtl_ephyar_cond)
 999{
1000	return RTL_R32(tp, EPHYAR) & EPHYAR_FLAG;
1001}
1002
1003static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value)
1004{
1005	RTL_W32(tp, EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
1006		(reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
1007
1008	rtl_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100);
1009
1010	udelay(10);
1011}
1012
1013static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
1014{
1015	RTL_W32(tp, EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
1016
1017	return rtl_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ?
1018		RTL_R32(tp, EPHYAR) & EPHYAR_DATA_MASK : ~0;
1019}
1020
1021static void r8168fp_adjust_ocp_cmd(struct rtl8169_private *tp, u32 *cmd, int type)
1022{
1023	/* based on RTL8168FP_OOBMAC_BASE in vendor driver */
1024	if (tp->mac_version == RTL_GIGA_MAC_VER_52 && type == ERIAR_OOB)
1025		*cmd |= 0x7f0 << 18;
1026}
1027
1028DECLARE_RTL_COND(rtl_eriar_cond)
1029{
1030	return RTL_R32(tp, ERIAR) & ERIAR_FLAG;
1031}
1032
1033static void _rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
1034			   u32 val, int type)
1035{
1036	u32 cmd = ERIAR_WRITE_CMD | type | mask | addr;
1037
1038	BUG_ON((addr & 3) || (mask == 0));
1039	RTL_W32(tp, ERIDR, val);
1040	r8168fp_adjust_ocp_cmd(tp, &cmd, type);
1041	RTL_W32(tp, ERIAR, cmd);
1042
1043	rtl_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
1044}
1045
1046static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
1047			  u32 val)
1048{
1049	_rtl_eri_write(tp, addr, mask, val, ERIAR_EXGMAC);
1050}
1051
1052static u32 _rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
1053{
1054	u32 cmd = ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr;
1055
1056	r8168fp_adjust_ocp_cmd(tp, &cmd, type);
1057	RTL_W32(tp, ERIAR, cmd);
1058
1059	return rtl_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
1060		RTL_R32(tp, ERIDR) : ~0;
1061}
1062
1063static u32 rtl_eri_read(struct rtl8169_private *tp, int addr)
1064{
1065	return _rtl_eri_read(tp, addr, ERIAR_EXGMAC);
1066}
1067
1068static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 p, u32 m)
1069{
1070	u32 val = rtl_eri_read(tp, addr);
1071
1072	rtl_eri_write(tp, addr, ERIAR_MASK_1111, (val & ~m) | p);
1073}
1074
1075static void rtl_eri_set_bits(struct rtl8169_private *tp, int addr, u32 p)
1076{
1077	rtl_w0w1_eri(tp, addr, p, 0);
1078}
1079
1080static void rtl_eri_clear_bits(struct rtl8169_private *tp, int addr, u32 m)
1081{
1082	rtl_w0w1_eri(tp, addr, 0, m);
1083}
1084
1085static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u16 reg)
1086{
1087	RTL_W32(tp, OCPAR, 0x0fu << 12 | (reg & 0x0fff));
1088	return rtl_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ?
1089		RTL_R32(tp, OCPDR) : ~0;
1090}
1091
1092static u32 r8168ep_ocp_read(struct rtl8169_private *tp, u16 reg)
1093{
1094	return _rtl_eri_read(tp, reg, ERIAR_OOB);
1095}
1096
1097static void r8168dp_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg,
1098			      u32 data)
1099{
1100	RTL_W32(tp, OCPDR, data);
1101	RTL_W32(tp, OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
1102	rtl_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20);
1103}
1104
1105static void r8168ep_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg,
1106			      u32 data)
1107{
1108	_rtl_eri_write(tp, reg, ((u32)mask & 0x0f) << ERIAR_MASK_SHIFT,
1109		       data, ERIAR_OOB);
1110}
1111
1112static void r8168dp_oob_notify(struct rtl8169_private *tp, u8 cmd)
1113{
1114	rtl_eri_write(tp, 0xe8, ERIAR_MASK_0001, cmd);
1115
1116	r8168dp_ocp_write(tp, 0x1, 0x30, 0x00000001);
1117}
1118
1119#define OOB_CMD_RESET		0x00
1120#define OOB_CMD_DRIVER_START	0x05
1121#define OOB_CMD_DRIVER_STOP	0x06
1122
1123static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
1124{
1125	return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
1126}
1127
1128DECLARE_RTL_COND(rtl_dp_ocp_read_cond)
1129{
1130	u16 reg;
1131
1132	reg = rtl8168_get_ocp_reg(tp);
1133
1134	return r8168dp_ocp_read(tp, reg) & 0x00000800;
1135}
1136
1137DECLARE_RTL_COND(rtl_ep_ocp_read_cond)
1138{
1139	return r8168ep_ocp_read(tp, 0x124) & 0x00000001;
1140}
1141
1142DECLARE_RTL_COND(rtl_ocp_tx_cond)
1143{
1144	return RTL_R8(tp, IBISR0) & 0x20;
1145}
1146
1147static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
1148{
1149	RTL_W8(tp, IBCR2, RTL_R8(tp, IBCR2) & ~0x01);
1150	rtl_loop_wait_high(tp, &rtl_ocp_tx_cond, 50000, 2000);
1151	RTL_W8(tp, IBISR0, RTL_R8(tp, IBISR0) | 0x20);
1152	RTL_W8(tp, IBCR0, RTL_R8(tp, IBCR0) & ~0x01);
1153}
1154
1155static void rtl8168dp_driver_start(struct rtl8169_private *tp)
1156{
1157	r8168dp_oob_notify(tp, OOB_CMD_DRIVER_START);
1158	rtl_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10000, 10);
 
1159}
1160
1161static void rtl8168ep_driver_start(struct rtl8169_private *tp)
1162{
1163	r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START);
1164	r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01);
1165	rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 10);
 
1166}
1167
1168static void rtl8168_driver_start(struct rtl8169_private *tp)
1169{
1170	switch (tp->mac_version) {
1171	case RTL_GIGA_MAC_VER_27:
1172	case RTL_GIGA_MAC_VER_28:
1173	case RTL_GIGA_MAC_VER_31:
1174		rtl8168dp_driver_start(tp);
1175		break;
1176	case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52:
1177		rtl8168ep_driver_start(tp);
1178		break;
1179	default:
1180		BUG();
1181		break;
1182	}
1183}
1184
1185static void rtl8168dp_driver_stop(struct rtl8169_private *tp)
1186{
1187	r8168dp_oob_notify(tp, OOB_CMD_DRIVER_STOP);
1188	rtl_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10000, 10);
 
1189}
1190
1191static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
1192{
1193	rtl8168ep_stop_cmac(tp);
1194	r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP);
1195	r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01);
1196	rtl_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10000, 10);
 
1197}
1198
1199static void rtl8168_driver_stop(struct rtl8169_private *tp)
1200{
1201	switch (tp->mac_version) {
1202	case RTL_GIGA_MAC_VER_27:
1203	case RTL_GIGA_MAC_VER_28:
1204	case RTL_GIGA_MAC_VER_31:
1205		rtl8168dp_driver_stop(tp);
1206		break;
1207	case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52:
1208		rtl8168ep_driver_stop(tp);
1209		break;
1210	default:
1211		BUG();
1212		break;
1213	}
1214}
1215
1216static bool r8168dp_check_dash(struct rtl8169_private *tp)
1217{
1218	u16 reg = rtl8168_get_ocp_reg(tp);
1219
1220	return !!(r8168dp_ocp_read(tp, reg) & 0x00008000);
1221}
1222
1223static bool r8168ep_check_dash(struct rtl8169_private *tp)
1224{
1225	return r8168ep_ocp_read(tp, 0x128) & 0x00000001;
1226}
1227
1228static bool r8168_check_dash(struct rtl8169_private *tp)
1229{
1230	switch (tp->mac_version) {
1231	case RTL_GIGA_MAC_VER_27:
1232	case RTL_GIGA_MAC_VER_28:
1233	case RTL_GIGA_MAC_VER_31:
1234		return r8168dp_check_dash(tp);
1235	case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52:
1236		return r8168ep_check_dash(tp);
1237	default:
1238		return false;
1239	}
1240}
1241
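/* Toggle bit 0 of ERI register 0xdc (clear, then set) to reset the chip's
 * packet filter.
 */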
1242static void rtl_reset_packet_filter(struct rtl8169_private *tp)
1243{
1244	rtl_eri_clear_bits(tp, 0xdc, BIT(0));
1245	rtl_eri_set_bits(tp, 0xdc, BIT(0));
1246}
1247
1248DECLARE_RTL_COND(rtl_efusear_cond)
1249{
1250	return RTL_R32(tp, EFUSEAR) & EFUSEAR_FLAG;
1251}
1252
1253u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
1254{
1255	RTL_W32(tp, EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
1256
1257	return rtl_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ?
1258		RTL_R32(tp, EFUSEAR) & EFUSEAR_DATA_MASK : ~0;
1259}
1260
1261static u32 rtl_get_events(struct rtl8169_private *tp)
1262{
1263	if (rtl_is_8125(tp))
1264		return RTL_R32(tp, IntrStatus_8125);
1265	else
1266		return RTL_R16(tp, IntrStatus);
1267}
1268
1269static void rtl_ack_events(struct rtl8169_private *tp, u32 bits)
1270{
1271	if (rtl_is_8125(tp))
1272		RTL_W32(tp, IntrStatus_8125, bits);
1273	else
1274		RTL_W16(tp, IntrStatus, bits);
1275}
1276
1277static void rtl_irq_disable(struct rtl8169_private *tp)
1278{
1279	if (rtl_is_8125(tp))
1280		RTL_W32(tp, IntrMask_8125, 0);
1281	else
1282		RTL_W16(tp, IntrMask, 0);
1283	tp->irq_enabled = 0;
1284}
1285
1286static void rtl_irq_enable(struct rtl8169_private *tp)
1287{
1288	tp->irq_enabled = 1;
1289	if (rtl_is_8125(tp))
1290		RTL_W32(tp, IntrMask_8125, tp->irq_mask);
1291	else
1292		RTL_W16(tp, IntrMask, tp->irq_mask);
1293}
1294
1295static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
1296{
1297	rtl_irq_disable(tp);
1298	rtl_ack_events(tp, 0xffffffff);
1299	rtl_pci_commit(tp);
1300}
1301
1302static void rtl_link_chg_patch(struct rtl8169_private *tp)
1303{
1304	struct phy_device *phydev = tp->phydev;
1305
1306	if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
1307	    tp->mac_version == RTL_GIGA_MAC_VER_38) {
1308		if (phydev->speed == SPEED_1000) {
1309			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011);
1310			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005);
1311		} else if (phydev->speed == SPEED_100) {
1312			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f);
1313			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005);
1314		} else {
1315			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f);
1316			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f);
1317		}
1318		rtl_reset_packet_filter(tp);
1319	} else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
1320		   tp->mac_version == RTL_GIGA_MAC_VER_36) {
1321		if (phydev->speed == SPEED_1000) {
1322			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011);
1323			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005);
1324		} else {
1325			rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f);
1326			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f);
1327		}
1328	} else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
1329		if (phydev->speed == SPEED_10) {
1330			rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02);
1331			rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060a);
1332		} else {
1333			rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000);
1334		}
1335	}
1336}
1337
1338#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
1339
1340static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1341{
1342	struct rtl8169_private *tp = netdev_priv(dev);
1343
1344	wol->supported = WAKE_ANY;
1345	wol->wolopts = tp->saved_wolopts;
1346}
1347
1348static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
1349{
1350	static const struct {
1351		u32 opt;
1352		u16 reg;
1353		u8  mask;
1354	} cfg[] = {
1355		{ WAKE_PHY,   Config3, LinkUp },
1356		{ WAKE_UCAST, Config5, UWF },
1357		{ WAKE_BCAST, Config5, BWF },
1358		{ WAKE_MCAST, Config5, MWF },
1359		{ WAKE_ANY,   Config5, LanWake },
1360		{ WAKE_MAGIC, Config3, MagicPacket }
1361	};
1362	unsigned int i, tmp = ARRAY_SIZE(cfg);
1363	u8 options;
1364
1365	rtl_unlock_config_regs(tp);
1366
1367	if (rtl_is_8168evl_up(tp)) {
1368		tmp--;
1369		if (wolopts & WAKE_MAGIC)
1370			rtl_eri_set_bits(tp, 0x0dc, MagicPacket_v2);
1371		else
1372			rtl_eri_clear_bits(tp, 0x0dc, MagicPacket_v2);
1373	} else if (rtl_is_8125(tp)) {
1374		tmp--;
1375		if (wolopts & WAKE_MAGIC)
1376			r8168_mac_ocp_modify(tp, 0xc0b6, 0, BIT(0));
1377		else
1378			r8168_mac_ocp_modify(tp, 0xc0b6, BIT(0), 0);
1379	}
1380
1381	for (i = 0; i < tmp; i++) {
1382		options = RTL_R8(tp, cfg[i].reg) & ~cfg[i].mask;
1383		if (wolopts & cfg[i].opt)
1384			options |= cfg[i].mask;
1385		RTL_W8(tp, cfg[i].reg, options);
1386	}
1387
1388	switch (tp->mac_version) {
1389	case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
1390		options = RTL_R8(tp, Config1) & ~PMEnable;
1391		if (wolopts)
1392			options |= PMEnable;
1393		RTL_W8(tp, Config1, options);
1394		break;
1395	case RTL_GIGA_MAC_VER_34:
1396	case RTL_GIGA_MAC_VER_37:
1397	case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_63:
1398		options = RTL_R8(tp, Config2) & ~PME_SIGNAL;
1399		if (wolopts)
1400			options |= PME_SIGNAL;
1401		RTL_W8(tp, Config2, options);
1402		break;
1403	default:
1404		break;
1405	}
1406
1407	rtl_lock_config_regs(tp);
1408
1409	device_set_wakeup_enable(tp_to_dev(tp), wolopts);
1410	tp->dev->wol_enabled = wolopts ? 1 : 0;
1411}
1412
1413static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1414{
1415	struct rtl8169_private *tp = netdev_priv(dev);
1416
1417	if (wol->wolopts & ~WAKE_ANY)
1418		return -EINVAL;
1419
1420	tp->saved_wolopts = wol->wolopts;
1421	__rtl8169_set_wol(tp, tp->saved_wolopts);
1422
1423	return 0;
1424}
1425
1426static void rtl8169_get_drvinfo(struct net_device *dev,
1427				struct ethtool_drvinfo *info)
1428{
1429	struct rtl8169_private *tp = netdev_priv(dev);
1430	struct rtl_fw *rtl_fw = tp->rtl_fw;
1431
1432	strlcpy(info->driver, MODULENAME, sizeof(info->driver));
1433	strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
1434	BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
1435	if (rtl_fw)
1436		strlcpy(info->fw_version, rtl_fw->version,
1437			sizeof(info->fw_version));
1438}
1439
1440static int rtl8169_get_regs_len(struct net_device *dev)
1441{
1442	return R8169_REGS_SIZE;
1443}
1444
1445static netdev_features_t rtl8169_fix_features(struct net_device *dev,
1446	netdev_features_t features)
1447{
1448	struct rtl8169_private *tp = netdev_priv(dev);
1449
1450	if (dev->mtu > TD_MSS_MAX)
1451		features &= ~NETIF_F_ALL_TSO;
1452
1453	if (dev->mtu > ETH_DATA_LEN &&
1454	    tp->mac_version > RTL_GIGA_MAC_VER_06)
1455		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_ALL_TSO);
1456
1457	return features;
1458}
1459
1460static void rtl_set_rx_config_features(struct rtl8169_private *tp,
1461				       netdev_features_t features)
1462{
1463	u32 rx_config = RTL_R32(tp, RxConfig);
1464
1465	if (features & NETIF_F_RXALL)
1466		rx_config |= RX_CONFIG_ACCEPT_ERR_MASK;
1467	else
1468		rx_config &= ~RX_CONFIG_ACCEPT_ERR_MASK;
1469
1470	if (rtl_is_8125(tp)) {
1471		if (features & NETIF_F_HW_VLAN_CTAG_RX)
1472			rx_config |= RX_VLAN_8125;
1473		else
1474			rx_config &= ~RX_VLAN_8125;
1475	}
1476
1477	RTL_W32(tp, RxConfig, rx_config);
1478}
1479
1480static int rtl8169_set_features(struct net_device *dev,
1481				netdev_features_t features)
1482{
1483	struct rtl8169_private *tp = netdev_priv(dev);
1484
1485	rtl_set_rx_config_features(tp, features);
1486
1487	if (features & NETIF_F_RXCSUM)
1488		tp->cp_cmd |= RxChkSum;
1489	else
1490		tp->cp_cmd &= ~RxChkSum;
1491
1492	if (!rtl_is_8125(tp)) {
1493		if (features & NETIF_F_HW_VLAN_CTAG_RX)
1494			tp->cp_cmd |= RxVlan;
1495		else
1496			tp->cp_cmd &= ~RxVlan;
1497	}
1498
1499	RTL_W16(tp, CPlusCmd, tp->cp_cmd);
1500	rtl_pci_commit(tp);
1501
1502	return 0;
1503}
1504
1505static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb)
1506{
1507	return (skb_vlan_tag_present(skb)) ?
1508		TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00;
1509}
1510
1511static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
1512{
1513	u32 opts2 = le32_to_cpu(desc->opts2);
1514
1515	if (opts2 & RxVlanTag)
1516		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));
1517}
1518
1519static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1520			     void *p)
1521{
1522	struct rtl8169_private *tp = netdev_priv(dev);
1523	u32 __iomem *data = tp->mmio_addr;
1524	u32 *dw = p;
1525	int i;
1526
1527	for (i = 0; i < R8169_REGS_SIZE; i += 4)
1528		memcpy_fromio(dw++, data++, 4);
1529}
1530
1531static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
1532	"tx_packets",
1533	"rx_packets",
1534	"tx_errors",
1535	"rx_errors",
1536	"rx_missed",
1537	"align_errors",
1538	"tx_single_collisions",
1539	"tx_multi_collisions",
1540	"unicast",
1541	"broadcast",
1542	"multicast",
1543	"tx_aborted",
1544	"tx_underrun",
1545};
1546
1547static int rtl8169_get_sset_count(struct net_device *dev, int sset)
1548{
1549	switch (sset) {
1550	case ETH_SS_STATS:
1551		return ARRAY_SIZE(rtl8169_gstrings);
1552	default:
1553		return -EOPNOTSUPP;
1554	}
1555}
1556
1557DECLARE_RTL_COND(rtl_counters_cond)
1558{
1559	return RTL_R32(tp, CounterAddrLow) & (CounterReset | CounterDump);
1560}
1561
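/* Point the chip at the DMA-coherent counters buffer and issue a dump or
 * reset command via CounterAddrLow; the command bits self-clear once the
 * operation completes, which the loop below waits for.
 */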
1562static void rtl8169_do_counters(struct rtl8169_private *tp, u32 counter_cmd)
1563{
1564	dma_addr_t paddr = tp->counters_phys_addr;
1565	u32 cmd;
1566
1567	RTL_W32(tp, CounterAddrHigh, (u64)paddr >> 32);
1568	rtl_pci_commit(tp);
1569	cmd = (u64)paddr & DMA_BIT_MASK(32);
1570	RTL_W32(tp, CounterAddrLow, cmd);
1571	RTL_W32(tp, CounterAddrLow, cmd | counter_cmd);
1572
1573	rtl_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
1574}
1575
1576static void rtl8169_reset_counters(struct rtl8169_private *tp)
1577{
1578	/*
1579	 * Versions prior to RTL_GIGA_MAC_VER_19 don't support resetting the
1580	 * tally counters.
1581	 */
1582	if (tp->mac_version >= RTL_GIGA_MAC_VER_19)
1583		rtl8169_do_counters(tp, CounterReset);
1584}
1585
1586static void rtl8169_update_counters(struct rtl8169_private *tp)
1587{
1588	u8 val = RTL_R8(tp, ChipCmd);
1589
1590	/*
1591	 * Some chips are unable to dump tally counters when the receiver
1592	 * is disabled. If it reads 0xff, the chip may be in a PCI power-save state.
1593	 */
1594	if (val & CmdRxEnb && val != 0xff)
1595		rtl8169_do_counters(tp, CounterDump);
1596}
1597
1598static void rtl8169_init_counter_offsets(struct rtl8169_private *tp)
1599{
1600	struct rtl8169_counters *counters = tp->counters;
1601
1602	/*
1603	 * rtl8169_init_counter_offsets is called from rtl_open.  On chip
1604	 * versions prior to RTL_GIGA_MAC_VER_19 the tally counters are only
1605	 * reset by a power cycle, while the counter values collected by the
1606	 * driver are reset at every driver unload/load cycle.
1607	 *
1608	 * To make sure the HW values returned by @get_stats64 match the SW
1609	 * values, we collect the initial values at first open(*) and use them
1610	 * as offsets to normalize the values returned by @get_stats64.
1611	 *
1612	 * (*) We can't call rtl8169_init_counter_offsets from rtl_init_one
1613	 * for the reason stated in rtl8169_update_counters; CmdRxEnb is only
1614	 * set at open time by rtl_hw_start.
1615	 */
1616
1617	if (tp->tc_offset.inited)
1618		return;
1619
1620	rtl8169_reset_counters(tp);
1621	rtl8169_update_counters(tp);
1622
1623	tp->tc_offset.tx_errors = counters->tx_errors;
1624	tp->tc_offset.tx_multi_collision = counters->tx_multi_collision;
1625	tp->tc_offset.tx_aborted = counters->tx_aborted;
1626	tp->tc_offset.rx_missed = counters->rx_missed;
1627	tp->tc_offset.inited = true;
1628}
1629
1630static void rtl8169_get_ethtool_stats(struct net_device *dev,
1631				      struct ethtool_stats *stats, u64 *data)
1632{
1633	struct rtl8169_private *tp = netdev_priv(dev);
1634	struct rtl8169_counters *counters;
1635
1636	counters = tp->counters;
1637	rtl8169_update_counters(tp);
1638
1639	data[0] = le64_to_cpu(counters->tx_packets);
1640	data[1] = le64_to_cpu(counters->rx_packets);
1641	data[2] = le64_to_cpu(counters->tx_errors);
1642	data[3] = le32_to_cpu(counters->rx_errors);
1643	data[4] = le16_to_cpu(counters->rx_missed);
1644	data[5] = le16_to_cpu(counters->align_errors);
1645	data[6] = le32_to_cpu(counters->tx_one_collision);
1646	data[7] = le32_to_cpu(counters->tx_multi_collision);
1647	data[8] = le64_to_cpu(counters->rx_unicast);
1648	data[9] = le64_to_cpu(counters->rx_broadcast);
1649	data[10] = le32_to_cpu(counters->rx_multicast);
1650	data[11] = le16_to_cpu(counters->tx_aborted);
1651	data[12] = le16_to_cpu(counters->tx_underun);
1652}
1653
1654static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1655{
1656	switch (stringset) {
1657	case ETH_SS_STATS:
1658		memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
1659		break;
1660	}
1661}
1662
1663/*
1664 * Interrupt coalescing
1665 *
1666 * > 1 - the availability of the IntrMitigate (0xe2) register through the
1667 * >     8169, 8168 and 810x line of chipsets
1668 *
1669 * 8169, 8168, and 8136(810x) serial chipsets support it.
1670 *
1671 * > 2 - the Tx timer unit at gigabit speed
1672 *
1673 * The unit of the timer depends on both the speed and the setting of CPlusCmd
1674 * (0xe0) bit 1 and bit 0.
1675 *
1676 * For 8169
1677 * bit[1:0] \ speed        1000M           100M            10M
1678 * 0 0                     320ns           2.56us          40.96us
1679 * 0 1                     2.56us          20.48us         327.7us
1680 * 1 0                     5.12us          40.96us         655.4us
1681 * 1 1                     10.24us         81.92us         1.31ms
1682 *
1683 * For the other
1684 * bit[1:0] \ speed        1000M           100M            10M
1685 * 0 0                     5us             2.56us          40.96us
1686 * 0 1                     40us            20.48us         327.7us
1687 * 1 0                     80us            40.96us         655.4us
1688 * 1 1                     160us           81.92us         1.31ms
1689 */
1690
1691/* rx/tx scale factors for all CPlusCmd[0:1] cases */
1692struct rtl_coalesce_info {
1693	u32 speed;
1694	u32 scale_nsecs[4];
1695};
1696
1697/* produce array with base delay *1, *8, *8*2, *8*2*2 */
1698#define COALESCE_DELAY(d) { (d), 8 * (d), 16 * (d), 32 * (d) }
1699
1700static const struct rtl_coalesce_info rtl_coalesce_info_8169[] = {
1701	{ SPEED_1000,	COALESCE_DELAY(320) },
1702	{ SPEED_100,	COALESCE_DELAY(2560) },
1703	{ SPEED_10,	COALESCE_DELAY(40960) },
1704	{ 0 },
1705};
1706
1707static const struct rtl_coalesce_info rtl_coalesce_info_8168_8136[] = {
1708	{ SPEED_1000,	COALESCE_DELAY(5000) },
1709	{ SPEED_100,	COALESCE_DELAY(2560) },
1710	{ SPEED_10,	COALESCE_DELAY(40960) },
1711	{ 0 },
1712};
1713#undef COALESCE_DELAY
1714
1715/* get rx/tx scale vector corresponding to current speed */
1716static const struct rtl_coalesce_info *
1717rtl_coalesce_info(struct rtl8169_private *tp)
1718{
1719	const struct rtl_coalesce_info *ci;
1720
1721	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
1722		ci = rtl_coalesce_info_8169;
1723	else
1724		ci = rtl_coalesce_info_8168_8136;
1725
1726	/* if speed is unknown assume highest one */
1727	if (tp->phydev->speed == SPEED_UNKNOWN)
1728		return ci;
1729
1730	for (; ci->speed; ci++) {
1731		if (tp->phydev->speed == ci->speed)
1732			return ci;
1733	}
1734
1735	return ERR_PTR(-ELNRNG);
1736}
1737
1738static int rtl_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
1739{
1740	struct rtl8169_private *tp = netdev_priv(dev);
1741	const struct rtl_coalesce_info *ci;
1742	u32 scale, c_us, c_fr;
1743	u16 intrmit;
1744
1745	if (rtl_is_8125(tp))
1746		return -EOPNOTSUPP;
1747
1748	memset(ec, 0, sizeof(*ec));
1749
1750	/* get rx/tx scale corresponding to current speed and CPlusCmd[0:1] */
1751	ci = rtl_coalesce_info(tp);
1752	if (IS_ERR(ci))
1753		return PTR_ERR(ci);
1754
1755	scale = ci->scale_nsecs[tp->cp_cmd & INTT_MASK];
1756
1757	intrmit = RTL_R16(tp, IntrMitigate);
1758
1759	c_us = FIELD_GET(RTL_COALESCE_TX_USECS, intrmit);
1760	ec->tx_coalesce_usecs = DIV_ROUND_UP(c_us * scale, 1000);
1761
1762	c_fr = FIELD_GET(RTL_COALESCE_TX_FRAMES, intrmit);
1763	/* ethtool_coalesce states usecs and max_frames must not both be 0 */
1764	ec->tx_max_coalesced_frames = (c_us || c_fr) ? c_fr * 4 : 1;
1765
1766	c_us = FIELD_GET(RTL_COALESCE_RX_USECS, intrmit);
1767	ec->rx_coalesce_usecs = DIV_ROUND_UP(c_us * scale, 1000);
1768
1769	c_fr = FIELD_GET(RTL_COALESCE_RX_FRAMES, intrmit);
1770	ec->rx_max_coalesced_frames = (c_us || c_fr) ? c_fr * 4 : 1;
1771
1772	return 0;
1773}
1774
1775/* choose appropriate scale factor and CPlusCmd[0:1] for (speed, usec) */
1776static int rtl_coalesce_choose_scale(struct rtl8169_private *tp, u32 usec,
1777				     u16 *cp01)
1778{
1779	const struct rtl_coalesce_info *ci;
1780	u16 i;
1781
1782	ci = rtl_coalesce_info(tp);
1783	if (IS_ERR(ci))
1784		return PTR_ERR(ci);
1785
1786	for (i = 0; i < 4; i++) {
1787		if (usec <= ci->scale_nsecs[i] * RTL_COALESCE_T_MAX / 1000U) {
1788			*cp01 = i;
1789			return ci->scale_nsecs[i];
1790		}
1791	}
1792
1793	return -ERANGE;
1794}
1795
1796static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
1797{
1798	struct rtl8169_private *tp = netdev_priv(dev);
1799	u32 tx_fr = ec->tx_max_coalesced_frames;
1800	u32 rx_fr = ec->rx_max_coalesced_frames;
1801	u32 coal_usec_max, units;
1802	u16 w = 0, cp01 = 0;
1803	int scale;
1804
1805	if (rtl_is_8125(tp))
1806		return -EOPNOTSUPP;
1807
1808	if (rx_fr > RTL_COALESCE_FRAME_MAX || tx_fr > RTL_COALESCE_FRAME_MAX)
1809		return -ERANGE;
1810
1811	coal_usec_max = max(ec->rx_coalesce_usecs, ec->tx_coalesce_usecs);
1812	scale = rtl_coalesce_choose_scale(tp, coal_usec_max, &cp01);
1813	if (scale < 0)
1814		return scale;
1815
1816	/* Accept the max_frames=1 that we returned in rtl_get_coalesce. Accept
1817	 * it not only when usecs=0, because of e.g. the following scenario:
1818	 *
1819	 * - both rx_usecs=0 & rx_frames=0 in hardware (no delay on RX)
1820	 * - rtl_get_coalesce returns rx_usecs=0, rx_frames=1
1821	 * - then the user runs `ethtool -C eth0 rx-usecs 100`
1822	 *
1823	 * Since ethtool passes the whole ethtool_coalesce struct to the kernel,
1824	 * rx_frames has to be set to 0 here if we want it to be ignored.
1825	 */
1826	if (rx_fr == 1)
1827		rx_fr = 0;
1828	if (tx_fr == 1)
1829		tx_fr = 0;
1830
1831	/* HW requires time limit to be set if frame limit is set */
1832	if ((tx_fr && !ec->tx_coalesce_usecs) ||
1833	    (rx_fr && !ec->rx_coalesce_usecs))
1834		return -EINVAL;
1835
1836	w |= FIELD_PREP(RTL_COALESCE_TX_FRAMES, DIV_ROUND_UP(tx_fr, 4));
1837	w |= FIELD_PREP(RTL_COALESCE_RX_FRAMES, DIV_ROUND_UP(rx_fr, 4));
1838
1839	units = DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000U, scale);
1840	w |= FIELD_PREP(RTL_COALESCE_TX_USECS, units);
1841	units = DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000U, scale);
1842	w |= FIELD_PREP(RTL_COALESCE_RX_USECS, units);
1843
1844	RTL_W16(tp, IntrMitigate, w);
1845
1846	/* Meaning of PktCntrDisable bit changed from RTL8168e-vl */
1847	if (rtl_is_8168evl_up(tp)) {
1848		if (!rx_fr && !tx_fr)
1849			/* disable packet counter */
1850			tp->cp_cmd |= PktCntrDisable;
1851		else
1852			tp->cp_cmd &= ~PktCntrDisable;
1853	}
1854
1855	tp->cp_cmd = (tp->cp_cmd & ~INTT_MASK) | cp01;
1856	RTL_W16(tp, CPlusCmd, tp->cp_cmd);
1857	rtl_pci_commit(tp);
1858
1859	return 0;
1860}
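
/* Usage sketch (assuming RTL_COALESCE_T_MAX is the 6-bit timer maximum 0x3f):
 * `ethtool -C eth0 rx-usecs 100 rx-frames 16` on an 8168 linked at 1000Mbps
 * selects the 5us base scale (it covers up to 5us * 0x3f = 315us), so the RX
 * half of IntrMitigate gets a frame field of DIV_ROUND_UP(16, 4) = 4 and a
 * timer field of DIV_ROUND_UP(100 * 1000, 5000) = 20.
 */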
1861
1862static int rtl8169_get_eee(struct net_device *dev, struct ethtool_eee *data)
1863{
1864	struct rtl8169_private *tp = netdev_priv(dev);
1865
1866	if (!rtl_supports_eee(tp))
1867		return -EOPNOTSUPP;
1868
1869	return phy_ethtool_get_eee(tp->phydev, data);
1870}
1871
1872static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data)
1873{
1874	struct rtl8169_private *tp = netdev_priv(dev);
1875	int ret;
1876
1877	if (!rtl_supports_eee(tp))
1878		return -EOPNOTSUPP;
1879
1880	ret = phy_ethtool_set_eee(tp->phydev, data);
1881
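	/* Cache the (possibly user-restricted) advertisement so that
	 * rtl_enable_eee() can restore it when the PHY is re-initialized.
	 */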
1882	if (!ret)
1883		tp->eee_adv = phy_read_mmd(dev->phydev, MDIO_MMD_AN,
1884					   MDIO_AN_EEE_ADV);
1885	return ret;
1886}
1887
1888static const struct ethtool_ops rtl8169_ethtool_ops = {
1889	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
1890				     ETHTOOL_COALESCE_MAX_FRAMES,
1891	.get_drvinfo		= rtl8169_get_drvinfo,
1892	.get_regs_len		= rtl8169_get_regs_len,
1893	.get_link		= ethtool_op_get_link,
1894	.get_coalesce		= rtl_get_coalesce,
1895	.set_coalesce		= rtl_set_coalesce,
1896	.get_regs		= rtl8169_get_regs,
1897	.get_wol		= rtl8169_get_wol,
1898	.set_wol		= rtl8169_set_wol,
1899	.get_strings		= rtl8169_get_strings,
1900	.get_sset_count		= rtl8169_get_sset_count,
1901	.get_ethtool_stats	= rtl8169_get_ethtool_stats,
1902	.get_ts_info		= ethtool_op_get_ts_info,
1903	.nway_reset		= phy_ethtool_nway_reset,
1904	.get_eee		= rtl8169_get_eee,
1905	.set_eee		= rtl8169_set_eee,
1906	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
1907	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
1908};
1909
1910static void rtl_enable_eee(struct rtl8169_private *tp)
1911{
1912	struct phy_device *phydev = tp->phydev;
1913	int adv;
1914
1915	/* respect EEE advertisement the user may have set */
1916	if (tp->eee_adv >= 0)
1917		adv = tp->eee_adv;
1918	else
1919		adv = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
1920
1921	if (adv >= 0)
1922		phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv);
1923}
1924
1925static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
1926{
1927	/*
1928	 * The driver currently handles the 8168Bf and the 8168Be identically
1929	 * but they can be identified more specifically through the test below
1930	 * if needed:
1931	 *
1932	 * (RTL_R32(tp, TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
1933	 *
1934	 * Same thing for the 8101Eb and the 8101Ec:
1935	 *
1936	 * (RTL_R32(tp, TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
1937	 */
1938	static const struct rtl_mac_info {
1939		u16 mask;
1940		u16 val;
1941		enum mac_version ver;
1942	} mac_info[] = {
1943		/* 8125B family. */
1944		{ 0x7cf, 0x641,	RTL_GIGA_MAC_VER_63 },
1945
1946		/* 8125A family. */
1947		{ 0x7cf, 0x608,	RTL_GIGA_MAC_VER_60 },
1948		{ 0x7c8, 0x608,	RTL_GIGA_MAC_VER_61 },
1949
1950		/* RTL8117 */
1951		{ 0x7cf, 0x54a,	RTL_GIGA_MAC_VER_52 },
1952
1953		/* 8168EP family. */
1954		{ 0x7cf, 0x502,	RTL_GIGA_MAC_VER_51 },
1955		{ 0x7cf, 0x501,	RTL_GIGA_MAC_VER_50 },
1956		{ 0x7cf, 0x500,	RTL_GIGA_MAC_VER_49 },
1957
1958		/* 8168H family. */
1959		{ 0x7cf, 0x541,	RTL_GIGA_MAC_VER_46 },
1960		{ 0x7cf, 0x540,	RTL_GIGA_MAC_VER_45 },
1961
1962		/* 8168G family. */
1963		{ 0x7cf, 0x5c8,	RTL_GIGA_MAC_VER_44 },
1964		{ 0x7cf, 0x509,	RTL_GIGA_MAC_VER_42 },
1965		{ 0x7cf, 0x4c1,	RTL_GIGA_MAC_VER_41 },
1966		{ 0x7cf, 0x4c0,	RTL_GIGA_MAC_VER_40 },
1967
1968		/* 8168F family. */
1969		{ 0x7c8, 0x488,	RTL_GIGA_MAC_VER_38 },
1970		{ 0x7cf, 0x481,	RTL_GIGA_MAC_VER_36 },
1971		{ 0x7cf, 0x480,	RTL_GIGA_MAC_VER_35 },
1972
1973		/* 8168E family. */
1974		{ 0x7c8, 0x2c8,	RTL_GIGA_MAC_VER_34 },
1975		{ 0x7cf, 0x2c1,	RTL_GIGA_MAC_VER_32 },
1976		{ 0x7c8, 0x2c0,	RTL_GIGA_MAC_VER_33 },
1977
1978		/* 8168D family. */
1979		{ 0x7cf, 0x281,	RTL_GIGA_MAC_VER_25 },
1980		{ 0x7c8, 0x280,	RTL_GIGA_MAC_VER_26 },
1981
1982		/* 8168DP family. */
1983		{ 0x7cf, 0x288,	RTL_GIGA_MAC_VER_27 },
1984		{ 0x7cf, 0x28a,	RTL_GIGA_MAC_VER_28 },
1985		{ 0x7cf, 0x28b,	RTL_GIGA_MAC_VER_31 },
1986
1987		/* 8168C family. */
1988		{ 0x7cf, 0x3c9,	RTL_GIGA_MAC_VER_23 },
1989		{ 0x7cf, 0x3c8,	RTL_GIGA_MAC_VER_18 },
1990		{ 0x7c8, 0x3c8,	RTL_GIGA_MAC_VER_24 },
1991		{ 0x7cf, 0x3c0,	RTL_GIGA_MAC_VER_19 },
1992		{ 0x7cf, 0x3c2,	RTL_GIGA_MAC_VER_20 },
1993		{ 0x7cf, 0x3c3,	RTL_GIGA_MAC_VER_21 },
1994		{ 0x7c8, 0x3c0,	RTL_GIGA_MAC_VER_22 },
1995
1996		/* 8168B family. */
1997		{ 0x7cf, 0x380,	RTL_GIGA_MAC_VER_12 },
1998		{ 0x7c8, 0x380,	RTL_GIGA_MAC_VER_17 },
1999		{ 0x7c8, 0x300,	RTL_GIGA_MAC_VER_11 },
2000
2001		/* 8101 family. */
2002		{ 0x7c8, 0x448,	RTL_GIGA_MAC_VER_39 },
2003		{ 0x7c8, 0x440,	RTL_GIGA_MAC_VER_37 },
2004		{ 0x7cf, 0x409,	RTL_GIGA_MAC_VER_29 },
2005		{ 0x7c8, 0x408,	RTL_GIGA_MAC_VER_30 },
2006		{ 0x7cf, 0x349,	RTL_GIGA_MAC_VER_08 },
2007		{ 0x7cf, 0x249,	RTL_GIGA_MAC_VER_08 },
2008		{ 0x7cf, 0x348,	RTL_GIGA_MAC_VER_07 },
2009		{ 0x7cf, 0x248,	RTL_GIGA_MAC_VER_07 },
2010		{ 0x7cf, 0x340,	RTL_GIGA_MAC_VER_13 },
2011		{ 0x7cf, 0x240,	RTL_GIGA_MAC_VER_14 },
2012		{ 0x7cf, 0x343,	RTL_GIGA_MAC_VER_10 },
2013		{ 0x7cf, 0x342,	RTL_GIGA_MAC_VER_16 },
2014		{ 0x7c8, 0x348,	RTL_GIGA_MAC_VER_09 },
2015		{ 0x7c8, 0x248,	RTL_GIGA_MAC_VER_09 },
2016		{ 0x7c8, 0x340,	RTL_GIGA_MAC_VER_16 },
2017		/* FIXME: where did these entries come from ? -- FR */
2018		{ 0xfc8, 0x388,	RTL_GIGA_MAC_VER_13 },
2019		{ 0xfc8, 0x308,	RTL_GIGA_MAC_VER_13 },
2020
2021		/* 8110 family. */
2022		{ 0xfc8, 0x980,	RTL_GIGA_MAC_VER_06 },
2023		{ 0xfc8, 0x180,	RTL_GIGA_MAC_VER_05 },
2024		{ 0xfc8, 0x100,	RTL_GIGA_MAC_VER_04 },
2025		{ 0xfc8, 0x040,	RTL_GIGA_MAC_VER_03 },
2026		{ 0xfc8, 0x008,	RTL_GIGA_MAC_VER_02 },
2027
2028		/* Catch-all */
2029		{ 0x000, 0x000,	RTL_GIGA_MAC_NONE   }
2030	};
2031	const struct rtl_mac_info *p = mac_info;
2032	enum mac_version ver;
2033
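	/* The table ends in a catch-all entry (mask 0x000 matches any XID),
	 * so this lookup always terminates.
	 */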
2034	while ((xid & p->mask) != p->val)
2035		p++;
2036	ver = p->ver;
2037
2038	if (ver != RTL_GIGA_MAC_NONE && !gmii) {
2039		if (ver == RTL_GIGA_MAC_VER_42)
2040			ver = RTL_GIGA_MAC_VER_43;
2041		else if (ver == RTL_GIGA_MAC_VER_45)
2042			ver = RTL_GIGA_MAC_VER_47;
2043		else if (ver == RTL_GIGA_MAC_VER_46)
2044			ver = RTL_GIGA_MAC_VER_48;
2045	}
2046
2047	return ver;
2048}
2049
2050static void rtl_release_firmware(struct rtl8169_private *tp)
2051{
2052	if (tp->rtl_fw) {
2053		rtl_fw_release_firmware(tp->rtl_fw);
2054		kfree(tp->rtl_fw);
2055		tp->rtl_fw = NULL;
2056	}
2057}
2058
2059void r8169_apply_firmware(struct rtl8169_private *tp)
2060{
2061	int val;
2062
2063	/* TODO: release firmware if rtl_fw_write_firmware signals failure. */
2064	if (tp->rtl_fw) {
2065		rtl_fw_write_firmware(tp, tp->rtl_fw);
2066		/* At least one firmware doesn't reset tp->ocp_base. */
2067		tp->ocp_base = OCP_STD_PHY_BASE;
2068
2069		/* PHY soft reset may still be in progress */
2070		phy_read_poll_timeout(tp->phydev, MII_BMCR, val,
2071				      !(val & BMCR_RESET),
2072				      50000, 600000, true);
2073	}
2074}
2075
2076static void rtl8168_config_eee_mac(struct rtl8169_private *tp)
2077{
2078	/* Adjust EEE LED frequency */
2079	if (tp->mac_version != RTL_GIGA_MAC_VER_38)
2080		RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
2081
2082	rtl_eri_set_bits(tp, 0x1b0, 0x0003);
2083}
2084
2085static void rtl8125a_config_eee_mac(struct rtl8169_private *tp)
2086{
2087	r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0));
2088	r8168_mac_ocp_modify(tp, 0xeb62, 0, BIT(2) | BIT(1));
2089}
2090
2091static void rtl8125_set_eee_txidle_timer(struct rtl8169_private *tp)
2092{
2093	RTL_W16(tp, EEE_TXIDLE_TIMER_8125, tp->dev->mtu + ETH_HLEN + 0x20);
2094}
2095
2096static void rtl8125b_config_eee_mac(struct rtl8169_private *tp)
2097{
2098	rtl8125_set_eee_txidle_timer(tp);
2099	r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0));
2100}
2101
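/* Mirror the MAC address into the extended GMAC (ERI) registers at 0xe0/0xe4,
 * plus a two-byte-shifted copy at 0xf0/0xf4. Used in addition to MAC0/MAC4 for
 * RTL_GIGA_MAC_VER_34, see rtl_rar_set() below.
 */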
2102static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr)
2103{
2104	const u16 w[] = {
2105		addr[0] | (addr[1] << 8),
2106		addr[2] | (addr[3] << 8),
2107		addr[4] | (addr[5] << 8)
2108	};
2109
2110	rtl_eri_write(tp, 0xe0, ERIAR_MASK_1111, w[0] | (w[1] << 16));
2111	rtl_eri_write(tp, 0xe4, ERIAR_MASK_1111, w[2]);
2112	rtl_eri_write(tp, 0xf0, ERIAR_MASK_1111, w[0] << 16);
2113	rtl_eri_write(tp, 0xf4, ERIAR_MASK_1111, w[1] | (w[2] << 16));
2114}
2115
2116u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp)
2117{
2118	u16 data1, data2, ioffset;
2119
2120	r8168_mac_ocp_write(tp, 0xdd02, 0x807d);
2121	data1 = r8168_mac_ocp_read(tp, 0xdd02);
2122	data2 = r8168_mac_ocp_read(tp, 0xdd00);
2123
2124	ioffset = (data2 >> 1) & 0x7ff8;
2125	ioffset |= data2 & 0x0007;
2126	if (data1 & BIT(7))
2127		ioffset |= BIT(15);
2128
2129	return ioffset;
2130}
2131
2132static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
2133{
2134	set_bit(flag, tp->wk.flags);
2135	schedule_work(&tp->wk.work);
2136}
2137
2138static void rtl8169_init_phy(struct rtl8169_private *tp)
2139{
2140	r8169_hw_phy_config(tp, tp->phydev, tp->mac_version);
2141
2142	if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
2143		pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
2144		pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
2145		/* set undocumented MAC Reg C+CR Offset 0x82h */
2146		RTL_W8(tp, 0x82, 0x01);
2147	}
2148
2149	if (tp->mac_version == RTL_GIGA_MAC_VER_05 &&
2150	    tp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_GIGABYTE &&
2151	    tp->pci_dev->subsystem_device == 0xe000)
2152		phy_write_paged(tp->phydev, 0x0001, 0x10, 0xf01b);
2153
2154	/* We may have called phy_speed_down before */
2155	phy_speed_up(tp->phydev);
2156
2157	if (rtl_supports_eee(tp))
2158		rtl_enable_eee(tp);
2159
2160	genphy_soft_reset(tp->phydev);
2161}
2162
2163static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
2164{
2165	rtl_unlock_config_regs(tp);
2166
2167	RTL_W32(tp, MAC4, addr[4] | addr[5] << 8);
2168	rtl_pci_commit(tp);
2169
2170	RTL_W32(tp, MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24);
2171	rtl_pci_commit(tp);
2172
2173	if (tp->mac_version == RTL_GIGA_MAC_VER_34)
2174		rtl_rar_exgmac_set(tp, addr);
2175
2176	rtl_lock_config_regs(tp);
2177}
2178
2179static int rtl_set_mac_address(struct net_device *dev, void *p)
2180{
2181	struct rtl8169_private *tp = netdev_priv(dev);
2182	int ret;
2183
2184	ret = eth_mac_addr(dev, p);
2185	if (ret)
2186		return ret;
2187
2188	rtl_rar_set(tp, dev->dev_addr);
2189
2190	return 0;
2191}
2192
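/* With Wake-on-LAN armed, these chip versions need the receiver to keep
 * accepting broadcast, multicast and directed frames while suspended so that
 * wakeup packets can still be seen.
 */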
2193static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
2194{
2195	switch (tp->mac_version) {
2196	case RTL_GIGA_MAC_VER_25:
2197	case RTL_GIGA_MAC_VER_26:
2198	case RTL_GIGA_MAC_VER_29:
2199	case RTL_GIGA_MAC_VER_30:
2200	case RTL_GIGA_MAC_VER_32:
2201	case RTL_GIGA_MAC_VER_33:
2202	case RTL_GIGA_MAC_VER_34:
2203	case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_63:
2204		RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) |
2205			AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
2206		break;
2207	default:
2208		break;
2209	}
2210}
2211
2212static void rtl_pll_power_down(struct rtl8169_private *tp)
2213{
2214	if (r8168_check_dash(tp))
2215		return;
2216
2217	if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
2218	    tp->mac_version == RTL_GIGA_MAC_VER_33)
2219		rtl_ephy_write(tp, 0x19, 0xff64);
2220
2221	if (device_may_wakeup(tp_to_dev(tp))) {
2222		phy_speed_down(tp->phydev, false);
2223		rtl_wol_suspend_quirk(tp);
2224		return;
2225	}
2226
2227	switch (tp->mac_version) {
2228	case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
2229	case RTL_GIGA_MAC_VER_37:
2230	case RTL_GIGA_MAC_VER_39:
2231	case RTL_GIGA_MAC_VER_43:
2232	case RTL_GIGA_MAC_VER_44:
2233	case RTL_GIGA_MAC_VER_45:
2234	case RTL_GIGA_MAC_VER_46:
2235	case RTL_GIGA_MAC_VER_47:
2236	case RTL_GIGA_MAC_VER_48:
2237	case RTL_GIGA_MAC_VER_50 ... RTL_GIGA_MAC_VER_63:
2238		RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
2239		break;
2240	case RTL_GIGA_MAC_VER_40:
2241	case RTL_GIGA_MAC_VER_41:
2242	case RTL_GIGA_MAC_VER_49:
2243		rtl_eri_clear_bits(tp, 0x1a8, 0xfc000000);
2244		RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
2245		break;
2246	default:
2247		break;
2248	}
2249}
2250
2251static void rtl_pll_power_up(struct rtl8169_private *tp)
2252{
2253	switch (tp->mac_version) {
2254	case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
2255	case RTL_GIGA_MAC_VER_37:
2256	case RTL_GIGA_MAC_VER_39:
2257	case RTL_GIGA_MAC_VER_43:
2258		RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0x80);
2259		break;
2260	case RTL_GIGA_MAC_VER_44:
2261	case RTL_GIGA_MAC_VER_45:
2262	case RTL_GIGA_MAC_VER_46:
2263	case RTL_GIGA_MAC_VER_47:
2264	case RTL_GIGA_MAC_VER_48:
2265	case RTL_GIGA_MAC_VER_50 ... RTL_GIGA_MAC_VER_63:
2266		RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
2267		break;
2268	case RTL_GIGA_MAC_VER_40:
2269	case RTL_GIGA_MAC_VER_41:
2270	case RTL_GIGA_MAC_VER_49:
2271		RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
2272		rtl_eri_set_bits(tp, 0x1a8, 0xfc000000);
2273		break;
2274	default:
2275		break;
2276	}
2277
2278	phy_resume(tp->phydev);
2279}
2280
2281static void rtl_init_rxcfg(struct rtl8169_private *tp)
2282{
2283	switch (tp->mac_version) {
2284	case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
2285	case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17:
2286		RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
2287		break;
2288	case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
2289	case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36:
2290	case RTL_GIGA_MAC_VER_38:
2291		RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
2292		break;
2293	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52:
2294		RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
2295		break;
2296	case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_63:
2297		RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST);
2298		break;
2299	default:
2300		RTL_W32(tp, RxConfig, RX128_INT_EN | RX_DMA_BURST);
2301		break;
2302	}
2303}
2304
2305static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
2306{
2307	tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0;
2308}
2309
2310static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
2311{
2312	RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
2313	RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1);
2314}
2315
2316static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
2317{
2318	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
2319	RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1);
2320}
2321
2322static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp)
2323{
2324	RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
2325}
2326
2327static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
2328{
2329	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
2330}
2331
2332static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
2333{
2334	RTL_W8(tp, MaxTxPacketSize, 0x3f);
2335	RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
2336	RTL_W8(tp, Config4, RTL_R8(tp, Config4) | 0x01);
2337}
2338
2339static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
2340{
2341	RTL_W8(tp, MaxTxPacketSize, 0x0c);
2342	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
2343	RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01);
2344}
2345
2346static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp)
2347{
2348	RTL_W8(tp, Config4, RTL_R8(tp, Config4) | (1 << 0));
2349}
2350
2351static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
2352{
2353	RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0));
2354}
2355
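/* Enable or disable jumbo frame support based on the current MTU. Several chip
 * families also need the PCIe max read request size lowered to 512 bytes while
 * jumbo frames are enabled; it is restored to 4096 bytes otherwise.
 */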
2356static void rtl_jumbo_config(struct rtl8169_private *tp)
2357{
2358	bool jumbo = tp->dev->mtu > ETH_DATA_LEN;
2359
2360	rtl_unlock_config_regs(tp);
2361	switch (tp->mac_version) {
2362	case RTL_GIGA_MAC_VER_12:
2363	case RTL_GIGA_MAC_VER_17:
2364		if (jumbo) {
2365			pcie_set_readrq(tp->pci_dev, 512);
2366			r8168b_1_hw_jumbo_enable(tp);
2367		} else {
2368			r8168b_1_hw_jumbo_disable(tp);
2369		}
2370		break;
2371	case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26:
2372		if (jumbo) {
2373			pcie_set_readrq(tp->pci_dev, 512);
2374			r8168c_hw_jumbo_enable(tp);
2375		} else {
2376			r8168c_hw_jumbo_disable(tp);
2377		}
2378		break;
2379	case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28:
2380		if (jumbo)
2381			r8168dp_hw_jumbo_enable(tp);
2382		else
2383			r8168dp_hw_jumbo_disable(tp);
2384		break;
2385	case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_33:
2386		if (jumbo) {
2387			pcie_set_readrq(tp->pci_dev, 512);
2388			r8168e_hw_jumbo_enable(tp);
2389		} else {
2390			r8168e_hw_jumbo_disable(tp);
2391		}
2392		break;
2393	default:
2394		break;
2395	}
2396	rtl_lock_config_regs(tp);
2397
2398	if (!jumbo && pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
2399		pcie_set_readrq(tp->pci_dev, 4096);
2400}
2401
2402DECLARE_RTL_COND(rtl_chipcmd_cond)
2403{
2404	return RTL_R8(tp, ChipCmd) & CmdReset;
2405}
2406
2407static void rtl_hw_reset(struct rtl8169_private *tp)
2408{
2409	RTL_W8(tp, ChipCmd, CmdReset);
2410
2411	rtl_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
2412}
2413
2414static void rtl_request_firmware(struct rtl8169_private *tp)
2415{
2416	struct rtl_fw *rtl_fw;
2417
2418	/* firmware loaded already or no firmware available */
2419	if (tp->rtl_fw || !tp->fw_name)
2420		return;
2421
2422	rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
2423	if (!rtl_fw)
2424		return;
2425
2426	rtl_fw->phy_write = rtl_writephy;
2427	rtl_fw->phy_read = rtl_readphy;
2428	rtl_fw->mac_mcu_write = mac_mcu_write;
2429	rtl_fw->mac_mcu_read = mac_mcu_read;
2430	rtl_fw->fw_name = tp->fw_name;
2431	rtl_fw->dev = tp_to_dev(tp);
2432
2433	if (rtl_fw_request_firmware(rtl_fw))
2434		kfree(rtl_fw);
2435	else
2436		tp->rtl_fw = rtl_fw;
2437}
2438
2439static void rtl_rx_close(struct rtl8169_private *tp)
2440{
2441	RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
2442}
2443
2444DECLARE_RTL_COND(rtl_npq_cond)
2445{
2446	return RTL_R8(tp, TxPoll) & NPQ;
2447}
2448
2449DECLARE_RTL_COND(rtl_txcfg_empty_cond)
2450{
2451	return RTL_R32(tp, TxConfig) & TXCFG_EMPTY;
2452}
2453
2454DECLARE_RTL_COND(rtl_rxtx_empty_cond)
2455{
2456	return (RTL_R8(tp, MCU) & RXTX_EMPTY) == RXTX_EMPTY;
2457}
2458
2459DECLARE_RTL_COND(rtl_rxtx_empty_cond_2)
2460{
2461	/* IntrMitigate has new functionality on RTL8125 */
2462	return (RTL_R16(tp, IntrMitigate) & 0x0103) == 0x0103;
2463}
2464
2465static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp)
2466{
2467	switch (tp->mac_version) {
2468	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52:
2469		rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42);
2470		rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
2471		break;
2472	case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_61:
2473		rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
2474		break;
2475	case RTL_GIGA_MAC_VER_63:
2476		RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
2477		rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
2478		rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond_2, 100, 42);
2479		break;
2480	default:
2481		break;
2482	}
2483}
2484
2485static void rtl_enable_rxdvgate(struct rtl8169_private *tp)
2486{
2487	RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN);
2488	fsleep(2000);
2489	rtl_wait_txrx_fifo_empty(tp);
2490}
2491
2492static void rtl_set_tx_config_registers(struct rtl8169_private *tp)
2493{
2494	u32 val = TX_DMA_BURST << TxDMAShift |
2495		  InterFrameGap << TxInterFrameGapShift;
2496
2497	if (rtl_is_8168evl_up(tp))
2498		val |= TXCFG_AUTO_FIFO;
2499
2500	RTL_W32(tp, TxConfig, val);
2501}
2502
2503static void rtl_set_rx_max_size(struct rtl8169_private *tp)
2504{
2505	/* Low hurts. Let's disable the filtering. */
2506	RTL_W16(tp, RxMaxSize, R8169_RX_BUF_SIZE + 1);
2507}
2508
2509static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp)
2510{
2511	/*
2512	 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
2513	 * register to be written before TxDescAddrLow to work.
2514	 * Switching from MMIO to I/O access fixes the issue as well.
2515	 */
2516	RTL_W32(tp, TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
2517	RTL_W32(tp, TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32));
2518	RTL_W32(tp, RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
2519	RTL_W32(tp, RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
2520}
2521
2522static void rtl8169_set_magic_reg(struct rtl8169_private *tp)
2523{
2524	u32 val;
2525
2526	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
2527		val = 0x000fff00;
2528	else if (tp->mac_version == RTL_GIGA_MAC_VER_06)
2529		val = 0x00ffff00;
2530	else
2531		return;
2532
2533	if (RTL_R8(tp, Config2) & PCI_Clock_66MHz)
2534		val |= 0xff;
2535
2536	RTL_W32(tp, 0x7c, val);
2537}
2538
2539static void rtl_set_rx_mode(struct net_device *dev)
2540{
2541	u32 rx_mode = AcceptBroadcast | AcceptMyPhys | AcceptMulticast;
2542	/* Multicast hash filter */
2543	u32 mc_filter[2] = { 0xffffffff, 0xffffffff };
2544	struct rtl8169_private *tp = netdev_priv(dev);
2545	u32 tmp;
2546
2547	if (dev->flags & IFF_PROMISC) {
2548		rx_mode |= AcceptAllPhys;
2549	} else if (netdev_mc_count(dev) > MC_FILTER_LIMIT ||
2550		   dev->flags & IFF_ALLMULTI ||
2551		   tp->mac_version == RTL_GIGA_MAC_VER_35) {
2552		/* accept all multicasts */
2553	} else if (netdev_mc_empty(dev)) {
2554		rx_mode &= ~AcceptMulticast;
2555	} else {
2556		struct netdev_hw_addr *ha;
2557
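		/* Hash multicast filter: the top 6 bits of each address CRC32
		 * select one of 64 bits in MAR0..MAR7 (bit_nr >> 5 picks the
		 * 32-bit word, bit_nr & 31 the bit within it).
		 */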
2558		mc_filter[1] = mc_filter[0] = 0;
2559		netdev_for_each_mc_addr(ha, dev) {
2560			u32 bit_nr = eth_hw_addr_crc(ha) >> 26;
2561			mc_filter[bit_nr >> 5] |= BIT(bit_nr & 31);
2562		}
2563
2564		if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
2565			tmp = mc_filter[0];
2566			mc_filter[0] = swab32(mc_filter[1]);
2567			mc_filter[1] = swab32(tmp);
2568		}
2569	}
2570
2571	RTL_W32(tp, MAR0 + 4, mc_filter[1]);
2572	RTL_W32(tp, MAR0 + 0, mc_filter[0]);
2573
2574	tmp = RTL_R32(tp, RxConfig);
2575	RTL_W32(tp, RxConfig, (tmp & ~RX_CONFIG_ACCEPT_OK_MASK) | rx_mode);
2576}
2577
2578DECLARE_RTL_COND(rtl_csiar_cond)
2579{
2580	return RTL_R32(tp, CSIAR) & CSIAR_FLAG;
2581}
2582
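/* CSI provides indirect access to the chip's PCI config space: data goes
 * through CSIDR and the address/command through CSIAR; CSIAR_FLAG is polled
 * for completion (it clears once a write finished and is set once read data
 * is valid).
 */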
2583static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
2584{
2585	u32 func = PCI_FUNC(tp->pci_dev->devfn);
2586
2587	RTL_W32(tp, CSIDR, value);
2588	RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
2589		CSIAR_BYTE_ENABLE | func << 16);
2590
2591	rtl_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
2592}
2593
2594static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
2595{
2596	u32 func = PCI_FUNC(tp->pci_dev->devfn);
2597
2598	RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | func << 16 |
2599		CSIAR_BYTE_ENABLE);
2600
2601	return rtl_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
2602		RTL_R32(tp, CSIDR) : ~0;
2603}
2604
2605static void rtl_csi_access_enable(struct rtl8169_private *tp, u8 val)
2606{
2607	struct pci_dev *pdev = tp->pci_dev;
2608	u32 csi;
2609
2610	/* According to Realtek the value at config space address 0x070f
2611	 * controls the L0s/L1 entrance latency. We try standard ECAM access
2612	 * first and if it fails fall back to CSI.
2613	 */
2614	if (pdev->cfg_size > 0x070f &&
2615	    pci_write_config_byte(pdev, 0x070f, val) == PCIBIOS_SUCCESSFUL)
2616		return;
2617
2618	netdev_notice_once(tp->dev,
2619		"No native access to PCI extended config space, falling back to CSI\n");
2620	csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
2621	rtl_csi_write(tp, 0x070c, csi | val << 24);
2622}
2623
2624static void rtl_set_def_aspm_entry_latency(struct rtl8169_private *tp)
2625{
2626	rtl_csi_access_enable(tp, 0x27);
2627}
2628
2629struct ephy_info {
2630	unsigned int offset;
2631	u16 mask;
2632	u16 bits;
2633};
2634
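/* Apply a table of EPHY fixups: for each entry, read the register at 'offset',
 * clear the 'mask' bits, OR in 'bits' and write the result back.
 */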
2635static void __rtl_ephy_init(struct rtl8169_private *tp,
2636			    const struct ephy_info *e, int len)
2637{
2638	u16 w;
2639
2640	while (len-- > 0) {
2641		w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits;
2642		rtl_ephy_write(tp, e->offset, w);
2643		e++;
2644	}
2645}
2646
2647#define rtl_ephy_init(tp, a) __rtl_ephy_init(tp, a, ARRAY_SIZE(a))
2648
2649static void rtl_disable_clock_request(struct rtl8169_private *tp)
2650{
2651	pcie_capability_clear_word(tp->pci_dev, PCI_EXP_LNKCTL,
2652				   PCI_EXP_LNKCTL_CLKREQ_EN);
2653}
2654
2655static void rtl_enable_clock_request(struct rtl8169_private *tp)
2656{
2657	pcie_capability_set_word(tp->pci_dev, PCI_EXP_LNKCTL,
2658				 PCI_EXP_LNKCTL_CLKREQ_EN);
2659}
2660
2661static void rtl_pcie_state_l2l3_disable(struct rtl8169_private *tp)
2662{
2663	/* work around an issue when PCI reset occurs during L2/L3 state */
2664	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Rdy_to_L23);
2665}
2666
2667static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
2668{
2669	/* Don't enable ASPM in the chip if OS can't control ASPM */
2670	if (enable && tp->aspm_manageable) {
2671		RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en);
2672		RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn);
2673	} else {
2674		RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
2675		RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
2676	}
2677
2678	udelay(10);
2679}
2680
2681static void rtl_set_fifo_size(struct rtl8169_private *tp, u16 rx_stat,
2682			      u16 tx_stat, u16 rx_dyn, u16 tx_dyn)
2683{
2684	/* Usage of dynamic vs. static FIFO is controlled by bit
2685	 * TXCFG_AUTO_FIFO. Exact meaning of FIFO values isn't known.
2686	 */
2687	rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, (rx_stat << 16) | rx_dyn);
2688	rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, (tx_stat << 16) | tx_dyn);
2689}
2690
2691static void rtl8168g_set_pause_thresholds(struct rtl8169_private *tp,
2692					  u8 low, u8 high)
2693{
2694	/* FIFO thresholds for pause flow control */
2695	rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, low);
2696	rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, high);
2697}
2698
2699static void rtl_hw_start_8168b(struct rtl8169_private *tp)
2700{
2701	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
2702}
2703
2704static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
2705{
2706	RTL_W8(tp, Config1, RTL_R8(tp, Config1) | Speed_down);
2707
2708	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
2709
2710	rtl_disable_clock_request(tp);
2711}
2712
2713static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
2714{
2715	static const struct ephy_info e_info_8168cp[] = {
2716		{ 0x01, 0,	0x0001 },
2717		{ 0x02, 0x0800,	0x1000 },
2718		{ 0x03, 0,	0x0042 },
2719		{ 0x06, 0x0080,	0x0000 },
2720		{ 0x07, 0,	0x2000 }
2721	};
2722
2723	rtl_set_def_aspm_entry_latency(tp);
2724
2725	rtl_ephy_init(tp, e_info_8168cp);
2726
2727	__rtl_hw_start_8168cp(tp);
2728}
2729
2730static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
2731{
2732	rtl_set_def_aspm_entry_latency(tp);
2733
2734	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
2735}
2736
2737static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
2738{
2739	rtl_set_def_aspm_entry_latency(tp);
2740
2741	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
2742
2743	/* Magic. */
2744	RTL_W8(tp, DBG_REG, 0x20);
2745}
2746
2747static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
2748{
2749	static const struct ephy_info e_info_8168c_1[] = {
2750		{ 0x02, 0x0800,	0x1000 },
2751		{ 0x03, 0,	0x0002 },
2752		{ 0x06, 0x0080,	0x0000 }
2753	};
2754
2755	rtl_set_def_aspm_entry_latency(tp);
2756
2757	RTL_W8(tp, DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
2758
2759	rtl_ephy_init(tp, e_info_8168c_1);
2760
2761	__rtl_hw_start_8168cp(tp);
2762}
2763
2764static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
2765{
2766	static const struct ephy_info e_info_8168c_2[] = {
2767		{ 0x01, 0,	0x0001 },
2768		{ 0x03, 0x0400,	0x0020 }
2769	};
2770
2771	rtl_set_def_aspm_entry_latency(tp);
2772
2773	rtl_ephy_init(tp, e_info_8168c_2);
2774
2775	__rtl_hw_start_8168cp(tp);
2776}
2777
2778static void rtl_hw_start_8168c_3(struct rtl8169_private *tp)
2779{
2780	rtl_hw_start_8168c_2(tp);
2781}
2782
2783static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
2784{
2785	rtl_set_def_aspm_entry_latency(tp);
2786
2787	__rtl_hw_start_8168cp(tp);
2788}
2789
2790static void rtl_hw_start_8168d(struct rtl8169_private *tp)
2791{
2792	rtl_set_def_aspm_entry_latency(tp);
2793
2794	rtl_disable_clock_request(tp);
2795}
2796
2797static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
2798{
2799	static const struct ephy_info e_info_8168d_4[] = {
2800		{ 0x0b, 0x0000,	0x0048 },
2801		{ 0x19, 0x0020,	0x0050 },
2802		{ 0x0c, 0x0100,	0x0020 },
2803		{ 0x10, 0x0004,	0x0000 },
2804	};
2805
2806	rtl_set_def_aspm_entry_latency(tp);
2807
2808	rtl_ephy_init(tp, e_info_8168d_4);
2809
2810	rtl_enable_clock_request(tp);
2811}
2812
2813static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
2814{
2815	static const struct ephy_info e_info_8168e_1[] = {
2816		{ 0x00, 0x0200,	0x0100 },
2817		{ 0x00, 0x0000,	0x0004 },
2818		{ 0x06, 0x0002,	0x0001 },
2819		{ 0x06, 0x0000,	0x0030 },
2820		{ 0x07, 0x0000,	0x2000 },
2821		{ 0x00, 0x0000,	0x0020 },
2822		{ 0x03, 0x5800,	0x2000 },
2823		{ 0x03, 0x0000,	0x0001 },
2824		{ 0x01, 0x0800,	0x1000 },
2825		{ 0x07, 0x0000,	0x4000 },
2826		{ 0x1e, 0x0000,	0x2000 },
2827		{ 0x19, 0xffff,	0xfe6c },
2828		{ 0x0a, 0x0000,	0x0040 }
2829	};
2830
2831	rtl_set_def_aspm_entry_latency(tp);
2832
2833	rtl_ephy_init(tp, e_info_8168e_1);
2834
2835	rtl_disable_clock_request(tp);
2836
2837	/* Reset tx FIFO pointer */
2838	RTL_W32(tp, MISC, RTL_R32(tp, MISC) | TXPLA_RST);
2839	RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~TXPLA_RST);
2840
2841	RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en);
2842}
2843
2844static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
2845{
2846	static const struct ephy_info e_info_8168e_2[] = {
2847		{ 0x09, 0x0000,	0x0080 },
2848		{ 0x19, 0x0000,	0x0224 },
2849		{ 0x00, 0x0000,	0x0004 },
2850		{ 0x0c, 0x3df0,	0x0200 },
2851	};
2852
2853	rtl_set_def_aspm_entry_latency(tp);
2854
2855	rtl_ephy_init(tp, e_info_8168e_2);
2856
2857	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
2858	rtl_eri_write(tp, 0xb8, ERIAR_MASK_1111, 0x0000);
2859	rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06);
2860	rtl_eri_set_bits(tp, 0x0d4, 0x1f00);
2861	rtl_eri_set_bits(tp, 0x1d0, BIT(1));
2862	rtl_reset_packet_filter(tp);
2863	rtl_eri_set_bits(tp, 0x1b0, BIT(4));
2864	rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050);
2865	rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060);
2866
2867	rtl_disable_clock_request(tp);
2868
2869	RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
2870
2871	rtl8168_config_eee_mac(tp);
2872
2873	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
2874	RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
2875	RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en);
2876
2877	rtl_hw_aspm_clkreq_enable(tp, true);
2878}
2879
2880static void rtl_hw_start_8168f(struct rtl8169_private *tp)
2881{
2882	rtl_set_def_aspm_entry_latency(tp);
2883
2884	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
2885	rtl_eri_write(tp, 0xb8, ERIAR_MASK_1111, 0x0000);
2886	rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06);
2887	rtl_reset_packet_filter(tp);
2888	rtl_eri_set_bits(tp, 0x1b0, BIT(4));
2889	rtl_eri_set_bits(tp, 0x1d0, BIT(4) | BIT(1));
2890	rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050);
2891	rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060);
2892
2893	rtl_disable_clock_request(tp);
2894
2895	RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
2896	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
2897	RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
2898	RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en);
2899
2900	rtl8168_config_eee_mac(tp);
2901}
2902
2903static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
2904{
2905	static const struct ephy_info e_info_8168f_1[] = {
2906		{ 0x06, 0x00c0,	0x0020 },
2907		{ 0x08, 0x0001,	0x0002 },
2908		{ 0x09, 0x0000,	0x0080 },
2909		{ 0x19, 0x0000,	0x0224 },
2910		{ 0x00, 0x0000,	0x0008 },
2911		{ 0x0c, 0x3df0,	0x0200 },
2912	};
2913
2914	rtl_hw_start_8168f(tp);
2915
2916	rtl_ephy_init(tp, e_info_8168f_1);
2917
2918	rtl_eri_set_bits(tp, 0x0d4, 0x1f00);
2919}
2920
2921static void rtl_hw_start_8411(struct rtl8169_private *tp)
2922{
2923	static const struct ephy_info e_info_8168f_1[] = {
2924		{ 0x06, 0x00c0,	0x0020 },
2925		{ 0x0f, 0xffff,	0x5200 },
2926		{ 0x19, 0x0000,	0x0224 },
2927		{ 0x00, 0x0000,	0x0008 },
2928		{ 0x0c, 0x3df0,	0x0200 },
2929	};
2930
2931	rtl_hw_start_8168f(tp);
2932	rtl_pcie_state_l2l3_disable(tp);
2933
2934	rtl_ephy_init(tp, e_info_8168f_1);
2935
2936	rtl_eri_set_bits(tp, 0x0d4, 0x0c00);
2937}
2938
2939static void rtl_hw_start_8168g(struct rtl8169_private *tp)
2940{
2941	rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06);
2942	rtl8168g_set_pause_thresholds(tp, 0x38, 0x48);
2943
2944	rtl_set_def_aspm_entry_latency(tp);
2945
2946	rtl_reset_packet_filter(tp);
2947	rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f);
2948
2949	RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
2950
2951	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
2952	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
2953	rtl_eri_set_bits(tp, 0x0d4, 0x1f80);
2954
2955	rtl8168_config_eee_mac(tp);
2956
2957	rtl_w0w1_eri(tp, 0x2fc, 0x01, 0x06);
2958	rtl_eri_clear_bits(tp, 0x1b0, BIT(12));
2959
2960	rtl_pcie_state_l2l3_disable(tp);
2961}
2962
2963static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
2964{
2965	static const struct ephy_info e_info_8168g_1[] = {
2966		{ 0x00, 0x0008,	0x0000 },
2967		{ 0x0c, 0x3ff0,	0x0820 },
2968		{ 0x1e, 0x0000,	0x0001 },
2969		{ 0x19, 0x8000,	0x0000 }
2970	};
2971
2972	rtl_hw_start_8168g(tp);
2973
2974	/* disable ASPM and clock request before accessing ephy */
2975	rtl_hw_aspm_clkreq_enable(tp, false);
2976	rtl_ephy_init(tp, e_info_8168g_1);
2977	rtl_hw_aspm_clkreq_enable(tp, true);
2978}
2979
2980static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
2981{
2982	static const struct ephy_info e_info_8168g_2[] = {
2983		{ 0x00, 0x0008,	0x0000 },
2984		{ 0x0c, 0x3ff0,	0x0820 },
2985		{ 0x19, 0xffff,	0x7c00 },
2986		{ 0x1e, 0xffff,	0x20eb },
2987		{ 0x0d, 0xffff,	0x1666 },
2988		{ 0x00, 0xffff,	0x10a3 },
2989		{ 0x06, 0xffff,	0xf050 },
2990		{ 0x04, 0x0000,	0x0010 },
2991		{ 0x1d, 0x4000,	0x0000 },
2992	};
2993
2994	rtl_hw_start_8168g(tp);
2995
2996	/* disable ASPM and clock request before accessing ephy */
2997	rtl_hw_aspm_clkreq_enable(tp, false);
2998	rtl_ephy_init(tp, e_info_8168g_2);
2999}
3000
3001static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
3002{
3003	static const struct ephy_info e_info_8411_2[] = {
3004		{ 0x00, 0x0008,	0x0000 },
3005		{ 0x0c, 0x37d0,	0x0820 },
3006		{ 0x1e, 0x0000,	0x0001 },
3007		{ 0x19, 0x8021,	0x0000 },
3008		{ 0x1e, 0x0000,	0x2000 },
3009		{ 0x0d, 0x0100,	0x0200 },
3010		{ 0x00, 0x0000,	0x0080 },
3011		{ 0x06, 0x0000,	0x0010 },
3012		{ 0x04, 0x0000,	0x0010 },
3013		{ 0x1d, 0x0000,	0x4000 },
3014	};
3015
3016	rtl_hw_start_8168g(tp);
3017
3018	/* disable ASPM and clock request before accessing ephy */
3019	rtl_hw_aspm_clkreq_enable(tp, false);
3020	rtl_ephy_init(tp, e_info_8411_2);
3021
3022	/* The following Realtek-provided magic fixes an issue with the RX unit
3023	 * getting confused after the PHY has been powered down.
3024	 */
3025	r8168_mac_ocp_write(tp, 0xFC28, 0x0000);
3026	r8168_mac_ocp_write(tp, 0xFC2A, 0x0000);
3027	r8168_mac_ocp_write(tp, 0xFC2C, 0x0000);
3028	r8168_mac_ocp_write(tp, 0xFC2E, 0x0000);
3029	r8168_mac_ocp_write(tp, 0xFC30, 0x0000);
3030	r8168_mac_ocp_write(tp, 0xFC32, 0x0000);
3031	r8168_mac_ocp_write(tp, 0xFC34, 0x0000);
3032	r8168_mac_ocp_write(tp, 0xFC36, 0x0000);
3033	mdelay(3);
3034	r8168_mac_ocp_write(tp, 0xFC26, 0x0000);
3035
3036	r8168_mac_ocp_write(tp, 0xF800, 0xE008);
3037	r8168_mac_ocp_write(tp, 0xF802, 0xE00A);
3038	r8168_mac_ocp_write(tp, 0xF804, 0xE00C);
3039	r8168_mac_ocp_write(tp, 0xF806, 0xE00E);
3040	r8168_mac_ocp_write(tp, 0xF808, 0xE027);
3041	r8168_mac_ocp_write(tp, 0xF80A, 0xE04F);
3042	r8168_mac_ocp_write(tp, 0xF80C, 0xE05E);
3043	r8168_mac_ocp_write(tp, 0xF80E, 0xE065);
3044	r8168_mac_ocp_write(tp, 0xF810, 0xC602);
3045	r8168_mac_ocp_write(tp, 0xF812, 0xBE00);
3046	r8168_mac_ocp_write(tp, 0xF814, 0x0000);
3047	r8168_mac_ocp_write(tp, 0xF816, 0xC502);
3048	r8168_mac_ocp_write(tp, 0xF818, 0xBD00);
3049	r8168_mac_ocp_write(tp, 0xF81A, 0x074C);
3050	r8168_mac_ocp_write(tp, 0xF81C, 0xC302);
3051	r8168_mac_ocp_write(tp, 0xF81E, 0xBB00);
3052	r8168_mac_ocp_write(tp, 0xF820, 0x080A);
3053	r8168_mac_ocp_write(tp, 0xF822, 0x6420);
3054	r8168_mac_ocp_write(tp, 0xF824, 0x48C2);
3055	r8168_mac_ocp_write(tp, 0xF826, 0x8C20);
3056	r8168_mac_ocp_write(tp, 0xF828, 0xC516);
3057	r8168_mac_ocp_write(tp, 0xF82A, 0x64A4);
3058	r8168_mac_ocp_write(tp, 0xF82C, 0x49C0);
3059	r8168_mac_ocp_write(tp, 0xF82E, 0xF009);
3060	r8168_mac_ocp_write(tp, 0xF830, 0x74A2);
3061	r8168_mac_ocp_write(tp, 0xF832, 0x8CA5);
3062	r8168_mac_ocp_write(tp, 0xF834, 0x74A0);
3063	r8168_mac_ocp_write(tp, 0xF836, 0xC50E);
3064	r8168_mac_ocp_write(tp, 0xF838, 0x9CA2);
3065	r8168_mac_ocp_write(tp, 0xF83A, 0x1C11);
3066	r8168_mac_ocp_write(tp, 0xF83C, 0x9CA0);
3067	r8168_mac_ocp_write(tp, 0xF83E, 0xE006);
3068	r8168_mac_ocp_write(tp, 0xF840, 0x74F8);
3069	r8168_mac_ocp_write(tp, 0xF842, 0x48C4);
3070	r8168_mac_ocp_write(tp, 0xF844, 0x8CF8);
3071	r8168_mac_ocp_write(tp, 0xF846, 0xC404);
3072	r8168_mac_ocp_write(tp, 0xF848, 0xBC00);
3073	r8168_mac_ocp_write(tp, 0xF84A, 0xC403);
3074	r8168_mac_ocp_write(tp, 0xF84C, 0xBC00);
3075	r8168_mac_ocp_write(tp, 0xF84E, 0x0BF2);
3076	r8168_mac_ocp_write(tp, 0xF850, 0x0C0A);
3077	r8168_mac_ocp_write(tp, 0xF852, 0xE434);
3078	r8168_mac_ocp_write(tp, 0xF854, 0xD3C0);
3079	r8168_mac_ocp_write(tp, 0xF856, 0x49D9);
3080	r8168_mac_ocp_write(tp, 0xF858, 0xF01F);
3081	r8168_mac_ocp_write(tp, 0xF85A, 0xC526);
3082	r8168_mac_ocp_write(tp, 0xF85C, 0x64A5);
3083	r8168_mac_ocp_write(tp, 0xF85E, 0x1400);
3084	r8168_mac_ocp_write(tp, 0xF860, 0xF007);
3085	r8168_mac_ocp_write(tp, 0xF862, 0x0C01);
3086	r8168_mac_ocp_write(tp, 0xF864, 0x8CA5);
3087	r8168_mac_ocp_write(tp, 0xF866, 0x1C15);
3088	r8168_mac_ocp_write(tp, 0xF868, 0xC51B);
3089	r8168_mac_ocp_write(tp, 0xF86A, 0x9CA0);
3090	r8168_mac_ocp_write(tp, 0xF86C, 0xE013);
3091	r8168_mac_ocp_write(tp, 0xF86E, 0xC519);
3092	r8168_mac_ocp_write(tp, 0xF870, 0x74A0);
3093	r8168_mac_ocp_write(tp, 0xF872, 0x48C4);
3094	r8168_mac_ocp_write(tp, 0xF874, 0x8CA0);
3095	r8168_mac_ocp_write(tp, 0xF876, 0xC516);
3096	r8168_mac_ocp_write(tp, 0xF878, 0x74A4);
3097	r8168_mac_ocp_write(tp, 0xF87A, 0x48C8);
3098	r8168_mac_ocp_write(tp, 0xF87C, 0x48CA);
3099	r8168_mac_ocp_write(tp, 0xF87E, 0x9CA4);
3100	r8168_mac_ocp_write(tp, 0xF880, 0xC512);
3101	r8168_mac_ocp_write(tp, 0xF882, 0x1B00);
3102	r8168_mac_ocp_write(tp, 0xF884, 0x9BA0);
3103	r8168_mac_ocp_write(tp, 0xF886, 0x1B1C);
3104	r8168_mac_ocp_write(tp, 0xF888, 0x483F);
3105	r8168_mac_ocp_write(tp, 0xF88A, 0x9BA2);
3106	r8168_mac_ocp_write(tp, 0xF88C, 0x1B04);
3107	r8168_mac_ocp_write(tp, 0xF88E, 0xC508);
3108	r8168_mac_ocp_write(tp, 0xF890, 0x9BA0);
3109	r8168_mac_ocp_write(tp, 0xF892, 0xC505);
3110	r8168_mac_ocp_write(tp, 0xF894, 0xBD00);
3111	r8168_mac_ocp_write(tp, 0xF896, 0xC502);
3112	r8168_mac_ocp_write(tp, 0xF898, 0xBD00);
3113	r8168_mac_ocp_write(tp, 0xF89A, 0x0300);
3114	r8168_mac_ocp_write(tp, 0xF89C, 0x051E);
3115	r8168_mac_ocp_write(tp, 0xF89E, 0xE434);
3116	r8168_mac_ocp_write(tp, 0xF8A0, 0xE018);
3117	r8168_mac_ocp_write(tp, 0xF8A2, 0xE092);
3118	r8168_mac_ocp_write(tp, 0xF8A4, 0xDE20);
3119	r8168_mac_ocp_write(tp, 0xF8A6, 0xD3C0);
3120	r8168_mac_ocp_write(tp, 0xF8A8, 0xC50F);
3121	r8168_mac_ocp_write(tp, 0xF8AA, 0x76A4);
3122	r8168_mac_ocp_write(tp, 0xF8AC, 0x49E3);
3123	r8168_mac_ocp_write(tp, 0xF8AE, 0xF007);
3124	r8168_mac_ocp_write(tp, 0xF8B0, 0x49C0);
3125	r8168_mac_ocp_write(tp, 0xF8B2, 0xF103);
3126	r8168_mac_ocp_write(tp, 0xF8B4, 0xC607);
3127	r8168_mac_ocp_write(tp, 0xF8B6, 0xBE00);
3128	r8168_mac_ocp_write(tp, 0xF8B8, 0xC606);
3129	r8168_mac_ocp_write(tp, 0xF8BA, 0xBE00);
3130	r8168_mac_ocp_write(tp, 0xF8BC, 0xC602);
3131	r8168_mac_ocp_write(tp, 0xF8BE, 0xBE00);
3132	r8168_mac_ocp_write(tp, 0xF8C0, 0x0C4C);
3133	r8168_mac_ocp_write(tp, 0xF8C2, 0x0C28);
3134	r8168_mac_ocp_write(tp, 0xF8C4, 0x0C2C);
3135	r8168_mac_ocp_write(tp, 0xF8C6, 0xDC00);
3136	r8168_mac_ocp_write(tp, 0xF8C8, 0xC707);
3137	r8168_mac_ocp_write(tp, 0xF8CA, 0x1D00);
3138	r8168_mac_ocp_write(tp, 0xF8CC, 0x8DE2);
3139	r8168_mac_ocp_write(tp, 0xF8CE, 0x48C1);
3140	r8168_mac_ocp_write(tp, 0xF8D0, 0xC502);
3141	r8168_mac_ocp_write(tp, 0xF8D2, 0xBD00);
3142	r8168_mac_ocp_write(tp, 0xF8D4, 0x00AA);
3143	r8168_mac_ocp_write(tp, 0xF8D6, 0xE0C0);
3144	r8168_mac_ocp_write(tp, 0xF8D8, 0xC502);
3145	r8168_mac_ocp_write(tp, 0xF8DA, 0xBD00);
3146	r8168_mac_ocp_write(tp, 0xF8DC, 0x0132);
3147
3148	r8168_mac_ocp_write(tp, 0xFC26, 0x8000);
3149
3150	r8168_mac_ocp_write(tp, 0xFC2A, 0x0743);
3151	r8168_mac_ocp_write(tp, 0xFC2C, 0x0801);
3152	r8168_mac_ocp_write(tp, 0xFC2E, 0x0BE9);
3153	r8168_mac_ocp_write(tp, 0xFC30, 0x02FD);
3154	r8168_mac_ocp_write(tp, 0xFC32, 0x0C25);
3155	r8168_mac_ocp_write(tp, 0xFC34, 0x00A9);
3156	r8168_mac_ocp_write(tp, 0xFC36, 0x012D);
3157
3158	rtl_hw_aspm_clkreq_enable(tp, true);
3159}
3160
3161static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
3162{
3163	static const struct ephy_info e_info_8168h_1[] = {
3164		{ 0x1e, 0x0800,	0x0001 },
3165		{ 0x1d, 0x0000,	0x0800 },
3166		{ 0x05, 0xffff,	0x2089 },
3167		{ 0x06, 0xffff,	0x5881 },
3168		{ 0x04, 0xffff,	0x854a },
3169		{ 0x01, 0xffff,	0x068b }
3170	};
3171	int rg_saw_cnt;
3172
3173	/* disable ASPM and clock request before accessing ephy */
3174	rtl_hw_aspm_clkreq_enable(tp, false);
3175	rtl_ephy_init(tp, e_info_8168h_1);
3176
3177	rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06);
3178	rtl8168g_set_pause_thresholds(tp, 0x38, 0x48);
3179
3180	rtl_set_def_aspm_entry_latency(tp);
3181
3182	rtl_reset_packet_filter(tp);
3183
3184	rtl_eri_set_bits(tp, 0xd4, 0x1f00);
3185	rtl_eri_set_bits(tp, 0xdc, 0x001c);
3186
3187	rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);
3188
3189	RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
3190
3191	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
3192	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
3193
3194	rtl8168_config_eee_mac(tp);
3195
3196	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
3197	RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
3198
3199	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);
3200
3201	rtl_eri_clear_bits(tp, 0x1b0, BIT(12));
3202
3203	rtl_pcie_state_l2l3_disable(tp);
3204
3205	rg_saw_cnt = phy_read_paged(tp->phydev, 0x0c42, 0x13) & 0x3fff;
3206	if (rg_saw_cnt > 0) {
3207		u16 sw_cnt_1ms_ini;
3208
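		/* Realtek-provided calibration: rg_saw_cnt is read from PHY
		 * page 0x0c42, reg 0x13 above; the 16000000 divisor presumably
		 * derives a per-millisecond count from a 16MHz reference
		 * (undocumented).
		 */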
3209		sw_cnt_1ms_ini = 16000000 / rg_saw_cnt;
3210		sw_cnt_1ms_ini &= 0x0fff;
3211		r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini);
3212	}
3213
3214	r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070);
3215	r8168_mac_ocp_modify(tp, 0xe052, 0x6000, 0x8008);
3216	r8168_mac_ocp_modify(tp, 0xe0d6, 0x01ff, 0x017f);
3217	r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f);
3218
3219	r8168_mac_ocp_write(tp, 0xe63e, 0x0001);
3220	r8168_mac_ocp_write(tp, 0xe63e, 0x0000);
3221	r8168_mac_ocp_write(tp, 0xc094, 0x0000);
3222	r8168_mac_ocp_write(tp, 0xc09e, 0x0000);
3223
3224	rtl_hw_aspm_clkreq_enable(tp, true);
3225}
3226
3227static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
3228{
3229	rtl8168ep_stop_cmac(tp);
3230
3231	rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06);
3232	rtl8168g_set_pause_thresholds(tp, 0x2f, 0x5f);
3233
3234	rtl_set_def_aspm_entry_latency(tp);
3235
3236	rtl_reset_packet_filter(tp);
3237
3238	rtl_eri_set_bits(tp, 0xd4, 0x1f80);
3239
3240	rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);
3241
3242	RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
3243
3244	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
3245	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
3246
3247	rtl8168_config_eee_mac(tp);
3248
3249	rtl_w0w1_eri(tp, 0x2fc, 0x01, 0x06);
3250
3251	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);
3252
3253	rtl_pcie_state_l2l3_disable(tp);
3254}
3255
3256static void rtl_hw_start_8168ep_1(struct rtl8169_private *tp)
3257{
3258	static const struct ephy_info e_info_8168ep_1[] = {
3259		{ 0x00, 0xffff,	0x10ab },
3260		{ 0x06, 0xffff,	0xf030 },
3261		{ 0x08, 0xffff,	0x2006 },
3262		{ 0x0d, 0xffff,	0x1666 },
3263		{ 0x0c, 0x3ff0,	0x0000 }
3264	};
3265
3266	/* disable ASPM and clock request before accessing ephy */
3267	rtl_hw_aspm_clkreq_enable(tp, false);
3268	rtl_ephy_init(tp, e_info_8168ep_1);
3269
3270	rtl_hw_start_8168ep(tp);
3271
3272	rtl_hw_aspm_clkreq_enable(tp, true);
3273}
3274
3275static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp)
3276{
3277	static const struct ephy_info e_info_8168ep_2[] = {
3278		{ 0x00, 0xffff,	0x10a3 },
3279		{ 0x19, 0xffff,	0xfc00 },
3280		{ 0x1e, 0xffff,	0x20ea }
3281	};
3282
3283	/* disable ASPM and clock request before accessing ephy */
3284	rtl_hw_aspm_clkreq_enable(tp, false);
3285	rtl_ephy_init(tp, e_info_8168ep_2);
3286
3287	rtl_hw_start_8168ep(tp);
3288
3289	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
3290	RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
3291
3292	rtl_hw_aspm_clkreq_enable(tp, true);
3293}
3294
3295static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
3296{
3297	static const struct ephy_info e_info_8168ep_3[] = {
3298		{ 0x00, 0x0000,	0x0080 },
3299		{ 0x0d, 0x0100,	0x0200 },
3300		{ 0x19, 0x8021,	0x0000 },
3301		{ 0x1e, 0x0000,	0x2000 },
3302	};
3303
3304	/* disable ASPM and clock request before accessing ephy */
3305	rtl_hw_aspm_clkreq_enable(tp, false);
3306	rtl_ephy_init(tp, e_info_8168ep_3);
3307
3308	rtl_hw_start_8168ep(tp);
3309
3310	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
3311	RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
3312
3313	r8168_mac_ocp_modify(tp, 0xd3e2, 0x0fff, 0x0271);
3314	r8168_mac_ocp_modify(tp, 0xd3e4, 0x00ff, 0x0000);
3315	r8168_mac_ocp_modify(tp, 0xe860, 0x0000, 0x0080);
3316
3317	rtl_hw_aspm_clkreq_enable(tp, true);
3318}
3319
3320static void rtl_hw_start_8117(struct rtl8169_private *tp)
3321{
3322	static const struct ephy_info e_info_8117[] = {
3323		{ 0x19, 0x0040,	0x1100 },
3324		{ 0x59, 0x0040,	0x1100 },
3325	};
3326	int rg_saw_cnt;
3327
3328	rtl8168ep_stop_cmac(tp);
3329
3330	/* disable ASPM and clock request before accessing ephy */
3331	rtl_hw_aspm_clkreq_enable(tp, false);
3332	rtl_ephy_init(tp, e_info_8117);
3333
3334	rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06);
3335	rtl8168g_set_pause_thresholds(tp, 0x2f, 0x5f);
3336
3337	rtl_set_def_aspm_entry_latency(tp);
3338
3339	rtl_reset_packet_filter(tp);
3340
3341	rtl_eri_set_bits(tp, 0xd4, 0x1f90);
3342
3343	rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);
3344
3345	RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
3346
3347	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
3348	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
3349
3350	rtl8168_config_eee_mac(tp);
3351
3352	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
3353	RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
3354
3355	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);
3356
3357	rtl_eri_clear_bits(tp, 0x1b0, BIT(12));
3358
3359	rtl_pcie_state_l2l3_disable(tp);
3360
3361	rg_saw_cnt = phy_read_paged(tp->phydev, 0x0c42, 0x13) & 0x3fff;
3362	if (rg_saw_cnt > 0) {
3363		u16 sw_cnt_1ms_ini;
3364
3365		sw_cnt_1ms_ini = (16000000 / rg_saw_cnt) & 0x0fff;
3366		r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini);
3367	}
3368
3369	r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070);
3370	r8168_mac_ocp_write(tp, 0xea80, 0x0003);
3371	r8168_mac_ocp_modify(tp, 0xe052, 0x0000, 0x0009);
3372	r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f);
3373
3374	r8168_mac_ocp_write(tp, 0xe63e, 0x0001);
3375	r8168_mac_ocp_write(tp, 0xe63e, 0x0000);
3376	r8168_mac_ocp_write(tp, 0xc094, 0x0000);
3377	r8168_mac_ocp_write(tp, 0xc09e, 0x0000);
3378
3379	/* firmware is for MAC only */
3380	r8169_apply_firmware(tp);
3381
3382	rtl_hw_aspm_clkreq_enable(tp, true);
3383}
3384
3385static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
3386{
3387	static const struct ephy_info e_info_8102e_1[] = {
3388		{ 0x01,	0, 0x6e65 },
3389		{ 0x02,	0, 0x091f },
3390		{ 0x03,	0, 0xc2f9 },
3391		{ 0x06,	0, 0xafb5 },
3392		{ 0x07,	0, 0x0e00 },
3393		{ 0x19,	0, 0xec80 },
3394		{ 0x01,	0, 0x2e65 },
3395		{ 0x01,	0, 0x6e65 }
3396	};
3397	u8 cfg1;
3398
3399	rtl_set_def_aspm_entry_latency(tp);
3400
3401	RTL_W8(tp, DBG_REG, FIX_NAK_1);
3402
3403	RTL_W8(tp, Config1,
3404	       LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
3405	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
3406
3407	cfg1 = RTL_R8(tp, Config1);
3408	if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
3409		RTL_W8(tp, Config1, cfg1 & ~LEDS0);
3410
3411	rtl_ephy_init(tp, e_info_8102e_1);
3412}
3413
3414static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
3415{
3416	rtl_set_def_aspm_entry_latency(tp);
3417
3418	RTL_W8(tp, Config1, MEMMAP | IOMAP | VPD | PMEnable);
3419	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
3420}
3421
3422static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
3423{
3424	rtl_hw_start_8102e_2(tp);
3425
3426	rtl_ephy_write(tp, 0x03, 0xc2f9);
3427}
3428
3429static void rtl_hw_start_8401(struct rtl8169_private *tp)
3430{
3431	static const struct ephy_info e_info_8401[] = {
3432		{ 0x01,	0xffff, 0x6fe5 },
3433		{ 0x03,	0xffff, 0x0599 },
3434		{ 0x06,	0xffff, 0xaf25 },
3435		{ 0x07,	0xffff, 0x8e68 },
3436	};
3437
3438	rtl_ephy_init(tp, e_info_8401);
3439	RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
3440}
3441
3442static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
3443{
3444	static const struct ephy_info e_info_8105e_1[] = {
3445		{ 0x07,	0, 0x4000 },
3446		{ 0x19,	0, 0x0200 },
3447		{ 0x19,	0, 0x0020 },
3448		{ 0x1e,	0, 0x2000 },
3449		{ 0x03,	0, 0x0001 },
3450		{ 0x19,	0, 0x0100 },
3451		{ 0x19,	0, 0x0004 },
3452		{ 0x0a,	0, 0x0020 }
3453	};
3454
3455	/* Force LAN exit from ASPM if Rx/Tx are not idle */
3456	RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
3457
3458	/* Disable Early Tally Counter */
3459	RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) & ~0x010000);
3460
3461	RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET);
3462	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
3463
3464	rtl_ephy_init(tp, e_info_8105e_1);
3465
3466	rtl_pcie_state_l2l3_disable(tp);
3467}
3468
3469static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
3470{
3471	rtl_hw_start_8105e_1(tp);
3472	rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000);
3473}
3474
3475static void rtl_hw_start_8402(struct rtl8169_private *tp)
3476{
3477	static const struct ephy_info e_info_8402[] = {
3478		{ 0x19,	0xffff, 0xff64 },
3479		{ 0x1e,	0, 0x4000 }
3480	};
3481
3482	rtl_set_def_aspm_entry_latency(tp);
3483
3484	/* Force LAN exit from ASPM if Rx/Tx are not idle */
3485	RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
3486
3487	RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
3488
3489	rtl_ephy_init(tp, e_info_8402);
3490
3491	rtl_set_fifo_size(tp, 0x00, 0x00, 0x02, 0x06);
3492	rtl_reset_packet_filter(tp);
3493	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
3494	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
3495	rtl_w0w1_eri(tp, 0x0d4, 0x0e00, 0xff00);
3496
3497	/* disable EEE */
3498	rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);
3499
3500	rtl_pcie_state_l2l3_disable(tp);
3501}
3502
3503static void rtl_hw_start_8106(struct rtl8169_private *tp)
3504{
3505	rtl_hw_aspm_clkreq_enable(tp, false);
3506
3507	/* Force LAN exit from ASPM if Rx/Tx are not idle */
3508	RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
3509
3510	RTL_W32(tp, MISC, (RTL_R32(tp, MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
3511	RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET);
3512	RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
3513
3514	rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000);
3515
3516	/* disable EEE */
3517	rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);
3518
3519	rtl_pcie_state_l2l3_disable(tp);
3520	rtl_hw_aspm_clkreq_enable(tp, true);
3521}
3522
3523DECLARE_RTL_COND(rtl_mac_ocp_e00e_cond)
3524{
3525	return r8168_mac_ocp_read(tp, 0xe00e) & BIT(13);
3526}
3527
3528static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
3529{
3530	rtl_pcie_state_l2l3_disable(tp);
3531
3532	RTL_W16(tp, 0x382, 0x221b);
3533	RTL_W8(tp, 0x4500, 0);
3534	RTL_W16(tp, 0x4800, 0);
3535
3536	/* disable UPS */
3537	r8168_mac_ocp_modify(tp, 0xd40a, 0x0010, 0x0000);
3538
3539	RTL_W8(tp, Config1, RTL_R8(tp, Config1) & ~0x10);
3540
3541	r8168_mac_ocp_write(tp, 0xc140, 0xffff);
3542	r8168_mac_ocp_write(tp, 0xc142, 0xffff);
3543
3544	r8168_mac_ocp_modify(tp, 0xd3e2, 0x0fff, 0x03a9);
3545	r8168_mac_ocp_modify(tp, 0xd3e4, 0x00ff, 0x0000);
3546	r8168_mac_ocp_modify(tp, 0xe860, 0x0000, 0x0080);
3547
3548	/* disable new tx descriptor format */
3549	r8168_mac_ocp_modify(tp, 0xeb58, 0x0001, 0x0000);
3550
3551	if (tp->mac_version == RTL_GIGA_MAC_VER_63)
3552		r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0200);
3553	else
3554		r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400);
3555
3556	if (tp->mac_version == RTL_GIGA_MAC_VER_63)
3557		r8168_mac_ocp_modify(tp, 0xe63e, 0x0c30, 0x0000);
3558	else
3559		r8168_mac_ocp_modify(tp, 0xe63e, 0x0c30, 0x0020);
3560
3561	r8168_mac_ocp_modify(tp, 0xc0b4, 0x0000, 0x000c);
3562	r8168_mac_ocp_modify(tp, 0xeb6a, 0x00ff, 0x0033);
3563	r8168_mac_ocp_modify(tp, 0xeb50, 0x03e0, 0x0040);
3564	r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0030);
3565	r8168_mac_ocp_modify(tp, 0xe040, 0x1000, 0x0000);
3566	r8168_mac_ocp_modify(tp, 0xea1c, 0x0003, 0x0001);
3567	r8168_mac_ocp_modify(tp, 0xe0c0, 0x4f0f, 0x4403);
3568	r8168_mac_ocp_modify(tp, 0xe052, 0x0080, 0x0068);
3569	r8168_mac_ocp_modify(tp, 0xc0ac, 0x0080, 0x1f00);
3570	r8168_mac_ocp_modify(tp, 0xd430, 0x0fff, 0x047f);
3571
3572	r8168_mac_ocp_modify(tp, 0xea1c, 0x0004, 0x0000);
3573	r8168_mac_ocp_modify(tp, 0xeb54, 0x0000, 0x0001);
3574	udelay(1);
3575	r8168_mac_ocp_modify(tp, 0xeb54, 0x0001, 0x0000);
3576	RTL_W16(tp, 0x1880, RTL_R16(tp, 0x1880) & ~0x0030);
3577
3578	r8168_mac_ocp_write(tp, 0xe098, 0xc302);
3579
3580	rtl_loop_wait_low(tp, &rtl_mac_ocp_e00e_cond, 1000, 10);
3581
3582	if (tp->mac_version == RTL_GIGA_MAC_VER_63)
3583		rtl8125b_config_eee_mac(tp);
3584	else
3585		rtl8125a_config_eee_mac(tp);
3586
3587	RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
3588	udelay(10);
3589}
3590
3591static void rtl_hw_start_8125a_1(struct rtl8169_private *tp)
3592{
3593	static const struct ephy_info e_info_8125a_1[] = {
3594		{ 0x01, 0xffff, 0xa812 },
3595		{ 0x09, 0xffff, 0x520c },
3596		{ 0x04, 0xffff, 0xd000 },
3597		{ 0x0d, 0xffff, 0xf702 },
3598		{ 0x0a, 0xffff, 0x8653 },
3599		{ 0x06, 0xffff, 0x001e },
3600		{ 0x08, 0xffff, 0x3595 },
3601		{ 0x20, 0xffff, 0x9455 },
3602		{ 0x21, 0xffff, 0x99ff },
3603		{ 0x02, 0xffff, 0x6046 },
3604		{ 0x29, 0xffff, 0xfe00 },
3605		{ 0x23, 0xffff, 0xab62 },
3606
3607		{ 0x41, 0xffff, 0xa80c },
3608		{ 0x49, 0xffff, 0x520c },
3609		{ 0x44, 0xffff, 0xd000 },
3610		{ 0x4d, 0xffff, 0xf702 },
3611		{ 0x4a, 0xffff, 0x8653 },
3612		{ 0x46, 0xffff, 0x001e },
3613		{ 0x48, 0xffff, 0x3595 },
3614		{ 0x60, 0xffff, 0x9455 },
3615		{ 0x61, 0xffff, 0x99ff },
3616		{ 0x42, 0xffff, 0x6046 },
3617		{ 0x69, 0xffff, 0xfe00 },
3618		{ 0x63, 0xffff, 0xab62 },
3619	};
3620
3621	rtl_set_def_aspm_entry_latency(tp);
3622
3623	/* disable ASPM and clock request before accessing ephy */
3624	rtl_hw_aspm_clkreq_enable(tp, false);
3625	rtl_ephy_init(tp, e_info_8125a_1);
3626
3627	rtl_hw_start_8125_common(tp);
3628	rtl_hw_aspm_clkreq_enable(tp, true);
3629}
3630
3631static void rtl_hw_start_8125a_2(struct rtl8169_private *tp)
3632{
3633	static const struct ephy_info e_info_8125a_2[] = {
3634		{ 0x04, 0xffff, 0xd000 },
3635		{ 0x0a, 0xffff, 0x8653 },
3636		{ 0x23, 0xffff, 0xab66 },
3637		{ 0x20, 0xffff, 0x9455 },
3638		{ 0x21, 0xffff, 0x99ff },
3639		{ 0x29, 0xffff, 0xfe04 },
3640
3641		{ 0x44, 0xffff, 0xd000 },
3642		{ 0x4a, 0xffff, 0x8653 },
3643		{ 0x63, 0xffff, 0xab66 },
3644		{ 0x60, 0xffff, 0x9455 },
3645		{ 0x61, 0xffff, 0x99ff },
3646		{ 0x69, 0xffff, 0xfe04 },
3647	};
3648
3649	rtl_set_def_aspm_entry_latency(tp);
3650
3651	/* disable ASPM and clock request before accessing ephy */
3652	rtl_hw_aspm_clkreq_enable(tp, false);
3653	rtl_ephy_init(tp, e_info_8125a_2);
3654
3655	rtl_hw_start_8125_common(tp);
3656	rtl_hw_aspm_clkreq_enable(tp, true);
3657}
3658
3659static void rtl_hw_start_8125b(struct rtl8169_private *tp)
3660{
3661	static const struct ephy_info e_info_8125b[] = {
3662		{ 0x0b, 0xffff, 0xa908 },
3663		{ 0x1e, 0xffff, 0x20eb },
3664		{ 0x4b, 0xffff, 0xa908 },
3665		{ 0x5e, 0xffff, 0x20eb },
3666		{ 0x22, 0x0030, 0x0020 },
3667		{ 0x62, 0x0030, 0x0020 },
3668	};
3669
3670	rtl_set_def_aspm_entry_latency(tp);
3671	rtl_hw_aspm_clkreq_enable(tp, false);
3672
3673	rtl_ephy_init(tp, e_info_8125b);
3674	rtl_hw_start_8125_common(tp);
3675
3676	rtl_hw_aspm_clkreq_enable(tp, true);
3677}
3678
3679static void rtl_hw_config(struct rtl8169_private *tp)
3680{
3681	static const rtl_generic_fct hw_configs[] = {
3682		[RTL_GIGA_MAC_VER_07] = rtl_hw_start_8102e_1,
3683		[RTL_GIGA_MAC_VER_08] = rtl_hw_start_8102e_3,
3684		[RTL_GIGA_MAC_VER_09] = rtl_hw_start_8102e_2,
3685		[RTL_GIGA_MAC_VER_10] = NULL,
3686		[RTL_GIGA_MAC_VER_11] = rtl_hw_start_8168b,
3687		[RTL_GIGA_MAC_VER_12] = rtl_hw_start_8168b,
3688		[RTL_GIGA_MAC_VER_13] = NULL,
3689		[RTL_GIGA_MAC_VER_14] = rtl_hw_start_8401,
3690		[RTL_GIGA_MAC_VER_16] = NULL,
3691		[RTL_GIGA_MAC_VER_17] = rtl_hw_start_8168b,
3692		[RTL_GIGA_MAC_VER_18] = rtl_hw_start_8168cp_1,
3693		[RTL_GIGA_MAC_VER_19] = rtl_hw_start_8168c_1,
3694		[RTL_GIGA_MAC_VER_20] = rtl_hw_start_8168c_2,
3695		[RTL_GIGA_MAC_VER_21] = rtl_hw_start_8168c_3,
3696		[RTL_GIGA_MAC_VER_22] = rtl_hw_start_8168c_4,
3697		[RTL_GIGA_MAC_VER_23] = rtl_hw_start_8168cp_2,
3698		[RTL_GIGA_MAC_VER_24] = rtl_hw_start_8168cp_3,
3699		[RTL_GIGA_MAC_VER_25] = rtl_hw_start_8168d,
3700		[RTL_GIGA_MAC_VER_26] = rtl_hw_start_8168d,
3701		[RTL_GIGA_MAC_VER_27] = rtl_hw_start_8168d,
3702		[RTL_GIGA_MAC_VER_28] = rtl_hw_start_8168d_4,
3703		[RTL_GIGA_MAC_VER_29] = rtl_hw_start_8105e_1,
3704		[RTL_GIGA_MAC_VER_30] = rtl_hw_start_8105e_2,
3705		[RTL_GIGA_MAC_VER_31] = rtl_hw_start_8168d,
3706		[RTL_GIGA_MAC_VER_32] = rtl_hw_start_8168e_1,
3707		[RTL_GIGA_MAC_VER_33] = rtl_hw_start_8168e_1,
3708		[RTL_GIGA_MAC_VER_34] = rtl_hw_start_8168e_2,
3709		[RTL_GIGA_MAC_VER_35] = rtl_hw_start_8168f_1,
3710		[RTL_GIGA_MAC_VER_36] = rtl_hw_start_8168f_1,
3711		[RTL_GIGA_MAC_VER_37] = rtl_hw_start_8402,
3712		[RTL_GIGA_MAC_VER_38] = rtl_hw_start_8411,
3713		[RTL_GIGA_MAC_VER_39] = rtl_hw_start_8106,
3714		[RTL_GIGA_MAC_VER_40] = rtl_hw_start_8168g_1,
3715		[RTL_GIGA_MAC_VER_41] = rtl_hw_start_8168g_1,
3716		[RTL_GIGA_MAC_VER_42] = rtl_hw_start_8168g_2,
3717		[RTL_GIGA_MAC_VER_43] = rtl_hw_start_8168g_2,
3718		[RTL_GIGA_MAC_VER_44] = rtl_hw_start_8411_2,
3719		[RTL_GIGA_MAC_VER_45] = rtl_hw_start_8168h_1,
3720		[RTL_GIGA_MAC_VER_46] = rtl_hw_start_8168h_1,
3721		[RTL_GIGA_MAC_VER_47] = rtl_hw_start_8168h_1,
3722		[RTL_GIGA_MAC_VER_48] = rtl_hw_start_8168h_1,
3723		[RTL_GIGA_MAC_VER_49] = rtl_hw_start_8168ep_1,
3724		[RTL_GIGA_MAC_VER_50] = rtl_hw_start_8168ep_2,
3725		[RTL_GIGA_MAC_VER_51] = rtl_hw_start_8168ep_3,
3726		[RTL_GIGA_MAC_VER_52] = rtl_hw_start_8117,
3727		[RTL_GIGA_MAC_VER_60] = rtl_hw_start_8125a_1,
3728		[RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125a_2,
3729		[RTL_GIGA_MAC_VER_63] = rtl_hw_start_8125b,
3730	};
3731
3732	if (hw_configs[tp->mac_version])
3733		hw_configs[tp->mac_version](tp);
3734}
3735
3736static void rtl_hw_start_8125(struct rtl8169_private *tp)
3737{
3738	int i;
3739
3740	/* disable interrupt coalescing */
3741	for (i = 0xa00; i < 0xb00; i += 4)
3742		RTL_W32(tp, i, 0);
3743
3744	rtl_hw_config(tp);
3745}
3746
3747static void rtl_hw_start_8168(struct rtl8169_private *tp)
3748{
3749	if (rtl_is_8168evl_up(tp))
3750		RTL_W8(tp, MaxTxPacketSize, EarlySize);
3751	else
3752		RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
3753
3754	rtl_hw_config(tp);
3755
3756	/* disable interrupt coalescing */
3757	RTL_W16(tp, IntrMitigate, 0x0000);
3758}
3759
3760static void rtl_hw_start_8169(struct rtl8169_private *tp)
3761{
3762	RTL_W8(tp, EarlyTxThres, NoEarlyTx);
3763
3764	tp->cp_cmd |= PCIMulRW;
3765
3766	if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
3767	    tp->mac_version == RTL_GIGA_MAC_VER_03)
3768		tp->cp_cmd |= EnAnaPLL;
3769
3770	RTL_W16(tp, CPlusCmd, tp->cp_cmd);
3771
3772	rtl8169_set_magic_reg(tp);
3773
3774	/* disable interrupt coalescing */
3775	RTL_W16(tp, IntrMitigate, 0x0000);
3776}
3777
3778static void rtl_hw_start(struct rtl8169_private *tp)
3779{
3780	rtl_unlock_config_regs(tp);
3781
3782	RTL_W16(tp, CPlusCmd, tp->cp_cmd);
3783
3784	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
3785		rtl_hw_start_8169(tp);
3786	else if (rtl_is_8125(tp))
3787		rtl_hw_start_8125(tp);
3788	else
3789		rtl_hw_start_8168(tp);
3790
3791	rtl_set_rx_max_size(tp);
3792	rtl_set_rx_tx_desc_registers(tp);
3793	rtl_lock_config_regs(tp);
3794
3795	rtl_jumbo_config(tp);
3796
3797	/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
3798	rtl_pci_commit(tp);
3799
3800	RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
3801	rtl_init_rxcfg(tp);
3802	rtl_set_tx_config_registers(tp);
3803	rtl_set_rx_config_features(tp, tp->dev->features);
3804	rtl_set_rx_mode(tp->dev);
3805	rtl_irq_enable(tp);
3806}
3807
3808static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
3809{
3810	struct rtl8169_private *tp = netdev_priv(dev);
3811
3812	dev->mtu = new_mtu;
3813	netdev_update_features(dev);
3814	rtl_jumbo_config(tp);
3815
3816	switch (tp->mac_version) {
3817	case RTL_GIGA_MAC_VER_61:
3818	case RTL_GIGA_MAC_VER_63:
3819		rtl8125_set_eee_txidle_timer(tp);
3820		break;
3821	default:
3822		break;
3823	}
3824
3825	return 0;
3826}
3827
3828static void rtl8169_mark_to_asic(struct RxDesc *desc)
3829{
3830	u32 eor = le32_to_cpu(desc->opts1) & RingEnd;
3831
3832	desc->opts2 = 0;
3833	/* Force memory writes to complete before releasing descriptor */
3834	dma_wmb();
3835	WRITE_ONCE(desc->opts1, cpu_to_le32(DescOwn | eor | R8169_RX_BUF_SIZE));
3836}
3837
3838static struct page *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
3839					  struct RxDesc *desc)
3840{
3841	struct device *d = tp_to_dev(tp);
3842	int node = dev_to_node(d);
3843	dma_addr_t mapping;
3844	struct page *data;
3845
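	/* One page-order allocation per Rx slot, big enough for the full
	 * R8169_RX_BUF_SIZE buffer that is advertised to the chip.
	 */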
3846	data = alloc_pages_node(node, GFP_KERNEL, get_order(R8169_RX_BUF_SIZE));
3847	if (!data)
3848		return NULL;
3849
3850	mapping = dma_map_page(d, data, 0, R8169_RX_BUF_SIZE, DMA_FROM_DEVICE);
3851	if (unlikely(dma_mapping_error(d, mapping))) {
3852		netdev_err(tp->dev, "Failed to map RX DMA!\n");
3853		__free_pages(data, get_order(R8169_RX_BUF_SIZE));
3854		return NULL;
3855	}
3856
3857	desc->addr = cpu_to_le64(mapping);
3858	rtl8169_mark_to_asic(desc);
3859
3860	return data;
3861}
3862
3863static void rtl8169_rx_clear(struct rtl8169_private *tp)
3864{
3865	unsigned int i;
3866
3867	for (i = 0; i < NUM_RX_DESC && tp->Rx_databuff[i]; i++) {
3868		dma_unmap_page(tp_to_dev(tp),
3869			       le64_to_cpu(tp->RxDescArray[i].addr),
3870			       R8169_RX_BUF_SIZE, DMA_FROM_DEVICE);
3871		__free_pages(tp->Rx_databuff[i], get_order(R8169_RX_BUF_SIZE));
3872		tp->Rx_databuff[i] = NULL;
3873		tp->RxDescArray[i].addr = 0;
3874		tp->RxDescArray[i].opts1 = 0;
3875	}
3876}
3877
3878static int rtl8169_rx_fill(struct rtl8169_private *tp)
3879{
3880	unsigned int i;
3881
3882	for (i = 0; i < NUM_RX_DESC; i++) {
3883		struct page *data;
3884
3885		data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
3886		if (!data) {
3887			rtl8169_rx_clear(tp);
3888			return -ENOMEM;
3889		}
3890		tp->Rx_databuff[i] = data;
3891	}
3892
3893	/* mark as last descriptor in the ring */
3894	tp->RxDescArray[NUM_RX_DESC - 1].opts1 |= cpu_to_le32(RingEnd);
3895
3896	return 0;
3897}
3898
3899static int rtl8169_init_ring(struct rtl8169_private *tp)
3900{
3901	rtl8169_init_ring_indexes(tp);
3902
3903	memset(tp->tx_skb, 0, sizeof(tp->tx_skb));
3904	memset(tp->Rx_databuff, 0, sizeof(tp->Rx_databuff));
3905
3906	return rtl8169_rx_fill(tp);
3907}
3908
3909static void rtl8169_unmap_tx_skb(struct rtl8169_private *tp, unsigned int entry)
3910{
3911	struct ring_info *tx_skb = tp->tx_skb + entry;
3912	struct TxDesc *desc = tp->TxDescArray + entry;
3913
3914	dma_unmap_single(tp_to_dev(tp), le64_to_cpu(desc->addr), tx_skb->len,
3915			 DMA_TO_DEVICE);
3916	memset(desc, 0, sizeof(*desc));
3917	memset(tx_skb, 0, sizeof(*tx_skb));
3918}
3919
3920static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
3921				   unsigned int n)
3922{
3923	unsigned int i;
3924
3925	for (i = 0; i < n; i++) {
3926		unsigned int entry = (start + i) % NUM_TX_DESC;
3927		struct ring_info *tx_skb = tp->tx_skb + entry;
3928		unsigned int len = tx_skb->len;
3929
3930		if (len) {
3931			struct sk_buff *skb = tx_skb->skb;
3932
3933			rtl8169_unmap_tx_skb(tp, entry);
3934			if (skb)
3935				dev_consume_skb_any(skb);
3936		}
3937	}
3938}
3939
3940static void rtl8169_tx_clear(struct rtl8169_private *tp)
3941{
3942	rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
3943	netdev_reset_queue(tp->dev);
3944}
3945
3946static void rtl8169_cleanup(struct rtl8169_private *tp, bool going_down)
3947{
3948	napi_disable(&tp->napi);
3949
3950	/* Give a racing hard_start_xmit a few cycles to complete. */
3951	synchronize_net();
3952
3953	/* Disable interrupts */
3954	rtl8169_irq_mask_and_ack(tp);
3955
3956	rtl_rx_close(tp);
3957
3958	if (going_down && tp->dev->wol_enabled)
3959		goto no_reset;
3960
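	/* Chip-specific quiesce: let in-flight DMA drain before the hardware
	 * reset below.
	 */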
3961	switch (tp->mac_version) {
3962	case RTL_GIGA_MAC_VER_27:
3963	case RTL_GIGA_MAC_VER_28:
3964	case RTL_GIGA_MAC_VER_31:
3965		rtl_loop_wait_low(tp, &rtl_npq_cond, 20, 2000);
3966		break;
3967	case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38:
3968		RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
3969		rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
3970		break;
3971	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63:
3972		rtl_enable_rxdvgate(tp);
3973		fsleep(2000);
3974		break;
3975	default:
3976		RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
3977		fsleep(100);
3978		break;
3979	}
3980
3981	rtl_hw_reset(tp);
3982no_reset:
3983	rtl8169_tx_clear(tp);
3984	rtl8169_init_ring_indexes(tp);
3985}
3986
3987static void rtl_reset_work(struct rtl8169_private *tp)
3988{
3989	int i;
3990
3991	netif_stop_queue(tp->dev);
3992
3993	rtl8169_cleanup(tp, false);
3994
3995	for (i = 0; i < NUM_RX_DESC; i++)
3996		rtl8169_mark_to_asic(tp->RxDescArray + i);
3997
3998	napi_enable(&tp->napi);
3999	rtl_hw_start(tp);
4000}
4001
4002static void rtl8169_tx_timeout(struct net_device *dev, unsigned int txqueue)
4003{
4004	struct rtl8169_private *tp = netdev_priv(dev);
4005
4006	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
4007}
4008
4009static int rtl8169_tx_map(struct rtl8169_private *tp, const u32 *opts, u32 len,
4010			  void *addr, unsigned int entry, bool desc_own)
4011{
4012	struct TxDesc *txd = tp->TxDescArray + entry;
4013	struct device *d = tp_to_dev(tp);
4014	dma_addr_t mapping;
4015	u32 opts1;
4016	int ret;
4017
4018	mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
4019	ret = dma_mapping_error(d, mapping);
4020	if (unlikely(ret)) {
4021		if (net_ratelimit())
4022			netdev_err(tp->dev, "Failed to map TX data!\n");
4023		return ret;
4024	}
4025
4026	txd->addr = cpu_to_le64(mapping);
4027	txd->opts2 = cpu_to_le32(opts[1]);
4028
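	/* opts[0] carries the checksum/TSO bits prepared by the caller; fold
	 * in the buffer length and the ring-control flags. DescOwn is set only
	 * when requested, so the head descriptor can be published last.
	 */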
4029	opts1 = opts[0] | len;
4030	if (entry == NUM_TX_DESC - 1)
4031		opts1 |= RingEnd;
4032	if (desc_own)
4033		opts1 |= DescOwn;
4034	txd->opts1 = cpu_to_le32(opts1);
4035
4036	tp->tx_skb[entry].len = len;
4037
4038	return 0;
4039}
4040
4041static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
4042			      const u32 *opts, unsigned int entry)
4043{
4044	struct skb_shared_info *info = skb_shinfo(skb);
4045	unsigned int cur_frag;
4046
4047	for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
4048		const skb_frag_t *frag = info->frags + cur_frag;
4049		void *addr = skb_frag_address(frag);
4050		u32 len = skb_frag_size(frag);
4051
4052		entry = (entry + 1) % NUM_TX_DESC;
4053
4054		if (unlikely(rtl8169_tx_map(tp, opts, len, addr, entry, true)))
4055			goto err_out;
4056	}
4057
4058	return 0;
4059
4060err_out:
4061	rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag);
4062	return -EIO;
4063}
4064
4065static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb)
4066{
4067	return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34;
4068}
4069
4070static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts)
4071{
4072	u32 mss = skb_shinfo(skb)->gso_size;
4073
4074	if (mss) {
4075		opts[0] |= TD_LSO;
4076		opts[0] |= mss << TD0_MSS_SHIFT;
4077	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
4078		const struct iphdr *ip = ip_hdr(skb);
4079
4080		if (ip->protocol == IPPROTO_TCP)
4081			opts[0] |= TD0_IP_CS | TD0_TCP_CS;
4082		else if (ip->protocol == IPPROTO_UDP)
4083			opts[0] |= TD0_IP_CS | TD0_UDP_CS;
4084		else
4085			WARN_ON_ONCE(1);
4086	}
4087}
4088
4089static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
4090				struct sk_buff *skb, u32 *opts)
4091{
4092	u32 transport_offset = (u32)skb_transport_offset(skb);
4093	struct skb_shared_info *shinfo = skb_shinfo(skb);
4094	u32 mss = shinfo->gso_size;
4095
4096	if (mss) {
4097		if (shinfo->gso_type & SKB_GSO_TCPV4) {
4098			opts[0] |= TD1_GTSENV4;
4099		} else if (shinfo->gso_type & SKB_GSO_TCPV6) {
4100			if (skb_cow_head(skb, 0))
4101				return false;
4102
4103			tcp_v6_gso_csum_prep(skb);
4104			opts[0] |= TD1_GTSENV6;
4105		} else {
4106			WARN_ON_ONCE(1);
4107		}
4108
4109		opts[0] |= transport_offset << GTTCPHO_SHIFT;
4110		opts[1] |= mss << TD1_MSS_SHIFT;
4111	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
4112		u8 ip_protocol;
4113
4114		switch (vlan_get_protocol(skb)) {
4115		case htons(ETH_P_IP):
4116			opts[1] |= TD1_IPv4_CS;
4117			ip_protocol = ip_hdr(skb)->protocol;
4118			break;
4119
4120		case htons(ETH_P_IPV6):
4121			opts[1] |= TD1_IPv6_CS;
4122			ip_protocol = ipv6_hdr(skb)->nexthdr;
4123			break;
4124
4125		default:
4126			ip_protocol = IPPROTO_RAW;
4127			break;
4128		}
4129
4130		if (ip_protocol == IPPROTO_TCP)
4131			opts[1] |= TD1_TCP_CS;
4132		else if (ip_protocol == IPPROTO_UDP)
4133			opts[1] |= TD1_UDP_CS;
4134		else
4135			WARN_ON_ONCE(1);
4136
4137		opts[1] |= transport_offset << TCPHO_SHIFT;
4138	} else {
4139		if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
4140			return !eth_skb_pad(skb);
4141	}
4142
4143	return true;
4144}
4145
4146static bool rtl_tx_slots_avail(struct rtl8169_private *tp,
4147			       unsigned int nr_frags)
4148{
4149	unsigned int slots_avail = tp->dirty_tx + NUM_TX_DESC - tp->cur_tx;
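	/* cur_tx and dirty_tx are free-running counters, so unsigned
	 * wraparound keeps this difference correct even across overflow.
	 */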
4150
4151	/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
4152	return slots_avail > nr_frags;
4153}
4154
4155/* RTL8102e and everything from RTL8168c onwards support csum_v2 */
4156static bool rtl_chip_supports_csum_v2(struct rtl8169_private *tp)
4157{
4158	switch (tp->mac_version) {
4159	case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
4160	case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17:
4161		return false;
4162	default:
4163		return true;
4164	}
4165}
4166
4167static void rtl8169_doorbell(struct rtl8169_private *tp)
4168{
4169	if (rtl_is_8125(tp))
4170		RTL_W16(tp, TxPoll_8125, BIT(0));
4171	else
4172		RTL_W8(tp, TxPoll, NPQ);
4173}
4174
4175static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
4176				      struct net_device *dev)
4177{
4178	unsigned int frags = skb_shinfo(skb)->nr_frags;
4179	struct rtl8169_private *tp = netdev_priv(dev);
4180	unsigned int entry = tp->cur_tx % NUM_TX_DESC;
4181	struct TxDesc *txd_first, *txd_last;
4182	bool stop_queue, door_bell;
 
4183	u32 opts[2];
4184
4185	txd_first = tp->TxDescArray + entry;
4186
4187	if (unlikely(!rtl_tx_slots_avail(tp, frags))) {
4188		if (net_ratelimit())
4189			netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
4190		goto err_stop_0;
 
4191	}
4192
4193	if (unlikely(le32_to_cpu(txd_first->opts1) & DescOwn))
4194		goto err_stop_0;
4195
4196	opts[1] = rtl8169_tx_vlan_tag(skb);
4197	opts[0] = 0;
4198
4199	if (!rtl_chip_supports_csum_v2(tp))
4200		rtl8169_tso_csum_v1(skb, opts);
4201	else if (!rtl8169_tso_csum_v2(tp, skb, opts))
4202		goto err_dma_0;
4203
4204	if (unlikely(rtl8169_tx_map(tp, opts, skb_headlen(skb), skb->data,
4205				    entry, false)))
4206		goto err_dma_0;
4207
4208	if (frags) {
4209		if (rtl8169_xmit_frags(tp, skb, opts, entry))
4210			goto err_dma_1;
4211		entry = (entry + frags) % NUM_TX_DESC;
4212	}
4213
4214	txd_last = tp->TxDescArray + entry;
4215	txd_last->opts1 |= cpu_to_le32(LastFrag);
4216	tp->tx_skb[entry].skb = skb;
4217
4218	skb_tx_timestamp(skb);
4219
4220	/* Force memory writes to complete before releasing descriptor */
4221	dma_wmb();
4222
4223	door_bell = __netdev_sent_queue(dev, skb->len, netdev_xmit_more());
4224
4225	txd_first->opts1 |= cpu_to_le32(DescOwn | FirstFrag);
4226
4227	/* rtl_tx needs to see descriptor changes before the updated tp->cur_tx */
4228	smp_wmb();
4229
4230	tp->cur_tx += frags + 1;
4231
4232	stop_queue = !rtl_tx_slots_avail(tp, MAX_SKB_FRAGS);
4233	if (unlikely(stop_queue)) {
4234		/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
4235		 * not miss a ring update when it notices a stopped queue.
4236		 */
4237		smp_wmb();
4238		netif_stop_queue(dev);
4239		door_bell = true;
4240	}
4241
4242	if (door_bell)
4243		rtl8169_doorbell(tp);
4244
4245	if (unlikely(stop_queue)) {
4246		/* Sync with rtl_tx:
4247		 * - publish queue status and cur_tx ring index (write barrier)
4248		 * - refresh dirty_tx ring index (read barrier).
4249		 * Even if the current thread has a pessimistic view of the ring
4250		 * status and forgets to wake up the queue, a racing rtl_tx
4251		 * thread can't.
4252		 */
4253		smp_mb();
4254		if (rtl_tx_slots_avail(tp, MAX_SKB_FRAGS))
4255			netif_start_queue(dev);
4256	}
4257
4258	return NETDEV_TX_OK;
4259
4260err_dma_1:
4261	rtl8169_unmap_tx_skb(tp, entry);
4262err_dma_0:
4263	dev_kfree_skb_any(skb);
4264	dev->stats.tx_dropped++;
4265	return NETDEV_TX_OK;
4266
4267err_stop_0:
4268	netif_stop_queue(dev);
4269	dev->stats.tx_dropped++;
4270	return NETDEV_TX_BUSY;
4271}
4272
4273static unsigned int rtl_last_frag_len(struct sk_buff *skb)
4274{
4275	struct skb_shared_info *info = skb_shinfo(skb);
4276	unsigned int nr_frags = info->nr_frags;
4277
4278	if (!nr_frags)
4279		return UINT_MAX;
4280
4281	return skb_frag_size(info->frags + nr_frags - 1);
4282}
4283
4284/* Workaround for hw issues with TSO on RTL8168evl */
4285static netdev_features_t rtl8168evl_fix_tso(struct sk_buff *skb,
4286					    netdev_features_t features)
4287{
4288	/* IPv4 header has options field */
4289	if (vlan_get_protocol(skb) == htons(ETH_P_IP) &&
4290	    ip_hdrlen(skb) > sizeof(struct iphdr))
4291		features &= ~NETIF_F_ALL_TSO;
4292
4293	/* IPv4 TCP header has options field */
4294	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 &&
4295		 tcp_hdrlen(skb) > sizeof(struct tcphdr))
4296		features &= ~NETIF_F_ALL_TSO;
4297
4298	else if (rtl_last_frag_len(skb) <= 6)
4299		features &= ~NETIF_F_ALL_TSO;
4300
4301	return features;
4302}
4303
4304static netdev_features_t rtl8169_features_check(struct sk_buff *skb,
4305						struct net_device *dev,
4306						netdev_features_t features)
4307{
4308	int transport_offset = skb_transport_offset(skb);
4309	struct rtl8169_private *tp = netdev_priv(dev);
4310
4311	if (skb_is_gso(skb)) {
4312		if (tp->mac_version == RTL_GIGA_MAC_VER_34)
4313			features = rtl8168evl_fix_tso(skb, features);
4314
4315		if (transport_offset > GTTCPHO_MAX &&
4316		    rtl_chip_supports_csum_v2(tp))
4317			features &= ~NETIF_F_ALL_TSO;
4318	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
4319		if (skb->len < ETH_ZLEN) {
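			/* Hardware checksumming of sub-ETH_ZLEN frames is
			 * apparently unreliable on these chip versions, so
			 * fall back to software checksumming.
			 */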
4320			switch (tp->mac_version) {
4321			case RTL_GIGA_MAC_VER_11:
4322			case RTL_GIGA_MAC_VER_12:
4323			case RTL_GIGA_MAC_VER_17:
4324			case RTL_GIGA_MAC_VER_34:
4325				features &= ~NETIF_F_CSUM_MASK;
4326				break;
4327			default:
4328				break;
4329			}
4330		}
4331
4332		if (transport_offset > TCPHO_MAX &&
4333		    rtl_chip_supports_csum_v2(tp))
4334			features &= ~NETIF_F_CSUM_MASK;
4335	}
4336
4337	return vlan_features_check(skb, features);
4338}
4339
4340static void rtl8169_pcierr_interrupt(struct net_device *dev)
4341{
4342	struct rtl8169_private *tp = netdev_priv(dev);
4343	struct pci_dev *pdev = tp->pci_dev;
4344	int pci_status_errs;
4345	u16 pci_cmd;
4346
4347	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
4348
4349	pci_status_errs = pci_status_get_and_clear_errors(pdev);
4350
4351	if (net_ratelimit())
4352		netdev_err(dev, "PCI error (cmd = 0x%04x, status_errs = 0x%04x)\n",
4353			   pci_cmd, pci_status_errs);
4354	/*
4355	 * The recovery sequence below admits of a very elaborate explanation:
4356	 * - it seems to work;
4357	 * - I did not see what else could be done;
4358	 * - it makes iop3xx happy.
4359	 *
4360	 * Feel free to adjust to your needs.
4361	 */
4362	if (pdev->broken_parity_status)
4363		pci_cmd &= ~PCI_COMMAND_PARITY;
4364	else
4365		pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;
4366
4367	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
4368
4369	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
4370}
4371
4372static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
4373		   int budget)
4374{
4375	unsigned int dirty_tx, tx_left, bytes_compl = 0, pkts_compl = 0;
 
4376
4377	dirty_tx = tp->dirty_tx;
4378	smp_rmb();
4379
4380	for (tx_left = tp->cur_tx - dirty_tx; tx_left > 0; tx_left--) {
4381		unsigned int entry = dirty_tx % NUM_TX_DESC;
4382		struct sk_buff *skb = tp->tx_skb[entry].skb;
4383		u32 status;
4384
4385		status = le32_to_cpu(tp->TxDescArray[entry].opts1);
4386		if (status & DescOwn)
4387			break;
4388
 
4389		rtl8169_unmap_tx_skb(tp, entry);
4390
4391		if (skb) {
4392			pkts_compl++;
4393			bytes_compl += skb->len;
4394			napi_consume_skb(skb, budget);
4395		}
4396		dirty_tx++;
4397	}
4398
4399	if (tp->dirty_tx != dirty_tx) {
4400		netdev_completed_queue(dev, pkts_compl, bytes_compl);
 
4401
4402		u64_stats_update_begin(&tp->tx_stats.syncp);
4403		tp->tx_stats.packets += pkts_compl;
4404		tp->tx_stats.bytes += bytes_compl;
4405		u64_stats_update_end(&tp->tx_stats.syncp);
4406
4407		tp->dirty_tx = dirty_tx;
4408		/* Sync with rtl8169_start_xmit:
4409		 * - publish dirty_tx ring index (write barrier)
4410		 * - refresh cur_tx ring index and queue status (read barrier)
4411		 * Even if the current thread misses the stopped queue
4412		 * condition, a racing xmit thread can only have a correct view
4413		 * of the ring status.
4414		 */
4415		smp_mb();
4416		if (netif_queue_stopped(dev) &&
4417		    rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) {
4418			netif_wake_queue(dev);
4419		}
4420		/*
4421		 * 8168 hack: TxPoll requests are lost when the Tx packets are
4422		 * too close. Let's kick an extra TxPoll request when a burst
4423		 * of start_xmit activity is detected (if it is not detected,
4424		 * it is slow enough). -- FR
4425		 */
4426		if (tp->cur_tx != dirty_tx)
4427			rtl8169_doorbell(tp);
4428	}
4429}
4430
4431static inline int rtl8169_fragmented_frame(u32 status)
4432{
4433	return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
4434}
4435
4436static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
4437{
4438	u32 status = opts1 & RxProtoMask;
4439
4440	if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
4441	    ((status == RxProtoUDP) && !(opts1 & UDPFail)))
4442		skb->ip_summed = CHECKSUM_UNNECESSARY;
4443	else
4444		skb_checksum_none_assert(skb);
4445}
4446
4447static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget)
4448{
4449	unsigned int cur_rx, rx_left, count;
4450	struct device *d = tp_to_dev(tp);
 
4451
4452	cur_rx = tp->cur_rx;
4453
4454	for (rx_left = min(budget, NUM_RX_DESC); rx_left > 0; rx_left--, cur_rx++) {
4455		unsigned int pkt_size, entry = cur_rx % NUM_RX_DESC;
4456		struct RxDesc *desc = tp->RxDescArray + entry;
4457		struct sk_buff *skb;
4458		const void *rx_buf;
4459		dma_addr_t addr;
4460		u32 status;
4461
4462		status = le32_to_cpu(desc->opts1);
4463		if (status & DescOwn)
4464			break;
4465
4466		/* This barrier is needed to keep us from reading
4467		 * any other fields out of the Rx descriptor until
4468		 * we know the status of DescOwn
4469		 */
4470		dma_rmb();
4471
4472		if (unlikely(status & RxRES)) {
4473			if (net_ratelimit())
4474				netdev_warn(dev, "Rx ERROR. status = %08x\n",
4475					    status);
4476			dev->stats.rx_errors++;
4477			if (status & (RxRWT | RxRUNT))
4478				dev->stats.rx_length_errors++;
4479			if (status & RxCRC)
4480				dev->stats.rx_crc_errors++;
4481
4482			if (!(dev->features & NETIF_F_RXALL))
4483				goto release_descriptor;
4484			else if (status & RxRWT || !(status & (RxRUNT | RxCRC)))
4485				goto release_descriptor;
4486		}
4487
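		/* The frame length lives in the low 14 bits of opts1. */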
4488		pkt_size = status & GENMASK(13, 0);
4489		if (likely(!(dev->features & NETIF_F_RXFCS)))
4490			pkt_size -= ETH_FCS_LEN;
4491
4492		/* The driver does not support incoming fragmented frames.
4493		 * They are seen as a symptom of over-MTU-sized frames.
4494		 */
4495		if (unlikely(rtl8169_fragmented_frame(status))) {
4496			dev->stats.rx_dropped++;
4497			dev->stats.rx_length_errors++;
4498			goto release_descriptor;
4499		}
4500
4501		skb = napi_alloc_skb(&tp->napi, pkt_size);
4502		if (unlikely(!skb)) {
4503			dev->stats.rx_dropped++;
4504			goto release_descriptor;
4505		}
4506
4507		addr = le64_to_cpu(desc->addr);
4508		rx_buf = page_address(tp->Rx_databuff[entry]);
4509
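		/* Rx is copy-based: sync the DMA buffer for the CPU, copy the
		 * frame into the freshly allocated skb, then hand the same
		 * buffer straight back to the device.
		 */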
4510		dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
4511		prefetch(rx_buf);
4512		skb_copy_to_linear_data(skb, rx_buf, pkt_size);
4513		skb->tail += pkt_size;
4514		skb->len = pkt_size;
4515		dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
4516
4517		rtl8169_rx_csum(skb, status);
4518		skb->protocol = eth_type_trans(skb, dev);
4519
4520		rtl8169_rx_vlan_tag(desc, skb);
4521
4522		if (skb->pkt_type == PACKET_MULTICAST)
4523			dev->stats.multicast++;
4524
4525		napi_gro_receive(&tp->napi, skb);
4526
4527		u64_stats_update_begin(&tp->rx_stats.syncp);
4528		tp->rx_stats.packets++;
4529		tp->rx_stats.bytes += pkt_size;
4530		u64_stats_update_end(&tp->rx_stats.syncp);
4531
4532release_descriptor:
4533		rtl8169_mark_to_asic(desc);
4534	}
4535
4536	count = cur_rx - tp->cur_rx;
4537	tp->cur_rx = cur_rx;
4538
4539	return count;
4540}
4541
4542static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
4543{
4544	struct rtl8169_private *tp = dev_instance;
4545	u32 status = rtl_get_events(tp);
4546
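	/* A status of 0xffff usually means the device is gone (e.g. surprise
	 * removal); treat it like a spurious interrupt.
	 */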
4547	if (!tp->irq_enabled || (status & 0xffff) == 0xffff ||
4548	    !(status & tp->irq_mask))
4549		return IRQ_NONE;
4550
4551	if (unlikely(status & SYSErr)) {
4552		rtl8169_pcierr_interrupt(tp->dev);
4553		goto out;
4554	}
4555
4556	if (status & LinkChg)
4557		phy_mac_interrupt(tp->phydev);
4558
4559	if (unlikely(status & RxFIFOOver &&
4560	    tp->mac_version == RTL_GIGA_MAC_VER_11)) {
4561		netif_stop_queue(tp->dev);
4562		rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
4563	}
4564
4565	rtl_irq_disable(tp);
4566	napi_schedule_irqoff(&tp->napi);
4567out:
4568	rtl_ack_events(tp, status);
4569
4570	return IRQ_HANDLED;
4571}
4572
4573static void rtl_task(struct work_struct *work)
4574{
4575	struct rtl8169_private *tp =
4576		container_of(work, struct rtl8169_private, wk.work);
 
4577
4578	rtnl_lock();
4579
4580	if (!netif_running(tp->dev) ||
4581	    !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
4582		goto out_unlock;
4583
4584	if (test_and_clear_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags)) {
 
4585		rtl_reset_work(tp);
4586		netif_wake_queue(tp->dev);
4587	}
4588out_unlock:
4589	rtnl_unlock();
4590}
4591
4592static int rtl8169_poll(struct napi_struct *napi, int budget)
4593{
4594	struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
4595	struct net_device *dev = tp->dev;
4596	int work_done;
4597
4598	work_done = rtl_rx(dev, tp, (u32) budget);
4599
4600	rtl_tx(dev, tp, budget);
4601
4602	if (work_done < budget) {
4603		napi_complete_done(napi, work_done);
4604		rtl_irq_enable(tp);
4605	}
4606
4607	return work_done;
4608}
4609
4610static void r8169_phylink_handler(struct net_device *ndev)
4611{
4612	struct rtl8169_private *tp = netdev_priv(ndev);
 
4613
4614	if (netif_carrier_ok(ndev)) {
4615		rtl_link_chg_patch(tp);
4616		pm_request_resume(&tp->pci_dev->dev);
4617	} else {
4618		pm_runtime_idle(&tp->pci_dev->dev);
4619	}
4620
4621	if (net_ratelimit())
4622		phy_print_status(tp->phydev);
4623}
4624
4625static int r8169_phy_connect(struct rtl8169_private *tp)
4626{
4627	struct phy_device *phydev = tp->phydev;
4628	phy_interface_t phy_mode;
4629	int ret;
4630
4631	phy_mode = tp->supports_gmii ? PHY_INTERFACE_MODE_GMII :
4632		   PHY_INTERFACE_MODE_MII;
4633
4634	ret = phy_connect_direct(tp->dev, phydev, r8169_phylink_handler,
4635				 phy_mode);
4636	if (ret)
4637		return ret;
4638
4639	if (!tp->supports_gmii)
4640		phy_set_max_speed(phydev, SPEED_100);
4641
4642	phy_support_asym_pause(phydev);
4643
4644	phy_attached_info(phydev);
4645
4646	return 0;
4647}
4648
4649static void rtl8169_down(struct rtl8169_private *tp)
4650{
 
4651	/* Clear all task flags */
4652	bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
4653
4654	phy_stop(tp->phydev);
4655
4656	rtl8169_update_counters(tp);
4657
4658	rtl8169_cleanup(tp, true);
4659
4660	rtl_pll_power_down(tp);
 
4661}
4662
4663static void rtl8169_up(struct rtl8169_private *tp)
4664{
4665	rtl_pll_power_up(tp);
4666	rtl8169_init_phy(tp);
4667	napi_enable(&tp->napi);
4668	set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
4669	rtl_reset_work(tp);
4670
4671	phy_start(tp->phydev);
4672}
4673
4674static int rtl8169_close(struct net_device *dev)
4675{
4676	struct rtl8169_private *tp = netdev_priv(dev);
4677	struct pci_dev *pdev = tp->pci_dev;
4678
4679	pm_runtime_get_sync(&pdev->dev);
4680
4681	netif_stop_queue(dev);
4682	rtl8169_down(tp);
4683	rtl8169_rx_clear(tp);
4684
4685	cancel_work_sync(&tp->wk.work);
4686
4687	phy_disconnect(tp->phydev);
4688
4689	pci_free_irq(pdev, 0, tp);
4690
4691	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
4692			  tp->RxPhyAddr);
4693	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
4694			  tp->TxPhyAddr);
4695	tp->TxDescArray = NULL;
4696	tp->RxDescArray = NULL;
4697
4698	pm_runtime_put_sync(&pdev->dev);
4699
4700	return 0;
4701}
4702
4703#ifdef CONFIG_NET_POLL_CONTROLLER
4704static void rtl8169_netpoll(struct net_device *dev)
4705{
4706	struct rtl8169_private *tp = netdev_priv(dev);
4707
4708	rtl8169_interrupt(pci_irq_vector(tp->pci_dev, 0), tp);
4709}
4710#endif
4711
4712static int rtl_open(struct net_device *dev)
4713{
4714	struct rtl8169_private *tp = netdev_priv(dev);
4715	struct pci_dev *pdev = tp->pci_dev;
 
4716	int retval = -ENOMEM;
4717
4718	pm_runtime_get_sync(&pdev->dev);
4719
4720	/*
4721	 * Rx and Tx descriptors need 256-byte alignment.
4722	 * dma_alloc_coherent provides more.
4723	 */
4724	tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
4725					     &tp->TxPhyAddr, GFP_KERNEL);
4726	if (!tp->TxDescArray)
4727		goto err_pm_runtime_put;
4728
4729	tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
4730					     &tp->RxPhyAddr, GFP_KERNEL);
4731	if (!tp->RxDescArray)
4732		goto err_free_tx_0;
4733
4734	retval = rtl8169_init_ring(tp);
4735	if (retval < 0)
4736		goto err_free_rx_1;
4737
4738	rtl_request_firmware(tp);
4739
4740	retval = pci_request_irq(pdev, 0, rtl8169_interrupt, NULL, tp,
4741				 dev->name);
4742	if (retval < 0)
4743		goto err_release_fw_2;
4744
4745	retval = r8169_phy_connect(tp);
4746	if (retval)
4747		goto err_free_irq;
4748
4749	rtl8169_up(tp);
4750	rtl8169_init_counter_offsets(tp);
4751	netif_start_queue(dev);
4752
4753	pm_runtime_put_sync(&pdev->dev);
4754out:
4755	return retval;
4756
4757err_free_irq:
4758	pci_free_irq(pdev, 0, tp);
4759err_release_fw_2:
4760	rtl_release_firmware(tp);
4761	rtl8169_rx_clear(tp);
4762err_free_rx_1:
4763	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
4764			  tp->RxPhyAddr);
4765	tp->RxDescArray = NULL;
4766err_free_tx_0:
4767	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
4768			  tp->TxPhyAddr);
4769	tp->TxDescArray = NULL;
4770err_pm_runtime_put:
4771	pm_runtime_put_noidle(&pdev->dev);
4772	goto out;
4773}
4774
4775static void
4776rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
4777{
4778	struct rtl8169_private *tp = netdev_priv(dev);
4779	struct pci_dev *pdev = tp->pci_dev;
4780	struct rtl8169_counters *counters = tp->counters;
4781	unsigned int start;
4782
4783	pm_runtime_get_noresume(&pdev->dev);
4784
4785	netdev_stats_to_stats64(stats, &dev->stats);
4786
4787	do {
4788		start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp);
4789		stats->rx_packets = tp->rx_stats.packets;
4790		stats->rx_bytes	= tp->rx_stats.bytes;
4791	} while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start));
4792
4793	do {
4794		start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp);
4795		stats->tx_packets = tp->tx_stats.packets;
4796		stats->tx_bytes	= tp->tx_stats.bytes;
4797	} while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start));
4798
4799	/*
4800	 * Fetch from the hardware tally counters any additional values that
4801	 * are missing from the stats collected by the driver.
4802	 */
4803	if (pm_runtime_active(&pdev->dev))
4804		rtl8169_update_counters(tp);
4805
4806	/*
4807	 * Subtract values fetched during initialization.
4808	 * See rtl8169_init_counter_offsets for a description why we do that.
4809	 */
4810	stats->tx_errors = le64_to_cpu(counters->tx_errors) -
4811		le64_to_cpu(tp->tc_offset.tx_errors);
4812	stats->collisions = le32_to_cpu(counters->tx_multi_collision) -
4813		le32_to_cpu(tp->tc_offset.tx_multi_collision);
4814	stats->tx_aborted_errors = le16_to_cpu(counters->tx_aborted) -
4815		le16_to_cpu(tp->tc_offset.tx_aborted);
4816	stats->rx_missed_errors = le16_to_cpu(counters->rx_missed) -
4817		le16_to_cpu(tp->tc_offset.rx_missed);
4818
4819	pm_runtime_put_noidle(&pdev->dev);
4820}
4821
4822static void rtl8169_net_suspend(struct rtl8169_private *tp)
4823{
4824	netif_device_detach(tp->dev);
4825
4826	if (netif_running(tp->dev))
4827		rtl8169_down(tp);
4828}
4829
4830#ifdef CONFIG_PM
4831
4832static int rtl8169_net_resume(struct rtl8169_private *tp)
4833{
4834	rtl_rar_set(tp, tp->dev->dev_addr);
 
4835
4836	if (tp->TxDescArray)
4837		rtl8169_up(tp);
4838
4839	netif_device_attach(tp->dev);
4840
4841	return 0;
4842}
4843
4844static int __maybe_unused rtl8169_suspend(struct device *device)
4845{
4846	struct rtl8169_private *tp = dev_get_drvdata(device);
4847
4848	rtnl_lock();
4849	rtl8169_net_suspend(tp);
4850	if (!device_may_wakeup(tp_to_dev(tp)))
4851		clk_disable_unprepare(tp->clk);
4852	rtnl_unlock();
4853
4854	return 0;
4855}
4856
4857static int __maybe_unused rtl8169_resume(struct device *device)
4858{
4859	struct rtl8169_private *tp = dev_get_drvdata(device);
4860
4861	if (!device_may_wakeup(tp_to_dev(tp)))
4862		clk_prepare_enable(tp->clk);
4863
4864	/* Reportedly at least Asus X453MA truncates packets otherwise */
4865	if (tp->mac_version == RTL_GIGA_MAC_VER_37)
4866		rtl_init_rxcfg(tp);
4867
4868	return rtl8169_net_resume(tp);
4869}
4870
4871static int rtl8169_runtime_suspend(struct device *device)
4872{
4873	struct rtl8169_private *tp = dev_get_drvdata(device);
4874
4875	if (!tp->TxDescArray) {
4876		netif_device_detach(tp->dev);
4877		return 0;
4878	}
4879
4880	rtnl_lock();
4881	__rtl8169_set_wol(tp, WAKE_PHY);
4882	rtl8169_net_suspend(tp);
4883	rtnl_unlock();
4884
4885	return 0;
4886}
4887
4888static int rtl8169_runtime_resume(struct device *device)
4889{
4890	struct rtl8169_private *tp = dev_get_drvdata(device);
4891
4892	__rtl8169_set_wol(tp, tp->saved_wolopts);
4893
4894	return rtl8169_net_resume(tp);
4895}
4896
4897static int rtl8169_runtime_idle(struct device *device)
4898{
4899	struct rtl8169_private *tp = dev_get_drvdata(device);
4900
4901	if (!netif_running(tp->dev) || !netif_carrier_ok(tp->dev))
4902		pm_schedule_suspend(device, 10000);
4903
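	/* Returning -EBUSY blocks an immediate runtime suspend; the delayed
	 * suspend scheduled above covers the idle case.
	 */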
4904	return -EBUSY;
4905}
4906
4907static const struct dev_pm_ops rtl8169_pm_ops = {
4908	SET_SYSTEM_SLEEP_PM_OPS(rtl8169_suspend, rtl8169_resume)
4909	SET_RUNTIME_PM_OPS(rtl8169_runtime_suspend, rtl8169_runtime_resume,
4910			   rtl8169_runtime_idle)
4911};
4912
4913#endif /* CONFIG_PM */
4914
4915static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
4916{
4917	/* WoL fails with 8168b when the receiver is disabled. */
4918	switch (tp->mac_version) {
4919	case RTL_GIGA_MAC_VER_11:
4920	case RTL_GIGA_MAC_VER_12:
4921	case RTL_GIGA_MAC_VER_17:
4922		pci_clear_master(tp->pci_dev);
4923
4924		RTL_W8(tp, ChipCmd, CmdRxEnb);
4925		rtl_pci_commit(tp);
4926		break;
4927	default:
4928		break;
4929	}
4930}
4931
4932static void rtl_shutdown(struct pci_dev *pdev)
4933{
4934	struct rtl8169_private *tp = pci_get_drvdata(pdev);
4935
4936	rtnl_lock();
4937	rtl8169_net_suspend(tp);
4938	rtnl_unlock();
4939
4940	/* Restore original MAC address */
4941	rtl_rar_set(tp, tp->dev->perm_addr);
4942
4943	if (system_state == SYSTEM_POWER_OFF) {
4944		if (tp->saved_wolopts) {
4945			rtl_wol_suspend_quirk(tp);
4946			rtl_wol_shutdown_quirk(tp);
4947		}
4948
4949		pci_wake_from_d3(pdev, true);
4950		pci_set_power_state(pdev, PCI_D3hot);
4951	}
4952}
4953
4954static void rtl_remove_one(struct pci_dev *pdev)
4955{
4956	struct rtl8169_private *tp = pci_get_drvdata(pdev);
4957
4958	if (pci_dev_run_wake(pdev))
4959		pm_runtime_get_noresume(&pdev->dev);
4960
4961	unregister_netdev(tp->dev);
4962
4963	if (r8168_check_dash(tp))
4964		rtl8168_driver_stop(tp);
4965
4966	rtl_release_firmware(tp);
4967
4968	/* restore original MAC address */
4969	rtl_rar_set(tp, tp->dev->perm_addr);
4970}
4971
4972static const struct net_device_ops rtl_netdev_ops = {
4973	.ndo_open		= rtl_open,
4974	.ndo_stop		= rtl8169_close,
4975	.ndo_get_stats64	= rtl8169_get_stats64,
4976	.ndo_start_xmit		= rtl8169_start_xmit,
4977	.ndo_features_check	= rtl8169_features_check,
4978	.ndo_tx_timeout		= rtl8169_tx_timeout,
4979	.ndo_validate_addr	= eth_validate_addr,
4980	.ndo_change_mtu		= rtl8169_change_mtu,
4981	.ndo_fix_features	= rtl8169_fix_features,
4982	.ndo_set_features	= rtl8169_set_features,
4983	.ndo_set_mac_address	= rtl_set_mac_address,
4984	.ndo_do_ioctl		= phy_do_ioctl_running,
4985	.ndo_set_rx_mode	= rtl_set_rx_mode,
4986#ifdef CONFIG_NET_POLL_CONTROLLER
4987	.ndo_poll_controller	= rtl8169_netpoll,
4988#endif
4989
4990};
4991
4992static void rtl_set_irq_mask(struct rtl8169_private *tp)
4993{
4994	tp->irq_mask = RxOK | RxErr | TxOK | TxErr | LinkChg;
4995
4996	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
4997		tp->irq_mask |= SYSErr | RxOverflow | RxFIFOOver;
4998	else if (tp->mac_version == RTL_GIGA_MAC_VER_11)
4999		/* special workaround needed */
5000		tp->irq_mask |= RxFIFOOver;
5001	else
5002		tp->irq_mask |= RxOverflow;
5003}
5004
5005static int rtl_alloc_irq(struct rtl8169_private *tp)
5006{
5007	unsigned int flags;
5008
5009	switch (tp->mac_version) {
5010	case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
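		/* MSI is reportedly unreliable on these early chips; force it
		 * off and fall back to a legacy IRQ.
		 */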
5011		rtl_unlock_config_regs(tp);
5012		RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
5013		rtl_lock_config_regs(tp);
5014		fallthrough;
5015	case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_17:
5016		flags = PCI_IRQ_LEGACY;
5017		break;
5018	default:
5019		flags = PCI_IRQ_ALL_TYPES;
5020		break;
5021	}
5022
5023	return pci_alloc_irq_vectors(tp->pci_dev, 1, 1, flags);
5024}
5025
5026static void rtl_read_mac_address(struct rtl8169_private *tp,
5027				 u8 mac_addr[ETH_ALEN])
5028{
5029	/* Get MAC address */
5030	if (rtl_is_8168evl_up(tp) && tp->mac_version != RTL_GIGA_MAC_VER_34) {
5031		u32 value = rtl_eri_read(tp, 0xe0);
5032
5033		mac_addr[0] = (value >>  0) & 0xff;
5034		mac_addr[1] = (value >>  8) & 0xff;
5035		mac_addr[2] = (value >> 16) & 0xff;
5036		mac_addr[3] = (value >> 24) & 0xff;
5037
5038		value = rtl_eri_read(tp, 0xe4);
5039		mac_addr[4] = (value >>  0) & 0xff;
5040		mac_addr[5] = (value >>  8) & 0xff;
5041	} else if (rtl_is_8125(tp)) {
5042		rtl_read_mac_from_reg(tp, mac_addr, MAC0_BKP);
5043	}
5044}
5045
5046DECLARE_RTL_COND(rtl_link_list_ready_cond)
5047{
5048	return RTL_R8(tp, MCU) & LINK_LIST_RDY;
5049}
5050
5051static void r8168g_wait_ll_share_fifo_ready(struct rtl8169_private *tp)
5052{
5053	rtl_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42);
5054}
5055
5056static int r8169_mdio_read_reg(struct mii_bus *mii_bus, int phyaddr, int phyreg)
5057{
5058	struct rtl8169_private *tp = mii_bus->priv;
5059
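	/* The internal PHY is the only device on this bus and sits at
	 * address 0.
	 */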
5060	if (phyaddr > 0)
5061		return -ENODEV;
5062
5063	return rtl_readphy(tp, phyreg);
5064}
5065
5066static int r8169_mdio_write_reg(struct mii_bus *mii_bus, int phyaddr,
5067				int phyreg, u16 val)
5068{
5069	struct rtl8169_private *tp = mii_bus->priv;
5070
5071	if (phyaddr > 0)
5072		return -ENODEV;
5073
5074	rtl_writephy(tp, phyreg, val);
5075
5076	return 0;
5077}
5078
5079static int r8169_mdio_register(struct rtl8169_private *tp)
5080{
5081	struct pci_dev *pdev = tp->pci_dev;
5082	struct mii_bus *new_bus;
5083	int ret;
5084
5085	new_bus = devm_mdiobus_alloc(&pdev->dev);
5086	if (!new_bus)
5087		return -ENOMEM;
5088
5089	new_bus->name = "r8169";
5090	new_bus->priv = tp;
5091	new_bus->parent = &pdev->dev;
5092	new_bus->irq[0] = PHY_IGNORE_INTERRUPT;
5093	snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x", pci_dev_id(pdev));
 
5094
5095	new_bus->read = r8169_mdio_read_reg;
5096	new_bus->write = r8169_mdio_write_reg;
5097
5098	ret = devm_mdiobus_register(&pdev->dev, new_bus);
5099	if (ret)
5100		return ret;
5101
5102	tp->phydev = mdiobus_get_phy(new_bus, 0);
5103	if (!tp->phydev) {
5104		return -ENODEV;
5105	} else if (!tp->phydev->drv) {
5106		/* Most chip versions fail with the genphy driver.
5107		 * Therefore ensure that the dedicated PHY driver is loaded.
5108		 */
5109		dev_err(&pdev->dev, "no dedicated PHY driver found for PHY ID 0x%08x, maybe realtek.ko needs to be added to initramfs?\n",
5110			tp->phydev->phy_id);
5111		return -EUNATCH;
5112	}
5113
5114	/* PHY will be woken up in rtl_open() */
5115	phy_suspend(tp->phydev);
5116
5117	return 0;
5118}
5119
5120static void rtl_hw_init_8168g(struct rtl8169_private *tp)
5121{
5122	rtl_enable_rxdvgate(tp);
5123
5124	RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
5125	msleep(1);
5126	RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
5127
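	/* Undocumented MAC OCP sequence: toggling bits 14/15 of 0xe8de
	 * appears to release the link-list / shared FIFO, hence the ready
	 * polls below.
	 */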
5128	r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0);
5129	r8168g_wait_ll_share_fifo_ready(tp);
5130
5131	r8168_mac_ocp_modify(tp, 0xe8de, 0, BIT(15));
5132	r8168g_wait_ll_share_fifo_ready(tp);
5133}
5134
5135static void rtl_hw_init_8125(struct rtl8169_private *tp)
5136{
5137	rtl_enable_rxdvgate(tp);
5138
5139	RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
5140	msleep(1);
5141	RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
5142
5143	r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0);
5144	r8168g_wait_ll_share_fifo_ready(tp);
5145
5146	r8168_mac_ocp_write(tp, 0xc0aa, 0x07d0);
5147	r8168_mac_ocp_write(tp, 0xc0a6, 0x0150);
5148	r8168_mac_ocp_write(tp, 0xc01e, 0x5555);
5149	r8168g_wait_ll_share_fifo_ready(tp);
5150}
5151
5152static void rtl_hw_initialize(struct rtl8169_private *tp)
5153{
5154	switch (tp->mac_version) {
5155	case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52:
5156		rtl8168ep_stop_cmac(tp);
5157		fallthrough;
5158	case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48:
5159		rtl_hw_init_8168g(tp);
5160		break;
5161	case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_63:
5162		rtl_hw_init_8125(tp);
5163		break;
5164	default:
5165		break;
5166	}
5167}
5168
5169static int rtl_jumbo_max(struct rtl8169_private *tp)
5170{
5171	/* Non-GBit versions don't support jumbo frames */
5172	if (!tp->supports_gmii)
5173		return 0;
5174
5175	switch (tp->mac_version) {
5176	/* RTL8169 */
5177	case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
5178		return JUMBO_7K;
5179	/* RTL8168b */
5180	case RTL_GIGA_MAC_VER_11:
5181	case RTL_GIGA_MAC_VER_12:
5182	case RTL_GIGA_MAC_VER_17:
5183		return JUMBO_4K;
5184	/* RTL8168c */
5185	case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
5186		return JUMBO_6K;
5187	default:
5188		return JUMBO_9K;
5189	}
5190}
5191
5192static void rtl_disable_clk(void *data)
5193{
5194	clk_disable_unprepare(data);
5195}
5196
5197static int rtl_get_ether_clk(struct rtl8169_private *tp)
5198{
5199	struct device *d = tp_to_dev(tp);
5200	struct clk *clk;
5201	int rc;
5202
5203	clk = devm_clk_get(d, "ether_clk");
5204	if (IS_ERR(clk)) {
5205		rc = PTR_ERR(clk);
5206		if (rc == -ENOENT)
5207			/* clk-core allows NULL (for suspend / resume) */
5208			rc = 0;
5209		else if (rc != -EPROBE_DEFER)
5210			dev_err(d, "failed to get clk: %d\n", rc);
5211	} else {
5212		tp->clk = clk;
5213		rc = clk_prepare_enable(clk);
5214		if (rc)
5215			dev_err(d, "failed to enable clk: %d\n", rc);
5216		else
5217			rc = devm_add_action_or_reset(d, rtl_disable_clk, clk);
5218	}
5219
5220	return rc;
5221}
5222
5223static void rtl_init_mac_address(struct rtl8169_private *tp)
5224{
 
5225	struct net_device *dev = tp->dev;
5226	u8 *mac_addr = dev->dev_addr;
5227	int rc;
5228
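	/* Address selection order: a platform/firmware-provided address
	 * first, then chip-specific backup registers, then the MAC0
	 * registers, and finally a random address as a last resort.
	 */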
5229	rc = eth_platform_get_mac_address(tp_to_dev(tp), mac_addr);
5230	if (!rc)
5231		goto done;
5232
5233	rtl_read_mac_address(tp, mac_addr);
5234	if (is_valid_ether_addr(mac_addr))
5235		goto done;
5236
5237	rtl_read_mac_from_reg(tp, mac_addr, MAC0);
5238	if (is_valid_ether_addr(mac_addr))
5239		goto done;
5240
5241	eth_hw_addr_random(dev);
 
5242	dev_warn(tp_to_dev(tp), "can't read MAC address, setting random one\n");
5243done:
 
5244	rtl_rar_set(tp, mac_addr);
5245}
5246
5247static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5248{
5249	struct rtl8169_private *tp;
5250	int jumbo_max, region, rc;
5251	enum mac_version chipset;
5252	struct net_device *dev;
 
5253	u16 xid;
5254
5255	dev = devm_alloc_etherdev(&pdev->dev, sizeof(*tp));
5256	if (!dev)
5257		return -ENOMEM;
5258
5259	SET_NETDEV_DEV(dev, &pdev->dev);
5260	dev->netdev_ops = &rtl_netdev_ops;
5261	tp = netdev_priv(dev);
5262	tp->dev = dev;
5263	tp->pci_dev = pdev;
5264	tp->supports_gmii = ent->driver_data == RTL_CFG_NO_GBIT ? 0 : 1;
5265	tp->eee_adv = -1;
5266	tp->ocp_base = OCP_STD_PHY_BASE;
5267
5268	/* Get the *optional* external "ether_clk" used on some boards */
5269	rc = rtl_get_ether_clk(tp);
5270	if (rc)
5271		return rc;
5272
5273	/* Disable ASPM completely as it causes random problems, from devices
5274	 * that stop working to full system hangs, for some PCIe device users.
5275	 */
5276	rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
5277					  PCIE_LINK_STATE_L1);
5278	tp->aspm_manageable = !rc;
5279
5280	/* enable device (incl. PCI PM wakeup and hotplug setup) */
5281	rc = pcim_enable_device(pdev);
5282	if (rc < 0) {
5283		dev_err(&pdev->dev, "enable failure\n");
5284		return rc;
5285	}
5286
5287	if (pcim_set_mwi(pdev) < 0)
5288		dev_info(&pdev->dev, "Mem-Wr-Inval unavailable\n");
5289
5290	/* use first MMIO region */
5291	region = ffs(pci_select_bars(pdev, IORESOURCE_MEM)) - 1;
5292	if (region < 0) {
5293		dev_err(&pdev->dev, "no MMIO resource found\n");
5294		return -ENODEV;
5295	}
5296
5297	/* check for weird/broken PCI region reporting */
5298	if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
5299		dev_err(&pdev->dev, "Invalid PCI region size(s), aborting\n");
5300		return -ENODEV;
5301	}
5302
5303	rc = pcim_iomap_regions(pdev, BIT(region), MODULENAME);
5304	if (rc < 0) {
5305		dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
5306		return rc;
5307	}
5308
5309	tp->mmio_addr = pcim_iomap_table(pdev)[region];
5310
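	/* The chip identification (XID) is taken from the upper bits of
	 * TxConfig; the 0xfcf mask drops bits that don't distinguish MAC
	 * versions.
	 */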
5311	xid = (RTL_R32(tp, TxConfig) >> 20) & 0xfcf;
5312
5313	/* Identify chip attached to board */
5314	chipset = rtl8169_get_mac_version(xid, tp->supports_gmii);
5315	if (chipset == RTL_GIGA_MAC_NONE) {
5316		dev_err(&pdev->dev, "unknown chip XID %03x\n", xid);
5317		return -ENODEV;
5318	}
5319
5320	tp->mac_version = chipset;
 
5321
5322	tp->cp_cmd = RTL_R16(tp, CPlusCmd) & CPCMD_MASK;
5323
5324	if (sizeof(dma_addr_t) > 4 && tp->mac_version >= RTL_GIGA_MAC_VER_18 &&
5325	    !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
5326		dev->features |= NETIF_F_HIGHDMA;
5327
5328	rtl_init_rxcfg(tp);
5329
5330	rtl8169_irq_mask_and_ack(tp);
5331
5332	rtl_hw_initialize(tp);
5333
5334	rtl_hw_reset(tp);
5335
5336	pci_set_master(pdev);
5337
5338	rc = rtl_alloc_irq(tp);
5339	if (rc < 0) {
5340		dev_err(&pdev->dev, "Can't allocate interrupt\n");
5341		return rc;
5342	}
5343
5344	INIT_WORK(&tp->wk.work, rtl_task);
5345	u64_stats_init(&tp->rx_stats.syncp);
5346	u64_stats_init(&tp->tx_stats.syncp);
5347
5348	rtl_init_mac_address(tp);
5349
5350	dev->ethtool_ops = &rtl8169_ethtool_ops;
5351
5352	netif_napi_add(dev, &tp->napi, rtl8169_poll, NAPI_POLL_WEIGHT);
5353
5354	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
5355			   NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
5356	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
5357	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
5358
5359	/*
5360	 * Pretend we are using VLANs; this bypasses a nasty bug where
5361	 * interrupts stop flowing on high load on 8110SCd controllers.
5362	 */
5363	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
5364		/* Disallow toggling */
5365		dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
5366
5367	if (rtl_chip_supports_csum_v2(tp))
5368		dev->hw_features |= NETIF_F_IPV6_CSUM;
5369
5370	dev->features |= dev->hw_features;
5371
5372	/* There have been a number of reports that using SG/TSO results in
5373	 * tx timeouts. However, for a lot of people SG/TSO works fine.
5374	 * Therefore disable both features by default, but allow users to
5375	 * enable them. Use at your own risk!
5376	 */
5377	if (rtl_chip_supports_csum_v2(tp)) {
5378		dev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
5379		dev->gso_max_size = RTL_GSO_MAX_SIZE_V2;
5380		dev->gso_max_segs = RTL_GSO_MAX_SEGS_V2;
5381	} else {
5382		dev->hw_features |= NETIF_F_SG | NETIF_F_TSO;
5383		dev->gso_max_size = RTL_GSO_MAX_SIZE_V1;
5384		dev->gso_max_segs = RTL_GSO_MAX_SEGS_V1;
5385	}
5386
5387	dev->hw_features |= NETIF_F_RXALL;
5388	dev->hw_features |= NETIF_F_RXFCS;
5389
5390	/* configure chip for default features */
5391	rtl8169_set_features(dev, dev->features);
5392
5393	jumbo_max = rtl_jumbo_max(tp);
5394	if (jumbo_max)
5395		dev->max_mtu = jumbo_max;
5396
5397	rtl_set_irq_mask(tp);
5398
5399	tp->fw_name = rtl_chip_infos[chipset].fw_name;
5400
5401	tp->counters = dmam_alloc_coherent(&pdev->dev, sizeof(*tp->counters),
5402					    &tp->counters_phys_addr,
5403					    GFP_KERNEL);
5404	if (!tp->counters)
5405		return -ENOMEM;
5406
5407	pci_set_drvdata(pdev, tp);
5408
5409	rc = r8169_mdio_register(tp);
5410	if (rc)
5411		return rc;
5412
5413	/* chip gets powered up in rtl_open() */
5414	rtl_pll_power_down(tp);
5415
5416	rc = register_netdev(dev);
5417	if (rc)
5418		return rc;
5419
5420	netdev_info(dev, "%s, %pM, XID %03x, IRQ %d\n",
5421		    rtl_chip_infos[chipset].name, dev->dev_addr, xid,
5422		    pci_irq_vector(pdev, 0));
5423
5424	if (jumbo_max)
5425		netdev_info(dev, "jumbo features [frames: %d bytes, tx checksumming: %s]\n",
5426			    jumbo_max, tp->mac_version <= RTL_GIGA_MAC_VER_06 ?
5427			    "ok" : "ko");
5428
5429	if (r8168_check_dash(tp)) {
5430		netdev_info(dev, "DASH enabled\n");
 
5431		rtl8168_driver_start(tp);
5432	}
5433
5434	if (pci_dev_run_wake(pdev))
5435		pm_runtime_put_sync(&pdev->dev);
5436
5437	return 0;
5438}
5439
5440static struct pci_driver rtl8169_pci_driver = {
5441	.name		= MODULENAME,
5442	.id_table	= rtl8169_pci_tbl,
5443	.probe		= rtl_init_one,
5444	.remove		= rtl_remove_one,
5445	.shutdown	= rtl_shutdown,
5446#ifdef CONFIG_PM
5447	.driver.pm	= &rtl8169_pm_ops,
5448#endif
5449};
5450
5451module_pci_driver(rtl8169_pci_driver);