/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 * refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2016 Broadcom Corporation.
 *	Copyright (C) 2016-2017 Broadcom Ltd.
 *	Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 *	refers to Broadcom Inc. and/or its subsidiaries.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>
#include <linux/dmi.h>

#include <net/checksum.h>
#include <net/gso.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
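
/* Example: tg3_flag(tp, ENABLE_APE) pastes the token into
 * _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags), i.e. an atomic
 * test_bit() lookup in the driver's flag bitmap.
 */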

#define DRV_MODULE_NAME		"tg3"
/* DO NOT UPDATE TG3_*_NUM defines */
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			137

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			ETH_ZLEN
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
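
/* Example: with TG3_TX_RING_SIZE == 512, NEXT_TX(511) == (512 & 511) == 0,
 * so the ring wrap-around is computed with a mask rather than a hardware
 * modulo, exactly as the comment above intends.
 */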

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif
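
/* Illustrative sketch only (not the actual rx path): frames shorter than
 * the threshold are copied into a freshly allocated skb so the DMA buffer
 * can stay on the ring:
 *
 *	if (len < TG3_RX_COPY_THRESH(tp))
 *		... copy into a small skb, leave the ring buffer mapped ...
 *	else
 *		... unmap the buffer and hand it to the stack directly ...
 */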

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
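
/* Example: with the default tx_pending of TG3_DEF_TX_RING_PENDING (511),
 * the queue is woken once at least 511 / 4 = 127 descriptors are free.
 */
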
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)
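/* i.e. the driver refreshes firmware state every 5 / 2 = 2 seconds
 * (integer division), comfortably inside the 5 second timeout window.
 */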

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

MODULE_AUTHOR("David S. Miller <davem@redhat.com> and Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG357766);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
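
/* Example (illustrative): GPIO-controlled power switching via GRC_LOCAL_CTRL
 * uses the waiting flavor so the settling time documented above is honored:
 *
 *	tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
 *		    TG3_GRC_LCLCTL_PWRSW_DELAY);
 */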

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver doesn't hold any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}

}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
#endif

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 20 milliseconds for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 20000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_send_ape_heartbeat(struct tg3 *tp,
				   unsigned long interval)
{
	/* Skip unless APE is enabled and the heartbeat interval has elapsed. */
	if (!tg3_flag(tp, ENABLE_APE) ||
	    time_before(jiffies, tp->ape_hb_jiffies + interval))
		return;

	tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
	tp->ape_hb_jiffies = jiffies;
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000
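
/* With udelay(10) per iteration in the polling loops below, PHY_BUSY_LOOPS
 * bounds a single PHY access at roughly 5000 * 10 usec = 50 ms before
 * -EBUSY is returned.
 */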

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x", pci_dev_id(tp->pdev));
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
 1554	 */
 1555	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
 1556		tg3_bmcr_reset(tp);
 1557
 1558	i = mdiobus_register(tp->mdio_bus);
 1559	if (i) {
 1560		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
 1561		mdiobus_free(tp->mdio_bus);
 1562		return i;
 1563	}
 1564
 1565	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
 1566
 1567	if (!phydev || !phydev->drv) {
 1568		dev_warn(&tp->pdev->dev, "No PHY devices\n");
 1569		mdiobus_unregister(tp->mdio_bus);
 1570		mdiobus_free(tp->mdio_bus);
 1571		return -ENODEV;
 1572	}
 1573
 1574	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
 1575	case PHY_ID_BCM57780:
 1576		phydev->interface = PHY_INTERFACE_MODE_GMII;
 1577		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
 1578		break;
 1579	case PHY_ID_BCM50610:
 1580	case PHY_ID_BCM50610M:
 1581		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
 1582				     PHY_BRCM_RX_REFCLK_UNUSED |
 1583				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
 1584				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
 1585		fallthrough;
 1586	case PHY_ID_RTL8211C:
 1587		phydev->interface = PHY_INTERFACE_MODE_RGMII;
 1588		break;
 1589	case PHY_ID_RTL8201E:
 1590	case PHY_ID_BCMAC131:
 1591		phydev->interface = PHY_INTERFACE_MODE_MII;
 1592		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
 1593		tp->phy_flags |= TG3_PHYFLG_IS_FET;
 1594		break;
 1595	}
 1596
 1597	tg3_flag_set(tp, MDIOBUS_INITED);
 1598
 1599	if (tg3_asic_rev(tp) == ASIC_REV_5785)
 1600		tg3_mdio_config_5785(tp);
 1601
 1602	return 0;
 1603}
 1604
 1605static void tg3_mdio_fini(struct tg3 *tp)
 1606{
 1607	if (tg3_flag(tp, MDIOBUS_INITED)) {
 1608		tg3_flag_clear(tp, MDIOBUS_INITED);
 1609		mdiobus_unregister(tp->mdio_bus);
 1610		mdiobus_free(tp->mdio_bus);
 1611	}
 1612}
 1613
 1614/* tp->lock is held. */
 1615static inline void tg3_generate_fw_event(struct tg3 *tp)
 1616{
 1617	u32 val;
 1618
 1619	val = tr32(GRC_RX_CPU_EVENT);
 1620	val |= GRC_RX_CPU_DRIVER_EVENT;
 1621	tw32_f(GRC_RX_CPU_EVENT, val);
 1622
 1623	tp->last_event_jiffies = jiffies;
 1624}
 1625
 1626#define TG3_FW_EVENT_TIMEOUT_USEC 2500
 1627
 1628/* tp->lock is held. */
 1629static void tg3_wait_for_event_ack(struct tg3 *tp)
 1630{
 1631	int i;
 1632	unsigned int delay_cnt;
 1633	long time_remain;
 1634
 1635	/* If enough time has passed, no wait is necessary. */
 1636	time_remain = (long)(tp->last_event_jiffies + 1 +
 1637		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
 1638		      (long)jiffies;
 1639	if (time_remain < 0)
 1640		return;
 1641
 1642	/* Check if we can shorten the wait time. */
 1643	delay_cnt = jiffies_to_usecs(time_remain);
 1644	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
 1645		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
 1646	delay_cnt = (delay_cnt >> 3) + 1;
 1647
 1648	for (i = 0; i < delay_cnt; i++) {
 1649		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
 1650			break;
 1651		if (pci_channel_offline(tp->pdev))
 1652			break;
 1653
 1654		udelay(8);
 1655	}
 1656}
 1657
 1658/* tp->lock is held. */
 1659static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
 1660{
 1661	u32 reg, val;
 1662
 1663	val = 0;
 1664	if (!tg3_readphy(tp, MII_BMCR, &reg))
 1665		val = reg << 16;
 1666	if (!tg3_readphy(tp, MII_BMSR, &reg))
 1667		val |= (reg & 0xffff);
 1668	*data++ = val;
 1669
 1670	val = 0;
 1671	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
 1672		val = reg << 16;
 1673	if (!tg3_readphy(tp, MII_LPA, &reg))
 1674		val |= (reg & 0xffff);
 1675	*data++ = val;
 1676
 1677	val = 0;
 1678	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
 1679		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
 1680			val = reg << 16;
 1681		if (!tg3_readphy(tp, MII_STAT1000, &reg))
 1682			val |= (reg & 0xffff);
 1683	}
 1684	*data++ = val;
 1685
 1686	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
 1687		val = reg << 16;
 1688	else
 1689		val = 0;
 1690	*data++ = val;
 1691}
 1692
 1693/* tp->lock is held. */
 1694static void tg3_ump_link_report(struct tg3 *tp)
 1695{
 1696	u32 data[4];
 1697
 1698	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
 1699		return;
 1700
 1701	tg3_phy_gather_ump_data(tp, data);
 1702
 1703	tg3_wait_for_event_ack(tp);
 1704
 1705	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
 1706	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
 1707	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
 1708	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
 1709	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
 1710	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
 1711
 1712	tg3_generate_fw_event(tp);
 1713}
 1714
 1715/* tp->lock is held. */
 1716static void tg3_stop_fw(struct tg3 *tp)
 1717{
 1718	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
 1719		/* Wait for RX cpu to ACK the previous event. */
 1720		tg3_wait_for_event_ack(tp);
 1721
 1722		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
 1723
 1724		tg3_generate_fw_event(tp);
 1725
 1726		/* Wait for RX cpu to ACK this event. */
 1727		tg3_wait_for_event_ack(tp);
 1728	}
 1729}
 1730
 1731/* tp->lock is held. */
 1732static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
 1733{
 1734	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
 1735		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
 1736
 1737	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
 1738		switch (kind) {
 1739		case RESET_KIND_INIT:
 1740			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
 1741				      DRV_STATE_START);
 1742			break;
 1743
 1744		case RESET_KIND_SHUTDOWN:
 1745			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
 1746				      DRV_STATE_UNLOAD);
 1747			break;
 1748
 1749		case RESET_KIND_SUSPEND:
 1750			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
 1751				      DRV_STATE_SUSPEND);
 1752			break;
 1753
 1754		default:
 1755			break;
 1756		}
 1757	}
 1758}
 1759
 1760/* tp->lock is held. */
 1761static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
 1762{
 1763	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
 1764		switch (kind) {
 1765		case RESET_KIND_INIT:
 1766			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
 1767				      DRV_STATE_START_DONE);
 1768			break;
 1769
 1770		case RESET_KIND_SHUTDOWN:
 1771			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
 1772				      DRV_STATE_UNLOAD_DONE);
 1773			break;
 1774
 1775		default:
 1776			break;
 1777		}
 1778	}
 1779}
 1780
 1781/* tp->lock is held. */
 1782static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
 1783{
 1784	if (tg3_flag(tp, ENABLE_ASF)) {
 1785		switch (kind) {
 1786		case RESET_KIND_INIT:
 1787			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
 1788				      DRV_STATE_START);
 1789			break;
 1790
 1791		case RESET_KIND_SHUTDOWN:
 1792			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
 1793				      DRV_STATE_UNLOAD);
 1794			break;
 1795
 1796		case RESET_KIND_SUSPEND:
 1797			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
 1798				      DRV_STATE_SUSPEND);
 1799			break;
 1800
 1801		default:
 1802			break;
 1803		}
 1804	}
 1805}
 1806
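/* Wait for bootcode to complete initialization: the firmware writes
 * the one's complement of MAGIC1 into the firmware mailbox when it
 * is done.  5906 parts expose an explicit VCPU init-done bit instead,
 * and some parts (e.g. Sun onboard) legitimately run no firmware.
 */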
 1807static int tg3_poll_fw(struct tg3 *tp)
 1808{
 1809	int i;
 1810	u32 val;
 1811
 1812	if (tg3_flag(tp, NO_FWARE_REPORTED))
 1813		return 0;
 1814
 1815	if (tg3_flag(tp, IS_SSB_CORE)) {
 1816		/* We don't use firmware. */
 1817		return 0;
 1818	}
 1819
 1820	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
 1821		/* Wait up to 20ms for init done. */
 1822		for (i = 0; i < 200; i++) {
 1823			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
 1824				return 0;
 1825			if (pci_channel_offline(tp->pdev))
 1826				return -ENODEV;
 1827
 1828			udelay(100);
 1829		}
 1830		return -ENODEV;
 1831	}
 1832
 1833	/* Wait for firmware initialization to complete. */
 1834	for (i = 0; i < 100000; i++) {
 1835		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
 1836		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
 1837			break;
 1838		if (pci_channel_offline(tp->pdev)) {
 1839			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
 1840				tg3_flag_set(tp, NO_FWARE_REPORTED);
 1841				netdev_info(tp->dev, "No firmware running\n");
 1842			}
 1843
 1844			break;
 1845		}
 1846
 1847		udelay(10);
 1848	}
 1849
 1850	/* Chip might not be fitted with firmware.  Some Sun onboard
 1851	 * parts are configured like that.  So don't signal the timeout
 1852	 * of the above loop as an error, but do report the lack of
 1853	 * running firmware once.
 1854	 */
 1855	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
 1856		tg3_flag_set(tp, NO_FWARE_REPORTED);
 1857
 1858		netdev_info(tp->dev, "No firmware running\n");
 1859	}
 1860
 1861	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
 1862		/* The 57765 A0 needs a little more
 1863		 * time to do some important work.
 1864		 */
 1865		mdelay(10);
 1866	}
 1867
 1868	return 0;
 1869}
 1870
 1871static void tg3_link_report(struct tg3 *tp)
 1872{
 1873	if (!netif_carrier_ok(tp->dev)) {
 1874		netif_info(tp, link, tp->dev, "Link is down\n");
 1875		tg3_ump_link_report(tp);
 1876	} else if (netif_msg_link(tp)) {
 1877		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
 1878			    (tp->link_config.active_speed == SPEED_1000 ?
 1879			     1000 :
 1880			     (tp->link_config.active_speed == SPEED_100 ?
 1881			      100 : 10)),
 1882			    (tp->link_config.active_duplex == DUPLEX_FULL ?
 1883			     "full" : "half"));
 1884
 1885		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
 1886			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
 1887			    "on" : "off",
 1888			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
 1889			    "on" : "off");
 1890
 1891		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
 1892			netdev_info(tp->dev, "EEE is %s\n",
 1893				    tp->setlpicnt ? "enabled" : "disabled");
 1894
 1895		tg3_ump_link_report(tp);
 1896	}
 1897
 1898	tp->link_up = netif_carrier_ok(tp->dev);
 1899}
 1900
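/* Translate 802.3 pause advertisement bits (PAUSE_CAP/PAUSE_ASYM)
 * into the driver's FLOW_CTRL_TX/RX bitmask.
 */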
 1901static u32 tg3_decode_flowctrl_1000T(u32 adv)
 1902{
 1903	u32 flowctrl = 0;
 1904
 1905	if (adv & ADVERTISE_PAUSE_CAP) {
 1906		flowctrl |= FLOW_CTRL_RX;
 1907		if (!(adv & ADVERTISE_PAUSE_ASYM))
 1908			flowctrl |= FLOW_CTRL_TX;
 1909	} else if (adv & ADVERTISE_PAUSE_ASYM)
 1910		flowctrl |= FLOW_CTRL_TX;
 1911
 1912	return flowctrl;
 1913}
 1914
 1915static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
 1916{
 1917	u16 miireg;
 1918
 1919	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
 1920		miireg = ADVERTISE_1000XPAUSE;
 1921	else if (flow_ctrl & FLOW_CTRL_TX)
 1922		miireg = ADVERTISE_1000XPSE_ASYM;
 1923	else if (flow_ctrl & FLOW_CTRL_RX)
 1924		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
 1925	else
 1926		miireg = 0;
 1927
 1928	return miireg;
 1929}
 1930
 1931static u32 tg3_decode_flowctrl_1000X(u32 adv)
 1932{
 1933	u32 flowctrl = 0;
 1934
 1935	if (adv & ADVERTISE_1000XPAUSE) {
 1936		flowctrl |= FLOW_CTRL_RX;
 1937		if (!(adv & ADVERTISE_1000XPSE_ASYM))
 1938			flowctrl |= FLOW_CTRL_TX;
 1939	} else if (adv & ADVERTISE_1000XPSE_ASYM)
 1940		flowctrl |= FLOW_CTRL_TX;
 1941
 1942	return flowctrl;
 1943}
 1944
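/* Resolve local and link-partner 1000BASE-X pause advertisements
 * into the flow control to use, per the usual 802.3
 * symmetric/asymmetric pause resolution rules.
 */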
 1945static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
 1946{
 1947	u8 cap = 0;
 1948
 1949	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
 1950		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
 1951	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
 1952		if (lcladv & ADVERTISE_1000XPAUSE)
 1953			cap = FLOW_CTRL_RX;
 1954		if (rmtadv & ADVERTISE_1000XPAUSE)
 1955			cap = FLOW_CTRL_TX;
 1956	}
 1957
 1958	return cap;
 1959}
 1960
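/* Apply the negotiated (or forced) flow control settings to the MAC,
 * rewriting MAC_RX_MODE/MAC_TX_MODE only when their values change.
 */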
 1961static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
 1962{
 1963	u8 autoneg;
 1964	u8 flowctrl = 0;
 1965	u32 old_rx_mode = tp->rx_mode;
 1966	u32 old_tx_mode = tp->tx_mode;
 1967
 1968	if (tg3_flag(tp, USE_PHYLIB))
 1969		autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
 1970	else
 1971		autoneg = tp->link_config.autoneg;
 1972
 1973	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
 1974		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
 1975			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
 1976		else
 1977			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
 1978	} else
 1979		flowctrl = tp->link_config.flowctrl;
 1980
 1981	tp->link_config.active_flowctrl = flowctrl;
 1982
 1983	if (flowctrl & FLOW_CTRL_RX)
 1984		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
 1985	else
 1986		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
 1987
 1988	if (old_rx_mode != tp->rx_mode)
 1989		tw32_f(MAC_RX_MODE, tp->rx_mode);
 1990
 1991	if (flowctrl & FLOW_CTRL_TX)
 1992		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
 1993	else
 1994		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
 1995
 1996	if (old_tx_mode != tp->tx_mode)
 1997		tw32_f(MAC_TX_MODE, tp->tx_mode);
 1998}
 1999
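/* phylib link-change callback, registered via phy_connect() in
 * tg3_phy_init(): reprograms MAC port mode, duplex timings and flow
 * control from the PHY state and reports link transitions.
 */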
 2000static void tg3_adjust_link(struct net_device *dev)
 2001{
 2002	u8 oldflowctrl, linkmesg = 0;
 2003	u32 mac_mode, lcl_adv, rmt_adv;
 2004	struct tg3 *tp = netdev_priv(dev);
 2005	struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
 2006
 2007	spin_lock_bh(&tp->lock);
 2008
 2009	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
 2010				    MAC_MODE_HALF_DUPLEX);
 2011
 2012	oldflowctrl = tp->link_config.active_flowctrl;
 2013
 2014	if (phydev->link) {
 2015		lcl_adv = 0;
 2016		rmt_adv = 0;
 2017
 2018		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
 2019			mac_mode |= MAC_MODE_PORT_MODE_MII;
 2020		else if (phydev->speed == SPEED_1000 ||
 2021			 tg3_asic_rev(tp) != ASIC_REV_5785)
 2022			mac_mode |= MAC_MODE_PORT_MODE_GMII;
 2023		else
 2024			mac_mode |= MAC_MODE_PORT_MODE_MII;
 2025
 2026		if (phydev->duplex == DUPLEX_HALF)
 2027			mac_mode |= MAC_MODE_HALF_DUPLEX;
 2028		else {
 2029			lcl_adv = mii_advertise_flowctrl(
 2030				  tp->link_config.flowctrl);
 2031
 2032			if (phydev->pause)
 2033				rmt_adv = LPA_PAUSE_CAP;
 2034			if (phydev->asym_pause)
 2035				rmt_adv |= LPA_PAUSE_ASYM;
 2036		}
 2037
 2038		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
 2039	} else
 2040		mac_mode |= MAC_MODE_PORT_MODE_GMII;
 2041
 2042	if (mac_mode != tp->mac_mode) {
 2043		tp->mac_mode = mac_mode;
 2044		tw32_f(MAC_MODE, tp->mac_mode);
 2045		udelay(40);
 2046	}
 2047
 2048	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
 2049		if (phydev->speed == SPEED_10)
 2050			tw32(MAC_MI_STAT,
 2051			     MAC_MI_STAT_10MBPS_MODE |
 2052			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
 2053		else
 2054			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
 2055	}
 2056
 2057	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
 2058		tw32(MAC_TX_LENGTHS,
 2059		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
 2060		      (6 << TX_LENGTHS_IPG_SHIFT) |
 2061		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
 2062	else
 2063		tw32(MAC_TX_LENGTHS,
 2064		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
 2065		      (6 << TX_LENGTHS_IPG_SHIFT) |
 2066		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
 2067
 2068	if (phydev->link != tp->old_link ||
 2069	    phydev->speed != tp->link_config.active_speed ||
 2070	    phydev->duplex != tp->link_config.active_duplex ||
 2071	    oldflowctrl != tp->link_config.active_flowctrl)
 2072		linkmesg = 1;
 2073
 2074	tp->old_link = phydev->link;
 2075	tp->link_config.active_speed = phydev->speed;
 2076	tp->link_config.active_duplex = phydev->duplex;
 2077
 2078	spin_unlock_bh(&tp->lock);
 2079
 2080	if (linkmesg)
 2081		tg3_link_report(tp);
 2082}
 2083
 2084static int tg3_phy_init(struct tg3 *tp)
 2085{
 2086	struct phy_device *phydev;
 2087
 2088	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
 2089		return 0;
 2090
 2091	/* Bring the PHY back to a known state. */
 2092	tg3_bmcr_reset(tp);
 2093
 2094	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
 2095
 2096	/* Attach the MAC to the PHY. */
 2097	phydev = phy_connect(tp->dev, phydev_name(phydev),
 2098			     tg3_adjust_link, phydev->interface);
 2099	if (IS_ERR(phydev)) {
 2100		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
 2101		return PTR_ERR(phydev);
 2102	}
 2103
 2104	/* Mask with MAC supported features. */
 2105	switch (phydev->interface) {
 2106	case PHY_INTERFACE_MODE_GMII:
 2107	case PHY_INTERFACE_MODE_RGMII:
 2108		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
 2109			phy_set_max_speed(phydev, SPEED_1000);
 2110			phy_support_asym_pause(phydev);
 2111			break;
 2112		}
 2113		fallthrough;
 2114	case PHY_INTERFACE_MODE_MII:
 2115		phy_set_max_speed(phydev, SPEED_100);
 2116		phy_support_asym_pause(phydev);
 2117		break;
 2118	default:
 2119		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
 2120		return -EINVAL;
 2121	}
 2122
 2123	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
 2124
 2125	phy_attached_info(phydev);
 2126
 2127	return 0;
 2128}
 2129
 2130static void tg3_phy_start(struct tg3 *tp)
 2131{
 2132	struct phy_device *phydev;
 2133
 2134	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
 2135		return;
 2136
 2137	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
 2138
 2139	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
 2140		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
 2141		phydev->speed = tp->link_config.speed;
 2142		phydev->duplex = tp->link_config.duplex;
 2143		phydev->autoneg = tp->link_config.autoneg;
 2144		ethtool_convert_legacy_u32_to_link_mode(
 2145			phydev->advertising, tp->link_config.advertising);
 2146	}
 2147
 2148	phy_start(phydev);
 2149
 2150	phy_start_aneg(phydev);
 2151}
 2152
 2153static void tg3_phy_stop(struct tg3 *tp)
 2154{
 2155	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
 2156		return;
 2157
 2158	phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
 2159}
 2160
 2161static void tg3_phy_fini(struct tg3 *tp)
 2162{
 2163	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
 2164		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
 2165		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
 2166	}
 2167}
 2168
 2169static int tg3_phy_set_extloopbk(struct tg3 *tp)
 2170{
 2171	int err;
 2172	u32 val;
 2173
 2174	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
 2175		return 0;
 2176
 2177	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
 2178		/* Cannot do read-modify-write on 5401 */
 2179		err = tg3_phy_auxctl_write(tp,
 2180					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
 2181					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
 2182					   0x4c20);
 2183		goto done;
 2184	}
 2185
 2186	err = tg3_phy_auxctl_read(tp,
 2187				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
 2188	if (err)
 2189		return err;
 2190
 2191	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
 2192	err = tg3_phy_auxctl_write(tp,
 2193				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
 2194
 2195done:
 2196	return err;
 2197}
 2198
 2199static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
 2200{
 2201	u32 phytest;
 2202
 2203	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
 2204		u32 phy;
 2205
 2206		tg3_writephy(tp, MII_TG3_FET_TEST,
 2207			     phytest | MII_TG3_FET_SHADOW_EN);
 2208		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
 2209			if (enable)
 2210				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
 2211			else
 2212				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
 2213			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
 2214		}
 2215		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
 2216	}
 2217}
 2218
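/* Enable or disable PHY auto power-down (APD) via the misc shadow
 * registers; FET-style PHYs use a different shadow access scheme.
 */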
 2219static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
 2220{
 2221	u32 reg;
 2222
 2223	if (!tg3_flag(tp, 5705_PLUS) ||
 2224	    (tg3_flag(tp, 5717_PLUS) &&
 2225	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
 2226		return;
 2227
 2228	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
 2229		tg3_phy_fet_toggle_apd(tp, enable);
 2230		return;
 2231	}
 2232
 2233	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
 2234	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
 2235	      MII_TG3_MISC_SHDW_SCR5_SDTL |
 2236	      MII_TG3_MISC_SHDW_SCR5_C125OE;
 2237	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
 2238		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
 2239
	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);

 2243	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
 2244	if (enable)
 2245		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
 2246
 2247	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
 2248}
 2249
 2250static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
 2251{
 2252	u32 phy;
 2253
 2254	if (!tg3_flag(tp, 5705_PLUS) ||
 2255	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
 2256		return;
 2257
 2258	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
 2259		u32 ephy;
 2260
 2261		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
 2262			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
 2263
 2264			tg3_writephy(tp, MII_TG3_FET_TEST,
 2265				     ephy | MII_TG3_FET_SHADOW_EN);
 2266			if (!tg3_readphy(tp, reg, &phy)) {
 2267				if (enable)
 2268					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
 2269				else
 2270					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
 2271				tg3_writephy(tp, reg, phy);
 2272			}
 2273			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
 2274		}
 2275	} else {
 2276		int ret;
 2277
 2278		ret = tg3_phy_auxctl_read(tp,
 2279					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
 2280		if (!ret) {
 2281			if (enable)
 2282				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
 2283			else
 2284				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
 2285			tg3_phy_auxctl_write(tp,
 2286					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
 2287		}
 2288	}
 2289}
 2290
 2291static void tg3_phy_set_wirespeed(struct tg3 *tp)
 2292{
 2293	int ret;
 2294	u32 val;
 2295
 2296	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
 2297		return;
 2298
 2299	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
 2300	if (!ret)
 2301		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
 2302				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
 2303}
 2304
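/* Program PHY DSP coefficients from the chip's one-time-programmable
 * (OTP) value, one bitfield per DSP tap/expansion register.
 */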
 2305static void tg3_phy_apply_otp(struct tg3 *tp)
 2306{
 2307	u32 otp, phy;
 2308
 2309	if (!tp->phy_otp)
 2310		return;
 2311
 2312	otp = tp->phy_otp;
 2313
 2314	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
 2315		return;
 2316
 2317	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
 2318	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
 2319	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
 2320
 2321	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
 2322	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
 2323	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
 2324
 2325	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
 2326	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
 2327	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
 2328
 2329	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
 2330	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
 2331
 2332	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
 2333	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
 2334
 2335	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
 2336	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
 2337	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
 2338
 2339	tg3_phy_toggle_auxctl_smdsp(tp, false);
 2340}
 2341
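/* Snapshot the current EEE state (active, advertised, LP advertised,
 * LPI settings) from the PHY clause-45 registers and the CPMU into
 * *eee, or into tp->eee when eee is NULL.
 */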
 2342static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_keee *eee)
 2343{
 2344	u32 val;
 2345	struct ethtool_keee *dest = &tp->eee;
 2346
 2347	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
 2348		return;
 2349
 2350	if (eee)
 2351		dest = eee;
 2352
 2353	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
 2354		return;
 2355
 2356	/* Pull eee_active */
 2357	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
 2358	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
 2359		dest->eee_active = 1;
 2360	} else
 2361		dest->eee_active = 0;
 2362
 2363	/* Pull lp advertised settings */
 2364	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
 2365		return;
 2366	mii_eee_cap1_mod_linkmode_t(dest->lp_advertised, val);
 2367
 2368	/* Pull advertised and eee_enabled settings */
 2369	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
 2370		return;
 2371	dest->eee_enabled = !!val;
 2372	mii_eee_cap1_mod_linkmode_t(dest->advertised, val);
 2373
 2374	/* Pull tx_lpi_enabled */
 2375	val = tr32(TG3_CPMU_EEE_MODE);
 2376	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
 2377
 2378	/* Pull lpi timer value */
 2379	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
 2380}
 2381
 2382static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
 2383{
 2384	u32 val;
 2385
 2386	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
 2387		return;
 2388
 2389	tp->setlpicnt = 0;
 2390
 2391	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
 2392	    current_link_up &&
 2393	    tp->link_config.active_duplex == DUPLEX_FULL &&
 2394	    (tp->link_config.active_speed == SPEED_100 ||
 2395	     tp->link_config.active_speed == SPEED_1000)) {
 2396		u32 eeectl;
 2397
 2398		if (tp->link_config.active_speed == SPEED_1000)
 2399			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
 2400		else
 2401			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
 2402
 2403		tw32(TG3_CPMU_EEE_CTRL, eeectl);
 2404
 2405		tg3_eee_pull_config(tp, NULL);
 2406		if (tp->eee.eee_active)
 2407			tp->setlpicnt = 2;
 2408	}
 2409
 2410	if (!tp->setlpicnt) {
 2411		if (current_link_up &&
 2412		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
 2413			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
 2414			tg3_phy_toggle_auxctl_smdsp(tp, false);
 2415		}
 2416
 2417		val = tr32(TG3_CPMU_EEE_MODE);
 2418		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
 2419	}
 2420}
 2421
 2422static void tg3_phy_eee_enable(struct tg3 *tp)
 2423{
 2424	u32 val;
 2425
 2426	if (tp->link_config.active_speed == SPEED_1000 &&
 2427	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
 2428	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
 2429	     tg3_flag(tp, 57765_CLASS)) &&
 2430	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
 2431		val = MII_TG3_DSP_TAP26_ALNOKO |
 2432		      MII_TG3_DSP_TAP26_RMRXSTO;
 2433		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
 2434		tg3_phy_toggle_auxctl_smdsp(tp, false);
 2435	}
 2436
 2437	val = tr32(TG3_CPMU_EEE_MODE);
 2438	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
 2439}
 2440
 2441static int tg3_wait_macro_done(struct tg3 *tp)
 2442{
 2443	int limit = 100;
 2444
 2445	while (limit--) {
 2446		u32 tmp32;
 2447
 2448		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
 2449			if ((tmp32 & 0x1000) == 0)
 2450				break;
 2451		}
 2452	}
 2453	if (limit < 0)
 2454		return -EBUSY;
 2455
 2456	return 0;
 2457}
 2458
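/* Write a known test pattern into each of the four PHY DSP channels
 * and read it back; a mismatch or macro timeout asks the caller to
 * reset the PHY and retry.
 */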
 2459static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
 2460{
 2461	static const u32 test_pat[4][6] = {
 2462	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
 2463	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
 2464	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
 2465	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
 2466	};
 2467	int chan;
 2468
 2469	for (chan = 0; chan < 4; chan++) {
 2470		int i;
 2471
 2472		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
 2473			     (chan * 0x2000) | 0x0200);
 2474		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
 2475
 2476		for (i = 0; i < 6; i++)
 2477			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
 2478				     test_pat[chan][i]);
 2479
 2480		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
 2481		if (tg3_wait_macro_done(tp)) {
 2482			*resetp = 1;
 2483			return -EBUSY;
 2484		}
 2485
 2486		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
 2487			     (chan * 0x2000) | 0x0200);
 2488		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
 2489		if (tg3_wait_macro_done(tp)) {
 2490			*resetp = 1;
 2491			return -EBUSY;
 2492		}
 2493
 2494		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
 2495		if (tg3_wait_macro_done(tp)) {
 2496			*resetp = 1;
 2497			return -EBUSY;
 2498		}
 2499
 2500		for (i = 0; i < 6; i += 2) {
 2501			u32 low, high;
 2502
 2503			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
 2504			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
 2505			    tg3_wait_macro_done(tp)) {
 2506				*resetp = 1;
 2507				return -EBUSY;
 2508			}
 2509			low &= 0x7fff;
 2510			high &= 0x000f;
 2511			if (low != test_pat[chan][i] ||
 2512			    high != test_pat[chan][i+1]) {
 2513				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
 2514				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
 2515				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
 2516
 2517				return -EBUSY;
 2518			}
 2519		}
 2520	}
 2521
 2522	return 0;
 2523}
 2524
 2525static int tg3_phy_reset_chanpat(struct tg3 *tp)
 2526{
 2527	int chan;
 2528
 2529	for (chan = 0; chan < 4; chan++) {
 2530		int i;
 2531
 2532		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
 2533			     (chan * 0x2000) | 0x0200);
 2534		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
 2535		for (i = 0; i < 6; i++)
 2536			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
 2537		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
 2538		if (tg3_wait_macro_done(tp))
 2539			return -EBUSY;
 2540	}
 2541
 2542	return 0;
 2543}
 2544
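/* PHY reset workaround for 5703/5704/5705: force 1000 Mbps
 * full-duplex master mode, verify the DSP channel test patterns
 * (retrying with a fresh reset up to 10 times), then restore the
 * original PHY state.
 */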
 2545static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
 2546{
 2547	u32 reg32, phy9_orig;
 2548	int retries, do_phy_reset, err;
 2549
 2550	retries = 10;
 2551	do_phy_reset = 1;
 2552	do {
 2553		if (do_phy_reset) {
 2554			err = tg3_bmcr_reset(tp);
 2555			if (err)
 2556				return err;
 2557			do_phy_reset = 0;
 2558		}
 2559
 2560		/* Disable transmitter and interrupt.  */
 2561		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
 2562			continue;
 2563
 2564		reg32 |= 0x3000;
 2565		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
 2566
 2567		/* Set full-duplex, 1000 mbps.  */
 2568		tg3_writephy(tp, MII_BMCR,
 2569			     BMCR_FULLDPLX | BMCR_SPEED1000);
 2570
 2571		/* Set to master mode.  */
 2572		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
 2573			continue;
 2574
 2575		tg3_writephy(tp, MII_CTRL1000,
 2576			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
 2577
 2578		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
 2579		if (err)
 2580			return err;
 2581
 2582		/* Block the PHY control access.  */
 2583		tg3_phydsp_write(tp, 0x8005, 0x0800);
 2584
 2585		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
 2586		if (!err)
 2587			break;
 2588	} while (--retries);
 2589
 2590	err = tg3_phy_reset_chanpat(tp);
 2591	if (err)
 2592		return err;
 2593
 2594	tg3_phydsp_write(tp, 0x8005, 0x0000);
 2595
 2596	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
 2597	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
 2598
 2599	tg3_phy_toggle_auxctl_smdsp(tp, false);
 2600
 2601	tg3_writephy(tp, MII_CTRL1000, phy9_orig);
 2602
 2603	err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
 2604	if (err)
 2605		return err;
 2606
 2607	reg32 &= ~0x3000;
 2608	tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
 2609
 2610	return 0;
 2611}
 2612
 2613static void tg3_carrier_off(struct tg3 *tp)
 2614{
 2615	netif_carrier_off(tp->dev);
 2616	tp->link_up = false;
 2617}
 2618
 2619static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
 2620{
 2621	if (tg3_flag(tp, ENABLE_ASF))
 2622		netdev_warn(tp->dev,
 2623			    "Management side-band traffic will be interrupted during phy settings change\n");
 2624}
 2625
/* This will reset the tigon3 PHY and reapply the chip-specific
 * workarounds needed to bring it back to a usable state.
 */
 2629static int tg3_phy_reset(struct tg3 *tp)
 2630{
 2631	u32 val, cpmuctrl;
 2632	int err;
 2633
 2634	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
 2635		val = tr32(GRC_MISC_CFG);
 2636		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
 2637		udelay(40);
 2638	}
 2639	err  = tg3_readphy(tp, MII_BMSR, &val);
 2640	err |= tg3_readphy(tp, MII_BMSR, &val);
 2641	if (err != 0)
 2642		return -EBUSY;
 2643
 2644	if (netif_running(tp->dev) && tp->link_up) {
 2645		netif_carrier_off(tp->dev);
 2646		tg3_link_report(tp);
 2647	}
 2648
 2649	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
 2650	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
 2651	    tg3_asic_rev(tp) == ASIC_REV_5705) {
 2652		err = tg3_phy_reset_5703_4_5(tp);
 2653		if (err)
 2654			return err;
 2655		goto out;
 2656	}
 2657
 2658	cpmuctrl = 0;
 2659	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
 2660	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
 2661		cpmuctrl = tr32(TG3_CPMU_CTRL);
 2662		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
 2663			tw32(TG3_CPMU_CTRL,
 2664			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
 2665	}
 2666
 2667	err = tg3_bmcr_reset(tp);
 2668	if (err)
 2669		return err;
 2670
 2671	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
 2672		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
 2673		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
 2674
 2675		tw32(TG3_CPMU_CTRL, cpmuctrl);
 2676	}
 2677
 2678	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
 2679	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
 2680		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
 2681		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
 2682		    CPMU_LSPD_1000MB_MACCLK_12_5) {
 2683			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
 2684			udelay(40);
 2685			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
 2686		}
 2687	}
 2688
 2689	if (tg3_flag(tp, 5717_PLUS) &&
 2690	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
 2691		return 0;
 2692
 2693	tg3_phy_apply_otp(tp);
 2694
 2695	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
 2696		tg3_phy_toggle_apd(tp, true);
 2697	else
 2698		tg3_phy_toggle_apd(tp, false);
 2699
 2700out:
 2701	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
 2702	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
 2703		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
 2704		tg3_phydsp_write(tp, 0x000a, 0x0323);
 2705		tg3_phy_toggle_auxctl_smdsp(tp, false);
 2706	}
 2707
 2708	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
 2709		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
 2710		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
 2711	}
 2712
 2713	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
 2714		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
 2715			tg3_phydsp_write(tp, 0x000a, 0x310b);
 2716			tg3_phydsp_write(tp, 0x201f, 0x9506);
 2717			tg3_phydsp_write(tp, 0x401f, 0x14e2);
 2718			tg3_phy_toggle_auxctl_smdsp(tp, false);
 2719		}
 2720	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
 2721		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
 2722			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
 2723			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
 2724				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
 2725				tg3_writephy(tp, MII_TG3_TEST1,
 2726					     MII_TG3_TEST1_TRIM_EN | 0x4);
 2727			} else
 2728				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
 2729
 2730			tg3_phy_toggle_auxctl_smdsp(tp, false);
 2731		}
 2732	}
 2733
	/* Set Extended packet length bit (bit 14) on all chips that
	 * support jumbo frames.
	 */
 2736	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
 2737		/* Cannot do read-modify-write on 5401 */
 2738		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
 2739	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
 2740		/* Set bit 14 with read-modify-write to preserve other bits */
 2741		err = tg3_phy_auxctl_read(tp,
 2742					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
 2743		if (!err)
 2744			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
 2745					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
 2746	}
 2747
 2748	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
 2749	 * jumbo frames transmission.
 2750	 */
 2751	if (tg3_flag(tp, JUMBO_CAPABLE)) {
 2752		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
 2753			tg3_writephy(tp, MII_TG3_EXT_CTRL,
 2754				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
 2755	}
 2756
 2757	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
 2758		/* adjust output voltage */
 2759		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
 2760	}
 2761
 2762	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
 2763		tg3_phydsp_write(tp, 0xffb, 0x4000);
 2764
 2765	tg3_phy_toggle_automdix(tp, true);
 2766	tg3_phy_set_wirespeed(tp);
 2767	return 0;
 2768}
 2769
 2770#define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
 2771#define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
 2772#define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
 2773					  TG3_GPIO_MSG_NEED_VAUX)
 2774#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
 2775	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
 2776	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
 2777	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
 2778	 (TG3_GPIO_MSG_DRVR_PRES << 12))
 2779
 2780#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
 2781	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
 2782	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
 2783	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
 2784	 (TG3_GPIO_MSG_NEED_VAUX << 12))
 2785
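/* Publish this PCI function's power-source vote (driver present,
 * needs Vaux) in the shared status word, four bits per function, and
 * return the combined votes of all functions.
 */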
 2786static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
 2787{
 2788	u32 status, shift;
 2789
 2790	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
 2791	    tg3_asic_rev(tp) == ASIC_REV_5719)
 2792		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
 2793	else
 2794		status = tr32(TG3_CPMU_DRV_STATUS);
 2795
 2796	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
 2797	status &= ~(TG3_GPIO_MSG_MASK << shift);
 2798	status |= (newstat << shift);
 2799
 2800	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
 2801	    tg3_asic_rev(tp) == ASIC_REV_5719)
 2802		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
 2803	else
 2804		tw32(TG3_CPMU_DRV_STATUS, status);
 2805
 2806	return status >> TG3_APE_GPIO_MSG_SHIFT;
 2807}
 2808
 2809static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
 2810{
 2811	if (!tg3_flag(tp, IS_NIC))
 2812		return 0;
 2813
 2814	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
 2815	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
 2816	    tg3_asic_rev(tp) == ASIC_REV_5720) {
 2817		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
 2818			return -EIO;
 2819
 2820		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
 2821
 2822		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
 2823			    TG3_GRC_LCLCTL_PWRSW_DELAY);
 2824
 2825		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
 2826	} else {
 2827		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
 2828			    TG3_GRC_LCLCTL_PWRSW_DELAY);
 2829	}
 2830
 2831	return 0;
 2832}
 2833
 2834static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
 2835{
 2836	u32 grc_local_ctrl;
 2837
 2838	if (!tg3_flag(tp, IS_NIC) ||
 2839	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
 2840	    tg3_asic_rev(tp) == ASIC_REV_5701)
 2841		return;
 2842
 2843	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
 2844
 2845	tw32_wait_f(GRC_LOCAL_CTRL,
 2846		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
 2847		    TG3_GRC_LCLCTL_PWRSW_DELAY);
 2848
 2849	tw32_wait_f(GRC_LOCAL_CTRL,
 2850		    grc_local_ctrl,
 2851		    TG3_GRC_LCLCTL_PWRSW_DELAY);
 2852
 2853	tw32_wait_f(GRC_LOCAL_CTRL,
 2854		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
 2855		    TG3_GRC_LCLCTL_PWRSW_DELAY);
 2856}
 2857
 2858static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
 2859{
 2860	if (!tg3_flag(tp, IS_NIC))
 2861		return;
 2862
 2863	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
 2864	    tg3_asic_rev(tp) == ASIC_REV_5701) {
 2865		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
 2866			    (GRC_LCLCTRL_GPIO_OE0 |
 2867			     GRC_LCLCTRL_GPIO_OE1 |
 2868			     GRC_LCLCTRL_GPIO_OE2 |
 2869			     GRC_LCLCTRL_GPIO_OUTPUT0 |
 2870			     GRC_LCLCTRL_GPIO_OUTPUT1),
 2871			    TG3_GRC_LCLCTL_PWRSW_DELAY);
 2872	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
 2873		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
 2874		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
 2875		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
 2876				     GRC_LCLCTRL_GPIO_OE1 |
 2877				     GRC_LCLCTRL_GPIO_OE2 |
 2878				     GRC_LCLCTRL_GPIO_OUTPUT0 |
 2879				     GRC_LCLCTRL_GPIO_OUTPUT1 |
 2880				     tp->grc_local_ctrl;
 2881		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
 2882			    TG3_GRC_LCLCTL_PWRSW_DELAY);
 2883
 2884		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
 2885		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
 2886			    TG3_GRC_LCLCTL_PWRSW_DELAY);
 2887
 2888		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
 2889		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
 2890			    TG3_GRC_LCLCTL_PWRSW_DELAY);
 2891	} else {
 2892		u32 no_gpio2;
 2893		u32 grc_local_ctrl = 0;
 2894
 2895		/* Workaround to prevent overdrawing Amps. */
 2896		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
 2897			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
 2898			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
 2899				    grc_local_ctrl,
 2900				    TG3_GRC_LCLCTL_PWRSW_DELAY);
 2901		}
 2902
 2903		/* On 5753 and variants, GPIO2 cannot be used. */
 2904		no_gpio2 = tp->nic_sram_data_cfg &
 2905			   NIC_SRAM_DATA_CFG_NO_GPIO2;
 2906
 2907		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
 2908				  GRC_LCLCTRL_GPIO_OE1 |
 2909				  GRC_LCLCTRL_GPIO_OE2 |
 2910				  GRC_LCLCTRL_GPIO_OUTPUT1 |
 2911				  GRC_LCLCTRL_GPIO_OUTPUT2;
 2912		if (no_gpio2) {
 2913			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
 2914					    GRC_LCLCTRL_GPIO_OUTPUT2);
 2915		}
 2916		tw32_wait_f(GRC_LOCAL_CTRL,
 2917			    tp->grc_local_ctrl | grc_local_ctrl,
 2918			    TG3_GRC_LCLCTL_PWRSW_DELAY);
 2919
 2920		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
 2921
 2922		tw32_wait_f(GRC_LOCAL_CTRL,
 2923			    tp->grc_local_ctrl | grc_local_ctrl,
 2924			    TG3_GRC_LCLCTL_PWRSW_DELAY);
 2925
 2926		if (!no_gpio2) {
 2927			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
 2928			tw32_wait_f(GRC_LOCAL_CTRL,
 2929				    tp->grc_local_ctrl | grc_local_ctrl,
 2930				    TG3_GRC_LCLCTL_PWRSW_DELAY);
 2931		}
 2932	}
 2933}
 2934
 2935static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
 2936{
 2937	u32 msg = 0;
 2938
 2939	/* Serialize power state transitions */
 2940	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
 2941		return;
 2942
 2943	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
 2944		msg = TG3_GPIO_MSG_NEED_VAUX;
 2945
 2946	msg = tg3_set_function_status(tp, msg);
 2947
 2948	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
 2949		goto done;
 2950
 2951	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
 2952		tg3_pwrsrc_switch_to_vaux(tp);
 2953	else
 2954		tg3_pwrsrc_die_with_vmain(tp);
 2955
 2956done:
 2957	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
 2958}
 2959
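/* Choose between Vmain and Vaux for this NIC, taking a peer function
 * that shares the power source into account: WoL or management
 * firmware requires Vaux, otherwise Vmain suffices.
 */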
 2960static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
 2961{
 2962	bool need_vaux = false;
 2963
 2964	/* The GPIOs do something completely different on 57765. */
 2965	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
 2966		return;
 2967
 2968	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
 2969	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
 2970	    tg3_asic_rev(tp) == ASIC_REV_5720) {
 2971		tg3_frob_aux_power_5717(tp, include_wol ?
 2972					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
 2973		return;
 2974	}
 2975
 2976	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
 2977		struct net_device *dev_peer;
 2978
 2979		dev_peer = pci_get_drvdata(tp->pdev_peer);
 2980
 2981		/* remove_one() may have been run on the peer. */
 2982		if (dev_peer) {
 2983			struct tg3 *tp_peer = netdev_priv(dev_peer);
 2984
 2985			if (tg3_flag(tp_peer, INIT_COMPLETE))
 2986				return;
 2987
 2988			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
 2989			    tg3_flag(tp_peer, ENABLE_ASF))
 2990				need_vaux = true;
 2991		}
 2992	}
 2993
 2994	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
 2995	    tg3_flag(tp, ENABLE_ASF))
 2996		need_vaux = true;
 2997
 2998	if (need_vaux)
 2999		tg3_pwrsrc_switch_to_vaux(tp);
 3000	else
 3001		tg3_pwrsrc_die_with_vmain(tp);
 3002}
 3003
 3004static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
 3005{
 3006	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
 3007		return 1;
 3008	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
 3009		if (speed != SPEED_10)
 3010			return 1;
 3011	} else if (speed == SPEED_10)
 3012		return 1;
 3013
 3014	return 0;
 3015}
 3016
 3017static bool tg3_phy_power_bug(struct tg3 *tp)
 3018{
 3019	switch (tg3_asic_rev(tp)) {
 3020	case ASIC_REV_5700:
 3021	case ASIC_REV_5704:
 3022		return true;
 3023	case ASIC_REV_5780:
 3024		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
 3025			return true;
 3026		return false;
 3027	case ASIC_REV_5717:
 3028		if (!tp->pci_fn)
 3029			return true;
 3030		return false;
 3031	case ASIC_REV_5719:
 3032	case ASIC_REV_5720:
 3033		if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
 3034		    !tp->pci_fn)
 3035			return true;
 3036		return false;
 3037	}
 3038
 3039	return false;
 3040}
 3041
 3042static bool tg3_phy_led_bug(struct tg3 *tp)
 3043{
 3044	switch (tg3_asic_rev(tp)) {
 3045	case ASIC_REV_5719:
 3046	case ASIC_REV_5720:
 3047		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
 3048		    !tp->pci_fn)
 3049			return true;
 3050		return false;
 3051	}
 3052
 3053	return false;
 3054}
 3055
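/* Prepare the PHY for a low-power state, skipping the final power
 * down on chips where hardware bugs make it unsafe (see
 * tg3_phy_power_bug()).
 */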
 3056static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
 3057{
 3058	u32 val;
 3059
 3060	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
 3061		return;
 3062
 3063	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
 3064		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
 3065			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
 3066			u32 serdes_cfg = tr32(MAC_SERDES_CFG);
 3067
 3068			sg_dig_ctrl |=
 3069				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
 3070			tw32(SG_DIG_CTRL, sg_dig_ctrl);
 3071			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
 3072		}
 3073		return;
 3074	}
 3075
 3076	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
 3077		tg3_bmcr_reset(tp);
 3078		val = tr32(GRC_MISC_CFG);
 3079		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
 3080		udelay(40);
 3081		return;
 3082	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
 3083		u32 phytest;
 3084		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
 3085			u32 phy;
 3086
 3087			tg3_writephy(tp, MII_ADVERTISE, 0);
 3088			tg3_writephy(tp, MII_BMCR,
 3089				     BMCR_ANENABLE | BMCR_ANRESTART);
 3090
 3091			tg3_writephy(tp, MII_TG3_FET_TEST,
 3092				     phytest | MII_TG3_FET_SHADOW_EN);
 3093			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
 3094				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
 3095				tg3_writephy(tp,
 3096					     MII_TG3_FET_SHDW_AUXMODE4,
 3097					     phy);
 3098			}
 3099			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
 3100		}
 3101		return;
 3102	} else if (do_low_power) {
 3103		if (!tg3_phy_led_bug(tp))
 3104			tg3_writephy(tp, MII_TG3_EXT_CTRL,
 3105				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
 3106
 3107		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
 3108		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
 3109		      MII_TG3_AUXCTL_PCTL_VREG_11V;
 3110		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
 3111	}
 3112
 3113	/* The PHY should not be powered down on some chips because
 3114	 * of bugs.
 3115	 */
 3116	if (tg3_phy_power_bug(tp))
 3117		return;
 3118
 3119	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
 3120	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
 3121		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
 3122		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
 3123		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
 3124		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
 3125	}
 3126
 3127	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
 3128}
 3129
 3130/* tp->lock is held. */
 3131static int tg3_nvram_lock(struct tg3 *tp)
 3132{
 3133	if (tg3_flag(tp, NVRAM)) {
 3134		int i;
 3135
 3136		if (tp->nvram_lock_cnt == 0) {
 3137			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
 3138			for (i = 0; i < 8000; i++) {
 3139				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
 3140					break;
 3141				udelay(20);
 3142			}
 3143			if (i == 8000) {
 3144				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
 3145				return -ENODEV;
 3146			}
 3147		}
 3148		tp->nvram_lock_cnt++;
 3149	}
 3150	return 0;
 3151}
 3152
 3153/* tp->lock is held. */
 3154static void tg3_nvram_unlock(struct tg3 *tp)
 3155{
 3156	if (tg3_flag(tp, NVRAM)) {
 3157		if (tp->nvram_lock_cnt > 0)
 3158			tp->nvram_lock_cnt--;
 3159		if (tp->nvram_lock_cnt == 0)
 3160			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
 3161	}
 3162}
 3163
 3164/* tp->lock is held. */
 3165static void tg3_enable_nvram_access(struct tg3 *tp)
 3166{
 3167	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
 3168		u32 nvaccess = tr32(NVRAM_ACCESS);
 3169
 3170		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
 3171	}
 3172}
 3173
 3174/* tp->lock is held. */
 3175static void tg3_disable_nvram_access(struct tg3 *tp)
 3176{
 3177	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
 3178		u32 nvaccess = tr32(NVRAM_ACCESS);
 3179
 3180		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
 3181	}
 3182}
 3183
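/* Read one 32-bit word through the legacy SEEPROM interface: program
 * the address along with the START bit, then poll for COMPLETE for
 * up to roughly one second.
 */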
 3184static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
 3185					u32 offset, u32 *val)
 3186{
 3187	u32 tmp;
 3188	int i;
 3189
 3190	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
 3191		return -EINVAL;
 3192
 3193	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
 3194					EEPROM_ADDR_DEVID_MASK |
 3195					EEPROM_ADDR_READ);
 3196	tw32(GRC_EEPROM_ADDR,
 3197	     tmp |
 3198	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
 3199	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
 3200	      EEPROM_ADDR_ADDR_MASK) |
 3201	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
 3202
 3203	for (i = 0; i < 1000; i++) {
 3204		tmp = tr32(GRC_EEPROM_ADDR);
 3205
 3206		if (tmp & EEPROM_ADDR_COMPLETE)
 3207			break;
 3208		msleep(1);
 3209	}
 3210	if (!(tmp & EEPROM_ADDR_COMPLETE))
 3211		return -EBUSY;
 3212
 3213	tmp = tr32(GRC_EEPROM_DATA);
 3214
 3215	/*
 3216	 * The data will always be opposite the native endian
 3217	 * format.  Perform a blind byteswap to compensate.
 3218	 */
 3219	*val = swab32(tmp);
 3220
 3221	return 0;
 3222}
 3223
 3224#define NVRAM_CMD_TIMEOUT 10000
 3225
 3226static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
 3227{
 3228	int i;
 3229
 3230	tw32(NVRAM_CMD, nvram_cmd);
 3231	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
 3232		usleep_range(10, 40);
 3233		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
 3234			udelay(10);
 3235			break;
 3236		}
 3237	}
 3238
 3239	if (i == NVRAM_CMD_TIMEOUT)
 3240		return -EBUSY;
 3241
 3242	return 0;
 3243}
 3244
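/* Atmel AT45DB0X1B-style flashes use non-power-of-two pages
 * (typically 264 bytes), so linear NVRAM offsets must be converted
 * to page-number/page-offset form before being handed to the
 * hardware; tg3_nvram_logical_addr() below is the inverse mapping.
 */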
 3245static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
 3246{
 3247	if (tg3_flag(tp, NVRAM) &&
 3248	    tg3_flag(tp, NVRAM_BUFFERED) &&
 3249	    tg3_flag(tp, FLASH) &&
 3250	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
 3251	    (tp->nvram_jedecnum == JEDEC_ATMEL))
 3252
 3253		addr = ((addr / tp->nvram_pagesize) <<
 3254			ATMEL_AT45DB0X1B_PAGE_POS) +
 3255		       (addr % tp->nvram_pagesize);
 3256
 3257	return addr;
 3258}
 3259
 3260static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
 3261{
 3262	if (tg3_flag(tp, NVRAM) &&
 3263	    tg3_flag(tp, NVRAM_BUFFERED) &&
 3264	    tg3_flag(tp, FLASH) &&
 3265	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
 3266	    (tp->nvram_jedecnum == JEDEC_ATMEL))
 3267
 3268		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
 3269			tp->nvram_pagesize) +
 3270		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
 3271
 3272	return addr;
 3273}
 3274
 3275/* NOTE: Data read in from NVRAM is byteswapped according to
 3276 * the byteswapping settings for all other register accesses.
 3277 * tg3 devices are BE devices, so on a BE machine, the data
 3278 * returned will be exactly as it is seen in NVRAM.  On a LE
 3279 * machine, the 32-bit value will be byteswapped.
 3280 */
 3281static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
 3282{
 3283	int ret;
 3284
 3285	if (!tg3_flag(tp, NVRAM))
 3286		return tg3_nvram_read_using_eeprom(tp, offset, val);
 3287
 3288	offset = tg3_nvram_phys_addr(tp, offset);
 3289
 3290	if (offset > NVRAM_ADDR_MSK)
 3291		return -EINVAL;
 3292
 3293	ret = tg3_nvram_lock(tp);
 3294	if (ret)
 3295		return ret;
 3296
 3297	tg3_enable_nvram_access(tp);
 3298
 3299	tw32(NVRAM_ADDR, offset);
 3300	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
 3301		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
 3302
 3303	if (ret == 0)
 3304		*val = tr32(NVRAM_RDDATA);
 3305
 3306	tg3_disable_nvram_access(tp);
 3307
 3308	tg3_nvram_unlock(tp);
 3309
 3310	return ret;
 3311}
 3312
 3313/* Ensures NVRAM data is in bytestream format. */
 3314static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
 3315{
 3316	u32 v;
 3317	int res = tg3_nvram_read(tp, offset, &v);
 3318	if (!res)
 3319		*val = cpu_to_be32(v);
 3320	return res;
 3321}
 3322
 3323static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
 3324				    u32 offset, u32 len, u8 *buf)
 3325{
 3326	int i, j, rc = 0;
 3327	u32 val;
 3328
 3329	for (i = 0; i < len; i += 4) {
 3330		u32 addr;
 3331		__be32 data;
 3332
 3333		addr = offset + i;
 3334
 3335		memcpy(&data, buf + i, 4);
 3336
 3337		/*
 3338		 * The SEEPROM interface expects the data to always be opposite
 3339		 * the native endian format.  We accomplish this by reversing
 3340		 * all the operations that would have been performed on the
 3341		 * data from a call to tg3_nvram_read_be32().
 3342		 */
 3343		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
 3344
 3345		val = tr32(GRC_EEPROM_ADDR);
 3346		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
 3347
 3348		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
 3349			EEPROM_ADDR_READ);
 3350		tw32(GRC_EEPROM_ADDR, val |
 3351			(0 << EEPROM_ADDR_DEVID_SHIFT) |
 3352			(addr & EEPROM_ADDR_ADDR_MASK) |
 3353			EEPROM_ADDR_START |
 3354			EEPROM_ADDR_WRITE);
 3355
 3356		for (j = 0; j < 1000; j++) {
 3357			val = tr32(GRC_EEPROM_ADDR);
 3358
 3359			if (val & EEPROM_ADDR_COMPLETE)
 3360				break;
 3361			msleep(1);
 3362		}
 3363		if (!(val & EEPROM_ADDR_COMPLETE)) {
 3364			rc = -EBUSY;
 3365			break;
 3366		}
 3367	}
 3368
 3369	return rc;
 3370}
 3371
 3372/* offset and length are dword aligned */
 3373static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
 3374		u8 *buf)
 3375{
 3376	int ret = 0;
 3377	u32 pagesize = tp->nvram_pagesize;
 3378	u32 pagemask = pagesize - 1;
 3379	u32 nvram_cmd;
 3380	u8 *tmp;
 3381
 3382	tmp = kmalloc(pagesize, GFP_KERNEL);
 3383	if (tmp == NULL)
 3384		return -ENOMEM;
 3385
 3386	while (len) {
 3387		int j;
 3388		u32 phy_addr, page_off, size;
 3389
 3390		phy_addr = offset & ~pagemask;
 3391
 3392		for (j = 0; j < pagesize; j += 4) {
 3393			ret = tg3_nvram_read_be32(tp, phy_addr + j,
 3394						  (__be32 *) (tmp + j));
 3395			if (ret)
 3396				break;
 3397		}
 3398		if (ret)
 3399			break;
 3400
 3401		page_off = offset & pagemask;
 3402		size = pagesize;
 3403		if (len < size)
 3404			size = len;
 3405
 3406		len -= size;
 3407
 3408		memcpy(tmp + page_off, buf, size);
 3409
 3410		offset = offset + (pagesize - page_off);
 3411
 3412		tg3_enable_nvram_access(tp);
 3413
 3414		/*
 3415		 * Before we can erase the flash page, we need
 3416		 * to issue a special "write enable" command.
 3417		 */
 3418		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
 3419
 3420		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
 3421			break;
 3422
 3423		/* Erase the target page */
 3424		tw32(NVRAM_ADDR, phy_addr);
 3425
 3426		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
 3427			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
 3428
 3429		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
 3430			break;
 3431
 3432		/* Issue another write enable to start the write. */
 3433		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
 3434
 3435		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
 3436			break;
 3437
 3438		for (j = 0; j < pagesize; j += 4) {
 3439			__be32 data;
 3440
 3441			data = *((__be32 *) (tmp + j));
 3442
 3443			tw32(NVRAM_WRDATA, be32_to_cpu(data));
 3444
 3445			tw32(NVRAM_ADDR, phy_addr + j);
 3446
 3447			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
 3448				NVRAM_CMD_WR;
 3449
 3450			if (j == 0)
 3451				nvram_cmd |= NVRAM_CMD_FIRST;
 3452			else if (j == (pagesize - 4))
 3453				nvram_cmd |= NVRAM_CMD_LAST;
 3454
 3455			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
 3456			if (ret)
 3457				break;
 3458		}
 3459		if (ret)
 3460			break;
 3461	}
 3462
 3463	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
 3464	tg3_nvram_exec_cmd(tp, nvram_cmd);
 3465
 3466	kfree(tmp);
 3467
 3468	return ret;
 3469}
 3470
 3471/* offset and length are dword aligned */
 3472static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
 3473		u8 *buf)
 3474{
 3475	int i, ret = 0;
 3476
 3477	for (i = 0; i < len; i += 4, offset += 4) {
 3478		u32 page_off, phy_addr, nvram_cmd;
 3479		__be32 data;
 3480
 3481		memcpy(&data, buf + i, 4);
 3482		tw32(NVRAM_WRDATA, be32_to_cpu(data));
 3483
 3484		page_off = offset % tp->nvram_pagesize;
 3485
 3486		phy_addr = tg3_nvram_phys_addr(tp, offset);
 3487
 3488		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
 3489
 3490		if (page_off == 0 || i == 0)
 3491			nvram_cmd |= NVRAM_CMD_FIRST;
 3492		if (page_off == (tp->nvram_pagesize - 4))
 3493			nvram_cmd |= NVRAM_CMD_LAST;
 3494
 3495		if (i == (len - 4))
 3496			nvram_cmd |= NVRAM_CMD_LAST;
 3497
 3498		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
 3499		    !tg3_flag(tp, FLASH) ||
 3500		    !tg3_flag(tp, 57765_PLUS))
 3501			tw32(NVRAM_ADDR, phy_addr);
 3502
 3503		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
 3504		    !tg3_flag(tp, 5755_PLUS) &&
 3505		    (tp->nvram_jedecnum == JEDEC_ST) &&
 3506		    (nvram_cmd & NVRAM_CMD_FIRST)) {
 3507			u32 cmd;
 3508
 3509			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
 3510			ret = tg3_nvram_exec_cmd(tp, cmd);
 3511			if (ret)
 3512				break;
 3513		}
 3514		if (!tg3_flag(tp, FLASH)) {
 3515			/* We always do complete word writes to eeprom. */
 3516			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
 3517		}
 3518
 3519		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
 3520		if (ret)
 3521			break;
 3522	}
 3523	return ret;
 3524}
 3525
 3526/* offset and length are dword aligned */
 3527static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
 3528{
 3529	int ret;
 3530
 3531	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
 3532		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
 3533		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
 3534		udelay(40);
 3535	}
 3536
 3537	if (!tg3_flag(tp, NVRAM)) {
 3538		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
 3539	} else {
 3540		u32 grc_mode;
 3541
 3542		ret = tg3_nvram_lock(tp);
 3543		if (ret)
 3544			return ret;
 3545
 3546		tg3_enable_nvram_access(tp);
 3547		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
 3548			tw32(NVRAM_WRITE1, 0x406);
 3549
 3550		grc_mode = tr32(GRC_MODE);
 3551		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
 3552
 3553		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
 3554			ret = tg3_nvram_write_block_buffered(tp, offset, len,
 3555				buf);
 3556		} else {
 3557			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
 3558				buf);
 3559		}
 3560
 3561		grc_mode = tr32(GRC_MODE);
 3562		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
 3563
 3564		tg3_disable_nvram_access(tp);
 3565		tg3_nvram_unlock(tp);
 3566	}
 3567
 3568	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
 3569		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
 3570		udelay(40);
 3571	}
 3572
 3573	return ret;
 3574}
 3575
 3576#define RX_CPU_SCRATCH_BASE	0x30000
 3577#define RX_CPU_SCRATCH_SIZE	0x04000
 3578#define TX_CPU_SCRATCH_BASE	0x34000
 3579#define TX_CPU_SCRATCH_SIZE	0x04000
 3580
 3581/* tp->lock is held. */
 3582static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
 3583{
 3584	int i;
 3585	const int iters = 10000;
 3586
 3587	for (i = 0; i < iters; i++) {
 3588		tw32(cpu_base + CPU_STATE, 0xffffffff);
 3589		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
 3590		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
 3591			break;
 3592		if (pci_channel_offline(tp->pdev))
 3593			return -EBUSY;
 3594	}
 3595
 3596	return (i == iters) ? -EBUSY : 0;
 3597}
 3598
 3599/* tp->lock is held. */
 3600static int tg3_rxcpu_pause(struct tg3 *tp)
 3601{
 3602	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
 3603
 3604	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
 3605	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
 3606	udelay(10);
 3607
 3608	return rc;
 3609}
 3610
 3611/* tp->lock is held. */
 3612static int tg3_txcpu_pause(struct tg3 *tp)
 3613{
 3614	return tg3_pause_cpu(tp, TX_CPU_BASE);
 3615}
 3616
 3617/* tp->lock is held. */
 3618static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
 3619{
 3620	tw32(cpu_base + CPU_STATE, 0xffffffff);
 3621	tw32_f(cpu_base + CPU_MODE,  0x00000000);
 3622}
 3623
 3624/* tp->lock is held. */
 3625static void tg3_rxcpu_resume(struct tg3 *tp)
 3626{
 3627	tg3_resume_cpu(tp, RX_CPU_BASE);
 3628}
 3629
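/* Halt one of the on-chip RX/TX CPUs.  5906 uses the VCPU halt bit,
 * SSB-core (BCM4785) devices only have an RX CPU, and 5705+ parts
 * must never have their TX CPU halted this way.
 */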
 3630/* tp->lock is held. */
 3631static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
 3632{
 3633	int rc;
 3634
 3635	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
 3636
 3637	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
 3638		u32 val = tr32(GRC_VCPU_EXT_CTRL);
 3639
 3640		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
 3641		return 0;
 3642	}
 3643	if (cpu_base == RX_CPU_BASE) {
 3644		rc = tg3_rxcpu_pause(tp);
 3645	} else {
 3646		/*
 3647		 * There is only an Rx CPU for the 5750 derivative in the
 3648		 * BCM4785.
 3649		 */
 3650		if (tg3_flag(tp, IS_SSB_CORE))
 3651			return 0;
 3652
 3653		rc = tg3_txcpu_pause(tp);
 3654	}
 3655
 3656	if (rc) {
 3657		netdev_err(tp->dev, "%s timed out, %s CPU\n",
 3658			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
 3659		return -ENODEV;
 3660	}
 3661
 3662	/* Clear firmware's nvram arbitration. */
 3663	if (tg3_flag(tp, NVRAM))
 3664		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
 3665	return 0;
 3666}
 3667
 3668static int tg3_fw_data_len(struct tg3 *tp,
 3669			   const struct tg3_firmware_hdr *fw_hdr)
 3670{
 3671	int fw_len;
 3672
	/* Non-fragmented firmware images have one firmware header followed
	 * by a contiguous chunk of data to be written.  The length field in
	 * that header is not the length of data to be written but the
	 * complete length of the bss, so the data length is derived from
	 * tp->fw->size minus headers.
	 *
	 * Fragmented firmware images have a main header followed by
	 * multiple fragments.  Each fragment is identical to a
	 * non-fragmented image: a firmware header followed by a contiguous
	 * chunk of data.  In the main header the length field is unused and
	 * set to 0xffffffff.  In each fragment header the length is the
	 * entire size of that fragment, i.e. fragment data + header length,
	 * so the data length is the header's length field minus
	 * TG3_FW_HDR_LEN.
	 */
 3687	if (tp->fw_len == 0xffffffff)
 3688		fw_len = be32_to_cpu(fw_hdr->len);
 3689	else
 3690		fw_len = tp->fw->size;
 3691
 3692	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
 3693}
 3694
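/* Copy a firmware image (possibly split into fragments on the 57766)
 * into the given CPU's scratch memory, halting that CPU first on the
 * chips that permit it.
 */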
 3695/* tp->lock is held. */
 3696static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
 3697				 u32 cpu_scratch_base, int cpu_scratch_size,
 3698				 const struct tg3_firmware_hdr *fw_hdr)
 3699{
 3700	int err, i;
 3701	void (*write_op)(struct tg3 *, u32, u32);
 3702	int total_len = tp->fw->size;
 3703
 3704	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
 3705		netdev_err(tp->dev,
 3706			   "%s: Trying to load TX cpu firmware on a 5705_PLUS device\n",
 3707			   __func__);
 3708		return -EINVAL;
 3709	}
 3710
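	/* 5705-and-later parts (except the 57766) expose scratch memory
	 * through the memory window; everything else is written via
	 * indirect register accesses.
	 */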
 3711	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
 3712		write_op = tg3_write_mem;
 3713	else
 3714		write_op = tg3_write_indirect_reg32;
 3715
 3716	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
 3717		/* It is possible that bootcode is still loading at this point.
 3718		 * Take the nvram lock before halting the cpu.
 3719		 */
 3720		int lock_err = tg3_nvram_lock(tp);
 3721		err = tg3_halt_cpu(tp, cpu_base);
 3722		if (!lock_err)
 3723			tg3_nvram_unlock(tp);
 3724		if (err)
 3725			goto out;
 3726
 3727		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
 3728			write_op(tp, cpu_scratch_base + i, 0);
 3729		tw32(cpu_base + CPU_STATE, 0xffffffff);
 3730		tw32(cpu_base + CPU_MODE,
 3731		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
 3732	} else {
 3733		/* Subtract additional main header for fragmented firmware and
 3734		 * advance to the first fragment
 3735		 */
 3736		total_len -= TG3_FW_HDR_LEN;
 3737		fw_hdr++;
 3738	}
 3739
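	/* Copy each fragment's payload into CPU scratch memory.  Only the
	 * low 16 bits of the fragment's base_addr are used as the offset
	 * into the scratch area.  For non-fragmented firmware this loop
	 * body is expected to run only once.
	 */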
 3740	do {
 3741		__be32 *fw_data = (__be32 *)(fw_hdr + 1);
 3742		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
 3743			write_op(tp, cpu_scratch_base +
 3744				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
 3745				     (i * sizeof(u32)),
 3746				 be32_to_cpu(fw_data[i]));
 3747
 3748		total_len -= be32_to_cpu(fw_hdr->len);
 3749
 3750		/* Advance to next fragment */
 3751		fw_hdr = (struct tg3_firmware_hdr *)
 3752			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
 3753	} while (total_len > 0);
 3754
 3755	err = 0;
 3756
 3757out:
 3758	return err;
 3759}
 3760
 3761/* tp->lock is held. */
 3762static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
 3763{
 3764	int i;
 3765	const int iters = 5;
 3766
 3767	tw32(cpu_base + CPU_STATE, 0xffffffff);
 3768	tw32_f(cpu_base + CPU_PC, pc);
 3769
 3770	for (i = 0; i < iters; i++) {
 3771		if (tr32(cpu_base + CPU_PC) == pc)
 3772			break;
 3773		tw32(cpu_base + CPU_STATE, 0xffffffff);
 3774		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
 3775		tw32_f(cpu_base + CPU_PC, pc);
 3776		udelay(1000);
 3777	}
 3778
 3779	return (i == iters) ? -EBUSY : 0;
 3780}
 3781
 3782/* tp->lock is held. */
 3783static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
 3784{
 3785	const struct tg3_firmware_hdr *fw_hdr;
 3786	int err;
 3787
 3788	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
 3789
 3790	/* The firmware blob starts with version numbers, followed by the
 3791	 * start address and length.  The length field is the complete
 3792	 * length: end_address_of_bss - start_address_of_text.  The
 3793	 * remainder is the blob, loaded contiguously from the start
 3794	 * address. */
 3795
 3796	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
 3797				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
 3798				    fw_hdr);
 3799	if (err)
 3800		return err;
 3801
 3802	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
 3803				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
 3804				    fw_hdr);
 3805	if (err)
 3806		return err;
 3807
 3808	/* Now startup only the RX cpu. */
 3809	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
 3810				       be32_to_cpu(fw_hdr->base_addr));
 3811	if (err) {
 3812		netdev_err(tp->dev,
 3813			   "%s failed to set RX CPU PC: is %08x, should be %08x\n",
 3814			   __func__, tr32(RX_CPU_BASE + CPU_PC),
 3815			   be32_to_cpu(fw_hdr->base_addr));
 3816		return -ENODEV;
 3817	}
 3818
 3819	tg3_rxcpu_resume(tp);
 3820
 3821	return 0;
 3822}
 3823
 3824static int tg3_validate_rxcpu_state(struct tg3 *tp)
 3825{
 3826	const int iters = 1000;
 3827	int i;
 3828	u32 val;
 3829
 3830	/* Wait for the boot code to complete initialization and enter its
 3831	 * service loop. It is then safe to download service patches.
 3832	 */
 3833	for (i = 0; i < iters; i++) {
 3834		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
 3835			break;
 3836
 3837		udelay(10);
 3838	}
 3839
 3840	if (i == iters) {
 3841		netdev_err(tp->dev, "Boot code not ready for service patches\n");
 3842		return -EBUSY;
 3843	}
 3844
 3845	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
 3846	if (val & 0xff) {
 3847		netdev_warn(tp->dev,
 3848			    "Other patches exist. Not downloading EEE patch\n");
 3849		return -EEXIST;
 3850	}
 3851
 3852	return 0;
 3853}
 3854
 3855/* tp->lock is held. */
 3856static void tg3_load_57766_firmware(struct tg3 *tp)
 3857{
 3858	struct tg3_firmware_hdr *fw_hdr;
 3859
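	/* The EEE service patch is applied only on NVRAM-less devices;
	 * presumably boards with NVRAM carry their own patches.
	 */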
 3860	if (!tg3_flag(tp, NO_NVRAM))
 3861		return;
 3862
 3863	if (tg3_validate_rxcpu_state(tp))
 3864		return;
 3865
 3866	if (!tp->fw)
 3867		return;
 3868
 3869	/* This firmware blob has a different format than older firmware
 3870	 * releases as given below. The main difference is we have fragmented
 3871	 * data to be written to non-contiguous locations.
 3872	 *
 3873	 * In the beginning we have a firmware header identical to other
 3874	 * firmware which consists of version, base addr and length. The length
 3875	 * here is unused and set to 0xffffffff.
 3876	 *
 3877	 * This is followed by a series of firmware fragments, each
 3878	 * individually identical to older firmware images: a firmware
 3879	 * header followed by the data for that fragment. The version
 3880	 * field of the individual fragment header is unused.
 3881	 */
 3882
 3883	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
 3884	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
 3885		return;
 3886
 3887	if (tg3_rxcpu_pause(tp))
 3888		return;
 3889
 3890	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
 3891	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
 3892
 3893	tg3_rxcpu_resume(tp);
 3894}
 3895
 3896/* tp->lock is held. */
 3897static int tg3_load_tso_firmware(struct tg3 *tp)
 3898{
 3899	const struct tg3_firmware_hdr *fw_hdr;
 3900	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
 3901	int err;
 3902
 3903	if (!tg3_flag(tp, FW_TSO))
 3904		return 0;
 3905
 3906	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
 3907
 3908	/* The firmware blob starts with version numbers, followed by the
 3909	 * start address and length.  The length field is the complete
 3910	 * length: end_address_of_bss - start_address_of_text.  The
 3911	 * remainder is the blob, loaded contiguously from the start
 3912	 * address. */
 3913
 3914	cpu_scratch_size = tp->fw_len;
 3915
 3916	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
 3917		cpu_base = RX_CPU_BASE;
 3918		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
 3919	} else {
 3920		cpu_base = TX_CPU_BASE;
 3921		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
 3922		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
 3923	}
 3924
 3925	err = tg3_load_firmware_cpu(tp, cpu_base,
 3926				    cpu_scratch_base, cpu_scratch_size,
 3927				    fw_hdr);
 3928	if (err)
 3929		return err;
 3930
 3931	/* Now startup the cpu. */
 3932	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
 3933				       be32_to_cpu(fw_hdr->base_addr));
 3934	if (err) {
 3935		netdev_err(tp->dev,
 3936			   "%s failed to set CPU PC: is %08x, should be %08x\n",
 3937			   __func__, tr32(cpu_base + CPU_PC),
 3938			   be32_to_cpu(fw_hdr->base_addr));
 3939		return -ENODEV;
 3940	}
 3941
 3942	tg3_resume_cpu(tp, cpu_base);
 3943	return 0;
 3944}
 3945
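/* Program one MAC address into the hardware at the given index.  The
 * 6-byte address is split across a register pair: the two high-order
 * bytes go into the HIGH register and the remaining four into the LOW
 * register.  For example, 00:10:18:aa:bb:cc is written as
 * HIGH=0x00000010 and LOW=0x18aabbcc.  Indices 0-3 use the standard
 * MAC_ADDR registers, indices 4-15 the extended MAC_EXTADDR registers.
 */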
 3946/* tp->lock is held. */
 3947static void __tg3_set_one_mac_addr(struct tg3 *tp, const u8 *mac_addr,
 3948				   int index)
 3949{
 3950	u32 addr_high, addr_low;
 3951
 3952	addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
 3953	addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
 3954		    (mac_addr[4] <<  8) | mac_addr[5]);
 3955
 3956	if (index < 4) {
 3957		tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
 3958		tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
 3959	} else {
 3960		index -= 4;
 3961		tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
 3962		tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
 3963	}
 3964}
 3965
 3966/* tp->lock is held. */
 3967static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
 3968{
 3969	u32 addr_high;
 3970	int i;
 3971
 3972	for (i = 0; i < 4; i++) {
 3973		if (i == 1 && skip_mac_1)
 3974			continue;
 3975		__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
 3976	}
 3977
 3978	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
 3979	    tg3_asic_rev(tp) == ASIC_REV_5704) {
 3980		for (i = 4; i < 16; i++)
 3981			__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
 3982	}
 3983
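	/* Seed the transmit backoff algorithm from the (masked) sum of
	 * the MAC address bytes, so that stations with different
	 * addresses on a shared segment tend to pick different
	 * half-duplex backoff slots.
	 */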
 3984	addr_high = (tp->dev->dev_addr[0] +
 3985		     tp->dev->dev_addr[1] +
 3986		     tp->dev->dev_addr[2] +
 3987		     tp->dev->dev_addr[3] +
 3988		     tp->dev->dev_addr[4] +
 3989		     tp->dev->dev_addr[5]) &
 3990		TX_BACKOFF_SEED_MASK;
 3991	tw32(MAC_TX_BACKOFF_SEED, addr_high);
 3992}
 3993
 3994static void tg3_enable_register_access(struct tg3 *tp)
 3995{
 3996	/*
 3997	 * Make sure register accesses (indirect or otherwise) will function
 3998	 * correctly.
 3999	 */
 4000	pci_write_config_dword(tp->pdev,
 4001			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
 4002}
 4003
 4004static int tg3_power_up(struct tg3 *tp)
 4005{
 4006	int err;
 4007
 4008	tg3_enable_register_access(tp);
 4009
 4010	err = pci_set_power_state(tp->pdev, PCI_D0);
 4011	if (!err) {
 4012		/* Switch out of Vaux if it is a NIC */
 4013		tg3_pwrsrc_switch_to_vmain(tp);
 4014	} else {
 4015		netdev_err(tp->dev, "Transition to D0 failed\n");
 4016	}
 4017
 4018	return err;
 4019}
 4020
 4021static int tg3_setup_phy(struct tg3 *, bool);
 4022
 4023static void tg3_power_down_prepare(struct tg3 *tp)
 4024{
 4025	u32 misc_host_ctrl;
 4026	bool device_should_wake, do_low_power;
 4027
 4028	tg3_enable_register_access(tp);
 4029
 4030	/* Restore the CLKREQ setting. */
 4031	if (tg3_flag(tp, CLKREQ_BUG))
 4032		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
 4033					 PCI_EXP_LNKCTL_CLKREQ_EN);
 4034
 4035	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
 4036	tw32(TG3PCI_MISC_HOST_CTRL,
 4037	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
 4038
 4039	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
 4040			     tg3_flag(tp, WOL_ENABLE);
 4041
 4042	if (tg3_flag(tp, USE_PHYLIB)) {
 4043		do_low_power = false;
 4044		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
 4045		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
 4046			__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
 4047			struct phy_device *phydev;
 4048			u32 phyid;
 4049
 4050			phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
 4051
 4052			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
 4053
 4054			tp->link_config.speed = phydev->speed;
 4055			tp->link_config.duplex = phydev->duplex;
 4056			tp->link_config.autoneg = phydev->autoneg;
 4057			ethtool_convert_link_mode_to_legacy_u32(
 4058				&tp->link_config.advertising,
 4059				phydev->advertising);
 4060
 4061			linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
 4062			linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
 4063					 advertising);
 4064			linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
 4065					 advertising);
 4066			linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
 4067					 advertising);
 4068
 4069			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
 4070				if (tg3_flag(tp, WOL_SPEED_100MB)) {
 4071					linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
 4072							 advertising);
 4073					linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
 4074							 advertising);
 4075					linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
 4076							 advertising);
 4077				} else {
 4078					linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
 4079							 advertising);
 4080				}
 4081			}
 4082
 4083			linkmode_copy(phydev->advertising, advertising);
 4084			phy_start_aneg(phydev);
 4085
 4086			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
 4087			if (phyid != PHY_ID_BCMAC131) {
 4088				phyid &= PHY_BCM_OUI_MASK;
 4089				if (phyid == PHY_BCM_OUI_1 ||
 4090				    phyid == PHY_BCM_OUI_2 ||
 4091				    phyid == PHY_BCM_OUI_3)
 4092					do_low_power = true;
 4093			}
 4094		}
 4095	} else {
 4096		do_low_power = true;
 4097
 4098		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
 4099			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
 4100
 4101		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
 4102			tg3_setup_phy(tp, false);
 4103	}
 4104
 4105	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
 4106		u32 val;
 4107
 4108		val = tr32(GRC_VCPU_EXT_CTRL);
 4109		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
 4110	} else if (!tg3_flag(tp, ENABLE_ASF)) {
 4111		int i;
 4112		u32 val;
 4113
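		/* With ASF disabled, poll the firmware status mailbox for
		 * up to ~200 ms, apparently to let the boot code signal
		 * completion before the device is powered down.
		 */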
 4114		for (i = 0; i < 200; i++) {
 4115			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
 4116			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
 4117				break;
 4118			msleep(1);
 4119		}
 4120	}
 4121	if (tg3_flag(tp, WOL_CAP))
 4122		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
 4123						     WOL_DRV_STATE_SHUTDOWN |
 4124						     WOL_DRV_WOL |
 4125						     WOL_SET_MAGIC_PKT);
 4126
 4127	if (device_should_wake) {
 4128		u32 mac_mode;
 4129
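		/* Leave the MAC in a WOL-capable state: select a port mode
		 * matching the PHY, enable magic packet detection and, on
		 * ASF/APE-enabled devices, keep frames flowing in WOL.
		 */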
 4130		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
 4131			if (do_low_power &&
 4132			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
 4133				tg3_phy_auxctl_write(tp,
 4134					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
 4135					       MII_TG3_AUXCTL_PCTL_WOL_EN |
 4136					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
 4137					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
 4138				udelay(40);
 4139			}
 4140
 4141			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
 4142				mac_mode = MAC_MODE_PORT_MODE_GMII;
 4143			else if (tp->phy_flags &
 4144				 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
 4145				if (tp->link_config.active_speed == SPEED_1000)
 4146					mac_mode = MAC_MODE_PORT_MODE_GMII;
 4147				else
 4148					mac_mode = MAC_MODE_PORT_MODE_MII;
 4149			} else
 4150				mac_mode = MAC_MODE_PORT_MODE_MII;
 4151
 4152			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
 4153			if (tg3_asic_rev(tp) == ASIC_REV_5700) {
 4154				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
 4155					     SPEED_100 : SPEED_10;
 4156				if (tg3_5700_link_polarity(tp, speed))
 4157					mac_mode |= MAC_MODE_LINK_POLARITY;
 4158				else
 4159					mac_mode &= ~MAC_MODE_LINK_POLARITY;
 4160			}
 4161		} else {
 4162			mac_mode = MAC_MODE_PORT_MODE_TBI;
 4163		}
 4164
 4165		if (!tg3_flag(tp, 5750_PLUS))
 4166			tw32(MAC_LED_CTRL, tp->led_ctrl);
 4167
 4168		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
 4169		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
 4170		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
 4171			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
 4172
 4173		if (tg3_flag(tp, ENABLE_APE))
 4174			mac_mode |= MAC_MODE_APE_TX_EN |
 4175				    MAC_MODE_APE_RX_EN |
 4176				    MAC_MODE_TDE_ENABLE;
 4177
 4178		tw32_f(MAC_MODE, mac_mode);
 4179		udelay(100);
 4180
 4181		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
 4182		udelay(10);
 4183	}
 4184
 4185	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
 4186	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
 4187	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
 4188		u32 base_val;
 4189
 4190		base_val = tp->pci_clock_ctrl;
 4191		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
 4192			     CLOCK_CTRL_TXCLK_DISABLE);
 4193
 4194		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
 4195			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
 4196	} else if (tg3_flag(tp, 5780_CLASS) ||
 4197		   tg3_flag(tp, CPMU_PRESENT) ||
 4198		   tg3_asic_rev(tp) == ASIC_REV_5906) {
 4199		/* do nothing */
 4200	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
 4201		u32 newbits1, newbits2;
 4202
 4203		if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
 4204		    tg3_asic_rev(tp) == ASIC_REV_5701) {
 4205			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
 4206				    CLOCK_CTRL_TXCLK_DISABLE |
 4207				    CLOCK_CTRL_ALTCLK);
 4208			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
 4209		} else if (tg3_flag(tp, 5705_PLUS)) {
 4210			newbits1 = CLOCK_CTRL_625_CORE;
 4211			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
 4212		} else {
 4213			newbits1 = CLOCK_CTRL_ALTCLK;
 4214			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
 4215		}
 4216
 4217		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
 4218			    40);
 4219
 4220		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
 4221			    40);
 4222
 4223		if (!tg3_flag(tp, 5705_PLUS)) {
 4224			u32 newbits3;
 4225
 4226			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
 4227			    tg3_asic_rev(tp) == ASIC_REV_5701) {
 4228				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
 4229					    CLOCK_CTRL_TXCLK_DISABLE |
 4230					    CLOCK_CTRL_44MHZ_CORE);
 4231			} else {
 4232				newbits3 = CLOCK_CTRL_44MHZ_CORE;
 4233			}
 4234
 4235			tw32_wait_f(TG3PCI_CLOCK_CTRL,
 4236				    tp->pci_clock_ctrl | newbits3, 40);
 4237		}
 4238	}
 4239
 4240	if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
 4241		tg3_power_down_phy(tp, do_low_power);
 4242
 4243	tg3_frob_aux_power(tp, true);
 4244
 4245	/* Workaround for unstable PLL clock */
 4246	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
 4247	    ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
 4248	     (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
 4249		u32 val = tr32(0x7d00);
 4250
 4251		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
 4252		tw32(0x7d00, val);
 4253		if (!tg3_flag(tp, ENABLE_ASF)) {
 4254			int err;
 4255
 4256			err = tg3_nvram_lock(tp);
 4257			tg3_halt_cpu(tp, RX_CPU_BASE);
 4258			if (!err)
 4259				tg3_nvram_unlock(tp);
 4260		}
 4261	}
 4262
 4263	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
 4264
 4265	tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
 4266
 4267	return;
 4268}
 4269
 4270static void tg3_power_down(struct tg3 *tp)
 4271{
 4272	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
 4273	pci_set_power_state(tp->pdev, PCI_D3hot);
 4274}
 4275
 4276static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
 4277{
 4278	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
 4279	case MII_TG3_AUX_STAT_10HALF:
 4280		*speed = SPEED_10;
 4281		*duplex = DUPLEX_HALF;
 4282		break;
 4283
 4284	case MII_TG3_AUX_STAT_10FULL:
 4285		*speed = SPEED_10;
 4286		*duplex = DUPLEX_FULL;
 4287		break;
 4288
 4289	case MII_TG3_AUX_STAT_100HALF:
 4290		*speed = SPEED_100;
 4291		*duplex = DUPLEX_HALF;
 4292		break;
 4293
 4294	case MII_TG3_AUX_STAT_100FULL:
 4295		*speed = SPEED_100;
 4296		*duplex = DUPLEX_FULL;
 4297		break;
 4298
 4299	case MII_TG3_AUX_STAT_1000HALF:
 4300		*speed = SPEED_1000;
 4301		*duplex = DUPLEX_HALF;
 4302		break;
 4303
 4304	case MII_TG3_AUX_STAT_1000FULL:
 4305		*speed = SPEED_1000;
 4306		*duplex = DUPLEX_FULL;
 4307		break;
 4308
 4309	default:
 4310		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
 4311			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
 4312				 SPEED_10;
 4313			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
 4314				  DUPLEX_HALF;
 4315			break;
 4316		}
 4317		*speed = SPEED_UNKNOWN;
 4318		*duplex = DUPLEX_UNKNOWN;
 4319		break;
 4320	}
 4321}
 4322
 4323static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
 4324{
 4325	int err = 0;
 4326	u32 val, new_adv;
 4327
 4328	new_adv = ADVERTISE_CSMA;
 4329	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
 4330	new_adv |= mii_advertise_flowctrl(flowctrl);
 4331
 4332	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
 4333	if (err)
 4334		goto done;
 4335
 4336	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
 4337		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
 4338
 4339		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
 4340		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
 4341			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
 4342
 4343		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
 4344		if (err)
 4345			goto done;
 4346	}
 4347
 4348	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
 4349		goto done;
 4350
 4351	tw32(TG3_CPMU_EEE_MODE,
 4352	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
 4353
 4354	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
 4355	if (!err) {
 4356		u32 err2;
 4357
 4358		if (!tp->eee.eee_enabled)
 4359			val = 0;
 4360		else
 4361			val = ethtool_adv_to_mmd_eee_adv_t(advertise);
 4362
 4363		mii_eee_cap1_mod_linkmode_t(tp->eee.advertised, val);
 4364		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
 4365		if (err)
 4366			val = 0;
 4367
 4368		switch (tg3_asic_rev(tp)) {
 4369		case ASIC_REV_5717:
 4370		case ASIC_REV_57765:
 4371		case ASIC_REV_57766:
 4372		case ASIC_REV_5719:
 4373			/* If we advertised any EEE modes above... */
 4374			if (val)
 4375				val = MII_TG3_DSP_TAP26_ALNOKO |
 4376				      MII_TG3_DSP_TAP26_RMRXSTO |
 4377				      MII_TG3_DSP_TAP26_OPCSINPT;
 4378			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
 4379			fallthrough;
 4380		case ASIC_REV_5720:
 4381		case ASIC_REV_5762:
 4382			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
 4383				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
 4384						 MII_TG3_DSP_CH34TP2_HIBW01);
 4385		}
 4386
 4387		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
 4388		if (!err)
 4389			err = err2;
 4390	}
 4391
 4392done:
 4393	return err;
 4394}
 4395
 4396static void tg3_phy_copper_begin(struct tg3 *tp)
 4397{
 4398	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
 4399	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
 4400		u32 adv, fc;
 4401
 4402		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
 4403		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
 4404			adv = ADVERTISED_10baseT_Half |
 4405			      ADVERTISED_10baseT_Full;
 4406			if (tg3_flag(tp, WOL_SPEED_100MB))
 4407				adv |= ADVERTISED_100baseT_Half |
 4408				       ADVERTISED_100baseT_Full;
 4409			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
 4410				if (!(tp->phy_flags &
 4411				      TG3_PHYFLG_DISABLE_1G_HD_ADV))
 4412					adv |= ADVERTISED_1000baseT_Half;
 4413				adv |= ADVERTISED_1000baseT_Full;
 4414			}
 4415
 4416			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
 4417		} else {
 4418			adv = tp->link_config.advertising;
 4419			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
 4420				adv &= ~(ADVERTISED_1000baseT_Half |
 4421					 ADVERTISED_1000baseT_Full);
 4422
 4423			fc = tp->link_config.flowctrl;
 4424		}
 4425
 4426		tg3_phy_autoneg_cfg(tp, adv, fc);
 4427
 4428		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
 4429		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
 4430			/* Normally during power down we want to autonegotiate
 4431			 * the lowest possible speed for WOL. However, to avoid
 4432			 * link flap, we leave it untouched.
 4433			 */
 4434			return;
 4435		}
 4436
 4437		tg3_writephy(tp, MII_BMCR,
 4438			     BMCR_ANENABLE | BMCR_ANRESTART);
 4439	} else {
 4440		int i;
 4441		u32 bmcr, orig_bmcr;
 4442
 4443		tp->link_config.active_speed = tp->link_config.speed;
 4444		tp->link_config.active_duplex = tp->link_config.duplex;
 4445
 4446		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
 4447			/* With autoneg disabled, the 5714/5715 only links up when the
 4448			 * advertisement register has the configured speed
 4449			 * enabled.
 4450			 */
 4451			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
 4452		}
 4453
 4454		bmcr = 0;
 4455		switch (tp->link_config.speed) {
 4456		default:
 4457		case SPEED_10:
 4458			break;
 4459
 4460		case SPEED_100:
 4461			bmcr |= BMCR_SPEED100;
 4462			break;
 4463
 4464		case SPEED_1000:
 4465			bmcr |= BMCR_SPEED1000;
 4466			break;
 4467		}
 4468
 4469		if (tp->link_config.duplex == DUPLEX_FULL)
 4470			bmcr |= BMCR_FULLDPLX;
 4471
 4472		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
 4473		    (bmcr != orig_bmcr)) {
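			/* Force the link down via loopback and wait for it
			 * to drop before writing the new BMCR, so the link
			 * comes back up with the new speed/duplex settings.
			 */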
 4474			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
 4475			for (i = 0; i < 1500; i++) {
 4476				u32 tmp;
 4477
 4478				udelay(10);
 4479				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
 4480				    tg3_readphy(tp, MII_BMSR, &tmp))
 4481					continue;
 4482				if (!(tmp & BMSR_LSTATUS)) {
 4483					udelay(40);
 4484					break;
 4485				}
 4486			}
 4487			tg3_writephy(tp, MII_BMCR, bmcr);
 4488			udelay(40);
 4489		}
 4490	}
 4491}
 4492
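/* Reconstruct tp->link_config from whatever is currently programmed
 * into the PHY, so an existing link setup (e.g. one left behind by
 * boot firmware) can be adopted without renegotiating.
 */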
 4493static int tg3_phy_pull_config(struct tg3 *tp)
 4494{
 4495	int err;
 4496	u32 val;
 4497
 4498	err = tg3_readphy(tp, MII_BMCR, &val);
 4499	if (err)
 4500		goto done;
 4501
 4502	if (!(val & BMCR_ANENABLE)) {
 4503		tp->link_config.autoneg = AUTONEG_DISABLE;
 4504		tp->link_config.advertising = 0;
 4505		tg3_flag_clear(tp, PAUSE_AUTONEG);
 4506
 4507		err = -EIO;
 4508
 4509		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
 4510		case 0:
 4511			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
 4512				goto done;
 4513
 4514			tp->link_config.speed = SPEED_10;
 4515			break;
 4516		case BMCR_SPEED100:
 4517			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
 4518				goto done;
 4519
 4520			tp->link_config.speed = SPEED_100;
 4521			break;
 4522		case BMCR_SPEED1000:
 4523			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
 4524				tp->link_config.speed = SPEED_1000;
 4525				break;
 4526			}
 4527			fallthrough;
 4528		default:
 4529			goto done;
 4530		}
 4531
 4532		if (val & BMCR_FULLDPLX)
 4533			tp->link_config.duplex = DUPLEX_FULL;
 4534		else
 4535			tp->link_config.duplex = DUPLEX_HALF;
 4536
 4537		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
 4538
 4539		err = 0;
 4540		goto done;
 4541	}
 4542
 4543	tp->link_config.autoneg = AUTONEG_ENABLE;
 4544	tp->link_config.advertising = ADVERTISED_Autoneg;
 4545	tg3_flag_set(tp, PAUSE_AUTONEG);
 4546
 4547	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
 4548		u32 adv;
 4549
 4550		err = tg3_readphy(tp, MII_ADVERTISE, &val);
 4551		if (err)
 4552			goto done;
 4553
 4554		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
 4555		tp->link_config.advertising |= adv | ADVERTISED_TP;
 4556
 4557		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
 4558	} else {
 4559		tp->link_config.advertising |= ADVERTISED_FIBRE;
 4560	}
 4561
 4562	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
 4563		u32 adv;
 4564
 4565		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
 4566			err = tg3_readphy(tp, MII_CTRL1000, &val);
 4567			if (err)
 4568				goto done;
 4569
 4570			adv = mii_ctrl1000_to_ethtool_adv_t(val);
 4571		} else {
 4572			err = tg3_readphy(tp, MII_ADVERTISE, &val);
 4573			if (err)
 4574				goto done;
 4575
 4576			adv = tg3_decode_flowctrl_1000X(val);
 4577			tp->link_config.flowctrl = adv;
 4578
 4579			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
 4580			adv = mii_adv_to_ethtool_adv_x(val);
 4581		}
 4582
 4583		tp->link_config.advertising |= adv;
 4584	}
 4585
 4586done:
 4587	return err;
 4588}
 4589
 4590static int tg3_init_5401phy_dsp(struct tg3 *tp)
 4591{
 4592	int err;
 4593
 4594	/* Turn off tap power management and set the extended packet
 4595	 * length bit. */
 4596	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
 4597
 4598	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
 4599	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
 4600	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
 4601	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
 4602	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
 4603
 4604	udelay(40);
 4605
 4606	return err;
 4607}
 4608
 4609static bool tg3_phy_eee_config_ok(struct tg3 *tp)
 4610{
 4611	struct ethtool_keee eee = {};
 4612
 4613	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
 4614		return true;
 4615
 4616	tg3_eee_pull_config(tp, &eee);
 4617
 4618	if (tp->eee.eee_enabled) {
 4619		if (!linkmode_equal(tp->eee.advertised, eee.advertised) ||
 4620		    tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
 4621		    tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
 4622			return false;
 4623	} else {
 4624		/* EEE is disabled but we're advertising */
 4625		if (!linkmode_empty(eee.advertised))
 4626			return false;
 4627	}
 4628
 4629	return true;
 4630}
 4631
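/* Check that the PHY advertisement registers still match what the
 * driver believes it configured; if they were changed behind our
 * back, the autoneg result cannot be trusted.
 */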
 4632static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
 4633{
 4634	u32 advmsk, tgtadv, advertising;
 4635
 4636	advertising = tp->link_config.advertising;
 4637	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
 4638
 4639	advmsk = ADVERTISE_ALL;
 4640	if (tp->link_config.active_duplex == DUPLEX_FULL) {
 4641		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
 4642		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
 4643	}
 4644
 4645	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
 4646		return false;
 4647
 4648	if ((*lcladv & advmsk) != tgtadv)
 4649		return false;
 4650
 4651	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
 4652		u32 tg3_ctrl;
 4653
 4654		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
 4655
 4656		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
 4657			return false;
 4658
 4659		if (tgtadv &&
 4660		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
 4661		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
 4662			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
 4663			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
 4664				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
 4665		} else {
 4666			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
 4667		}
 4668
 4669		if (tg3_ctrl != tgtadv)
 4670			return false;
 4671	}
 4672
 4673	return true;
 4674}
 4675
 4676static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
 4677{
 4678	u32 lpeth = 0;
 4679
 4680	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
 4681		u32 val;
 4682
 4683		if (tg3_readphy(tp, MII_STAT1000, &val))
 4684			return false;
 4685
 4686		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
 4687	}
 4688
 4689	if (tg3_readphy(tp, MII_LPA, rmtadv))
 4690		return false;
 4691
 4692	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
 4693	tp->link_config.rmt_adv = lpeth;
 4694
 4695	return true;
 4696}
 4697
 4698static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
 4699{
 4700	if (curr_link_up != tp->link_up) {
 4701		if (curr_link_up) {
 4702			netif_carrier_on(tp->dev);
 4703		} else {
 4704			netif_carrier_off(tp->dev);
 4705			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
 4706				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
 4707		}
 4708
 4709		tg3_link_report(tp);
 4710		return true;
 4711	}
 4712
 4713	return false;
 4714}
 4715
 4716static void tg3_clear_mac_status(struct tg3 *tp)
 4717{
 4718	tw32(MAC_EVENT, 0);
 4719
 4720	tw32_f(MAC_STATUS,
 4721	       MAC_STATUS_SYNC_CHANGED |
 4722	       MAC_STATUS_CFG_CHANGED |
 4723	       MAC_STATUS_MI_COMPLETION |
 4724	       MAC_STATUS_LNKSTATE_CHANGED);
 4725	udelay(40);
 4726}
 4727
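/* Program the CPMU Energy Efficient Ethernet block: link-idle
 * detection sources, LPI entry/exit behavior and the two debounce
 * timers, honoring the current ethtool EEE configuration in tp->eee.
 */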
 4728static void tg3_setup_eee(struct tg3 *tp)
 4729{
 4730	u32 val;
 4731
 4732	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
 4733	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
 4734	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
 4735		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
 4736
 4737	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
 4738
 4739	tw32_f(TG3_CPMU_EEE_CTRL,
 4740	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
 4741
 4742	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
 4743	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
 4744	      TG3_CPMU_EEEMD_LPI_IN_RX |
 4745	      TG3_CPMU_EEEMD_EEE_ENABLE;
 4746
 4747	if (tg3_asic_rev(tp) != ASIC_REV_5717)
 4748		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
 4749
 4750	if (tg3_flag(tp, ENABLE_APE))
 4751		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
 4752
 4753	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
 4754
 4755	tw32_f(TG3_CPMU_EEE_DBTMR1,
 4756	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
 4757	       (tp->eee.tx_lpi_timer & 0xffff));
 4758
 4759	tw32_f(TG3_CPMU_EEE_DBTMR2,
 4760	       TG3_CPMU_DBTMR2_APE_TX_2047US |
 4761	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
 4762}
 4763
 4764static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
 4765{
 4766	bool current_link_up;
 4767	u32 bmsr, val;
 4768	u32 lcl_adv, rmt_adv;
 4769	u32 current_speed;
 4770	u8 current_duplex;
 4771	int i, err;
 4772
 4773	tg3_clear_mac_status(tp);
 4774
 4775	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
 4776		tw32_f(MAC_MI_MODE,
 4777		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
 4778		udelay(80);
 4779	}
 4780
 4781	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
 4782
 4783	/* Some third-party PHYs need to be reset on link going
 4784	 * down.
 4785	 */
 4786	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
 4787	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
 4788	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
 4789	    tp->link_up) {
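		/* BMSR latches link-down events; read it twice to obtain
		 * the current link state.
		 */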
 4790		tg3_readphy(tp, MII_BMSR, &bmsr);
 4791		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
 4792		    !(bmsr & BMSR_LSTATUS))
 4793			force_reset = true;
 4794	}
 4795	if (force_reset)
 4796		tg3_phy_reset(tp);
 4797
 4798	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
 4799		tg3_readphy(tp, MII_BMSR, &bmsr);
 4800		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
 4801		    !tg3_flag(tp, INIT_COMPLETE))
 4802			bmsr = 0;
 4803
 4804		if (!(bmsr & BMSR_LSTATUS)) {
 4805			err = tg3_init_5401phy_dsp(tp);
 4806			if (err)
 4807				return err;
 4808
 4809			tg3_readphy(tp, MII_BMSR, &bmsr);
 4810			for (i = 0; i < 1000; i++) {
 4811				udelay(10);
 4812				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
 4813				    (bmsr & BMSR_LSTATUS)) {
 4814					udelay(40);
 4815					break;
 4816				}
 4817			}
 4818
 4819			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
 4820			    TG3_PHY_REV_BCM5401_B0 &&
 4821			    !(bmsr & BMSR_LSTATUS) &&
 4822			    tp->link_config.active_speed == SPEED_1000) {
 4823				err = tg3_phy_reset(tp);
 4824				if (!err)
 4825					err = tg3_init_5401phy_dsp(tp);
 4826				if (err)
 4827					return err;
 4828			}
 4829		}
 4830	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
 4831		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
 4832		/* 5701 {A0,B0} CRC bug workaround */
 4833		tg3_writephy(tp, 0x15, 0x0a75);
 4834		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
 4835		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
 4836		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
 4837	}
 4838
 4839	/* Clear pending interrupts... */
 4840	tg3_readphy(tp, MII_TG3_ISTAT, &val);
 4841	tg3_readphy(tp, MII_TG3_ISTAT, &val);
 4842
 4843	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
 4844		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
 4845	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
 4846		tg3_writephy(tp, MII_TG3_IMASK, ~0);
 4847
 4848	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
 4849	    tg3_asic_rev(tp) == ASIC_REV_5701) {
 4850		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
 4851			tg3_writephy(tp, MII_TG3_EXT_CTRL,
 4852				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
 4853		else
 4854			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
 4855	}
 4856
 4857	current_link_up = false;
 4858	current_speed = SPEED_UNKNOWN;
 4859	current_duplex = DUPLEX_UNKNOWN;
 4860	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
 4861	tp->link_config.rmt_adv = 0;
 4862
 4863	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
 4864		err = tg3_phy_auxctl_read(tp,
 4865					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
 4866					  &val);
 4867		if (!err && !(val & (1 << 10))) {
 4868			tg3_phy_auxctl_write(tp,
 4869					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
 4870					     val | (1 << 10));
 4871			goto relink;
 4872		}
 4873	}
 4874
 4875	bmsr = 0;
 4876	for (i = 0; i < 100; i++) {
 4877		tg3_readphy(tp, MII_BMSR, &bmsr);
 4878		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
 4879		    (bmsr & BMSR_LSTATUS))
 4880			break;
 4881		udelay(40);
 4882	}
 4883
 4884	if (bmsr & BMSR_LSTATUS) {
 4885		u32 aux_stat, bmcr;
 4886
 4887		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
 4888		for (i = 0; i < 2000; i++) {
 4889			udelay(10);
 4890			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
 4891			    aux_stat)
 4892				break;
 4893		}
 4894
 4895		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
 4896					     &current_speed,
 4897					     &current_duplex);
 4898
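		/* Right after link-up BMCR may briefly read back as 0 or
		 * 0x7fff; poll until it returns a sane value.
		 */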
 4899		bmcr = 0;
 4900		for (i = 0; i < 200; i++) {
 4901			tg3_readphy(tp, MII_BMCR, &bmcr);
 4902			if (tg3_readphy(tp, MII_BMCR, &bmcr))
 4903				continue;
 4904			if (bmcr && bmcr != 0x7fff)
 4905				break;
 4906			udelay(10);
 4907		}
 4908
 4909		lcl_adv = 0;
 4910		rmt_adv = 0;
 4911
 4912		tp->link_config.active_speed = current_speed;
 4913		tp->link_config.active_duplex = current_duplex;
 4914
 4915		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
 4916			bool eee_config_ok = tg3_phy_eee_config_ok(tp);
 4917
 4918			if ((bmcr & BMCR_ANENABLE) &&
 4919			    eee_config_ok &&
 4920			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
 4921			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
 4922				current_link_up = true;
 4923
 4924			/* EEE settings changes take effect only after a phy
 4925			 * reset.  If we have skipped a reset due to Link Flap
 4926			 * Avoidance being enabled, do it now.
 4927			 */
 4928			if (!eee_config_ok &&
 4929			    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
 4930			    !force_reset) {
 4931				tg3_setup_eee(tp);
 4932				tg3_phy_reset(tp);
 4933			}
 4934		} else {
 4935			if (!(bmcr & BMCR_ANENABLE) &&
 4936			    tp->link_config.speed == current_speed &&
 4937			    tp->link_config.duplex == current_duplex) {
 4938				current_link_up = true;
 4939			}
 4940		}
 4941
 4942		if (current_link_up &&
 4943		    tp->link_config.active_duplex == DUPLEX_FULL) {
 4944			u32 reg, bit;
 4945
 4946			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
 4947				reg = MII_TG3_FET_GEN_STAT;
 4948				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
 4949			} else {
 4950				reg = MII_TG3_EXT_STAT;
 4951				bit = MII_TG3_EXT_STAT_MDIX;
 4952			}
 4953
 4954			if (!tg3_readphy(tp, reg, &val) && (val & bit))
 4955				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
 4956
 4957			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
 4958		}
 4959	}
 4960
 4961relink:
 4962	if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
 4963		tg3_phy_copper_begin(tp);
 4964
 4965		if (tg3_flag(tp, ROBOSWITCH)) {
 4966			current_link_up = true;
 4967			/* FIXME: when BCM5325 switch is used use 100 MBit/s */
 4968			current_speed = SPEED_1000;
 4969			current_duplex = DUPLEX_FULL;
 4970			tp->link_config.active_speed = current_speed;
 4971			tp->link_config.active_duplex = current_duplex;
 4972		}
 4973
 4974		tg3_readphy(tp, MII_BMSR, &bmsr);
 4975		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
 4976		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
 4977			current_link_up = true;
 4978	}
 4979
 4980	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
 4981	if (current_link_up) {
 4982		if (tp->link_config.active_speed == SPEED_100 ||
 4983		    tp->link_config.active_speed == SPEED_10)
 4984			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
 4985		else
 4986			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
 4987	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
 4988		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
 4989	else
 4990		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
 4991
 4992	/* In order for the 5750 core in BCM4785 chip to work properly
 4993	 * in RGMII mode, the Led Control Register must be set up.
 4994	 */
 4995	if (tg3_flag(tp, RGMII_MODE)) {
 4996		u32 led_ctrl = tr32(MAC_LED_CTRL);
 4997		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
 4998
 4999		if (tp->link_config.active_speed == SPEED_10)
 5000			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
 5001		else if (tp->link_config.active_speed == SPEED_100)
 5002			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
 5003				     LED_CTRL_100MBPS_ON);
 5004		else if (tp->link_config.active_speed == SPEED_1000)
 5005			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
 5006				     LED_CTRL_1000MBPS_ON);
 5007
 5008		tw32(MAC_LED_CTRL, led_ctrl);
 5009		udelay(40);
 5010	}
 5011
 5012	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
 5013	if (tp->link_config.active_duplex == DUPLEX_HALF)
 5014		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
 5015
 5016	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
 5017		if (current_link_up &&
 5018		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
 5019			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
 5020		else
 5021			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
 5022	}
 5023
 5024	/* ??? Without this setting Netgear GA302T PHY does not
 5025	 * ??? send/receive packets...
 5026	 */
 5027	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
 5028	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
 5029		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
 5030		tw32_f(MAC_MI_MODE, tp->mi_mode);
 5031		udelay(80);
 5032	}
 5033
 5034	tw32_f(MAC_MODE, tp->mac_mode);
 5035	udelay(40);
 5036
 5037	tg3_phy_eee_adjust(tp, current_link_up);
 5038
 5039	if (tg3_flag(tp, USE_LINKCHG_REG)) {
 5040		/* Polled via timer. */
 5041		tw32_f(MAC_EVENT, 0);
 5042	} else {
 5043		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
 5044	}
 5045	udelay(40);
 5046
 5047	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
 5048	    current_link_up &&
 5049	    tp->link_config.active_speed == SPEED_1000 &&
 5050	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
 5051		udelay(120);
 5052		tw32_f(MAC_STATUS,
 5053		       (MAC_STATUS_SYNC_CHANGED |
 5054			MAC_STATUS_CFG_CHANGED));
 5055		udelay(40);
 5056		tg3_write_mem(tp,
 5057			      NIC_SRAM_FIRMWARE_MBOX,
 5058			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
 5059	}
 5060
 5061	/* Prevent send BD corruption. */
 5062	if (tg3_flag(tp, CLKREQ_BUG)) {
 5063		if (tp->link_config.active_speed == SPEED_100 ||
 5064		    tp->link_config.active_speed == SPEED_10)
 5065			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
 5066						   PCI_EXP_LNKCTL_CLKREQ_EN);
 5067		else
 5068			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
 5069						 PCI_EXP_LNKCTL_CLKREQ_EN);
 5070	}
 5071
 5072	tg3_test_and_report_link_chg(tp, current_link_up);
 5073
 5074	return 0;
 5075}
 5076
 5077struct tg3_fiber_aneginfo {
 5078	int state;
 5079#define ANEG_STATE_UNKNOWN		0
 5080#define ANEG_STATE_AN_ENABLE		1
 5081#define ANEG_STATE_RESTART_INIT		2
 5082#define ANEG_STATE_RESTART		3
 5083#define ANEG_STATE_DISABLE_LINK_OK	4
 5084#define ANEG_STATE_ABILITY_DETECT_INIT	5
 5085#define ANEG_STATE_ABILITY_DETECT	6
 5086#define ANEG_STATE_ACK_DETECT_INIT	7
 5087#define ANEG_STATE_ACK_DETECT		8
 5088#define ANEG_STATE_COMPLETE_ACK_INIT	9
 5089#define ANEG_STATE_COMPLETE_ACK		10
 5090#define ANEG_STATE_IDLE_DETECT_INIT	11
 5091#define ANEG_STATE_IDLE_DETECT		12
 5092#define ANEG_STATE_LINK_OK		13
 5093#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
 5094#define ANEG_STATE_NEXT_PAGE_WAIT	15
 5095
 5096	u32 flags;
 5097#define MR_AN_ENABLE		0x00000001
 5098#define MR_RESTART_AN		0x00000002
 5099#define MR_AN_COMPLETE		0x00000004
 5100#define MR_PAGE_RX		0x00000008
 5101#define MR_NP_LOADED		0x00000010
 5102#define MR_TOGGLE_TX		0x00000020
 5103#define MR_LP_ADV_FULL_DUPLEX	0x00000040
 5104#define MR_LP_ADV_HALF_DUPLEX	0x00000080
 5105#define MR_LP_ADV_SYM_PAUSE	0x00000100
 5106#define MR_LP_ADV_ASYM_PAUSE	0x00000200
 5107#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
 5108#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
 5109#define MR_LP_ADV_NEXT_PAGE	0x00001000
 5110#define MR_TOGGLE_RX		0x00002000
 5111#define MR_NP_RX		0x00004000
 5112
 5113#define MR_LINK_OK		0x80000000
 5114
 5115	unsigned long link_time, cur_time;
 5116
 5117	u32 ability_match_cfg;
 5118	int ability_match_count;
 5119
 5120	char ability_match, idle_match, ack_match;
 5121
 5122	u32 txconfig, rxconfig;
 5123#define ANEG_CFG_NP		0x00000080
 5124#define ANEG_CFG_ACK		0x00000040
 5125#define ANEG_CFG_RF2		0x00000020
 5126#define ANEG_CFG_RF1		0x00000010
 5127#define ANEG_CFG_PS2		0x00000001
 5128#define ANEG_CFG_PS1		0x00008000
 5129#define ANEG_CFG_HD		0x00004000
 5130#define ANEG_CFG_FD		0x00002000
 5131#define ANEG_CFG_INVAL		0x00001f06
 5132
 5133};
 5134#define ANEG_OK		0
 5135#define ANEG_DONE	1
 5136#define ANEG_TIMER_ENAB	2
 5137#define ANEG_FAILED	-1
 5138
 5139#define ANEG_STATE_SETTLE_TIME	10000
 5140
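/* Software state machine for 1000BASE-X (IEEE 802.3 clause 37 style)
 * auto-negotiation, used when hardware autoneg is not in play.  It is
 * cranked from fiber_autoneg() below roughly once per microsecond
 * until it reports ANEG_DONE or ANEG_FAILED.
 */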
 5141static int tg3_fiber_aneg_smachine(struct tg3 *tp,
 5142				   struct tg3_fiber_aneginfo *ap)
 5143{
 5144	u16 flowctrl;
 5145	unsigned long delta;
 5146	u32 rx_cfg_reg;
 5147	int ret;
 5148
 5149	if (ap->state == ANEG_STATE_UNKNOWN) {
 5150		ap->rxconfig = 0;
 5151		ap->link_time = 0;
 5152		ap->cur_time = 0;
 5153		ap->ability_match_cfg = 0;
 5154		ap->ability_match_count = 0;
 5155		ap->ability_match = 0;
 5156		ap->idle_match = 0;
 5157		ap->ack_match = 0;
 5158	}
 5159	ap->cur_time++;
 5160
 5161	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
 5162		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
 5163
 5164		if (rx_cfg_reg != ap->ability_match_cfg) {
 5165			ap->ability_match_cfg = rx_cfg_reg;
 5166			ap->ability_match = 0;
 5167			ap->ability_match_count = 0;
 5168		} else {
 5169			if (++ap->ability_match_count > 1) {
 5170				ap->ability_match = 1;
 5171				ap->ability_match_cfg = rx_cfg_reg;
 5172			}
 5173		}
 5174		if (rx_cfg_reg & ANEG_CFG_ACK)
 5175			ap->ack_match = 1;
 5176		else
 5177			ap->ack_match = 0;
 5178
 5179		ap->idle_match = 0;
 5180	} else {
 5181		ap->idle_match = 1;
 5182		ap->ability_match_cfg = 0;
 5183		ap->ability_match_count = 0;
 5184		ap->ability_match = 0;
 5185		ap->ack_match = 0;
 5186
 5187		rx_cfg_reg = 0;
 5188	}
 5189
 5190	ap->rxconfig = rx_cfg_reg;
 5191	ret = ANEG_OK;
 5192
 5193	switch (ap->state) {
 5194	case ANEG_STATE_UNKNOWN:
 5195		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
 5196			ap->state = ANEG_STATE_AN_ENABLE;
 5197
 5198		fallthrough;
 5199	case ANEG_STATE_AN_ENABLE:
 5200		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
 5201		if (ap->flags & MR_AN_ENABLE) {
 5202			ap->link_time = 0;
 5203			ap->cur_time = 0;
 5204			ap->ability_match_cfg = 0;
 5205			ap->ability_match_count = 0;
 5206			ap->ability_match = 0;
 5207			ap->idle_match = 0;
 5208			ap->ack_match = 0;
 5209
 5210			ap->state = ANEG_STATE_RESTART_INIT;
 5211		} else {
 5212			ap->state = ANEG_STATE_DISABLE_LINK_OK;
 5213		}
 5214		break;
 5215
 5216	case ANEG_STATE_RESTART_INIT:
 5217		ap->link_time = ap->cur_time;
 5218		ap->flags &= ~(MR_NP_LOADED);
 5219		ap->txconfig = 0;
 5220		tw32(MAC_TX_AUTO_NEG, 0);
 5221		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
 5222		tw32_f(MAC_MODE, tp->mac_mode);
 5223		udelay(40);
 5224
 5225		ret = ANEG_TIMER_ENAB;
 5226		ap->state = ANEG_STATE_RESTART;
 5227
 5228		fallthrough;
 5229	case ANEG_STATE_RESTART:
 5230		delta = ap->cur_time - ap->link_time;
 5231		if (delta > ANEG_STATE_SETTLE_TIME)
 5232			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
 5233		else
 5234			ret = ANEG_TIMER_ENAB;
 5235		break;
 5236
 5237	case ANEG_STATE_DISABLE_LINK_OK:
 5238		ret = ANEG_DONE;
 5239		break;
 5240
 5241	case ANEG_STATE_ABILITY_DETECT_INIT:
 5242		ap->flags &= ~(MR_TOGGLE_TX);
 5243		ap->txconfig = ANEG_CFG_FD;
 5244		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
 5245		if (flowctrl & ADVERTISE_1000XPAUSE)
 5246			ap->txconfig |= ANEG_CFG_PS1;
 5247		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
 5248			ap->txconfig |= ANEG_CFG_PS2;
 5249		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
 5250		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
 5251		tw32_f(MAC_MODE, tp->mac_mode);
 5252		udelay(40);
 5253
 5254		ap->state = ANEG_STATE_ABILITY_DETECT;
 5255		break;
 5256
 5257	case ANEG_STATE_ABILITY_DETECT:
 5258		if (ap->ability_match != 0 && ap->rxconfig != 0)
 5259			ap->state = ANEG_STATE_ACK_DETECT_INIT;
 5260		break;
 5261
 5262	case ANEG_STATE_ACK_DETECT_INIT:
 5263		ap->txconfig |= ANEG_CFG_ACK;
 5264		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
 5265		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
 5266		tw32_f(MAC_MODE, tp->mac_mode);
 5267		udelay(40);
 5268
 5269		ap->state = ANEG_STATE_ACK_DETECT;
 5270
 5271		fallthrough;
 5272	case ANEG_STATE_ACK_DETECT:
 5273		if (ap->ack_match != 0) {
 5274			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
 5275			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
 5276				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
 5277			} else {
 5278				ap->state = ANEG_STATE_AN_ENABLE;
 5279			}
 5280		} else if (ap->ability_match != 0 &&
 5281			   ap->rxconfig == 0) {
 5282			ap->state = ANEG_STATE_AN_ENABLE;
 5283		}
 5284		break;
 5285
 5286	case ANEG_STATE_COMPLETE_ACK_INIT:
 5287		if (ap->rxconfig & ANEG_CFG_INVAL) {
 5288			ret = ANEG_FAILED;
 5289			break;
 5290		}
 5291		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
 5292			       MR_LP_ADV_HALF_DUPLEX |
 5293			       MR_LP_ADV_SYM_PAUSE |
 5294			       MR_LP_ADV_ASYM_PAUSE |
 5295			       MR_LP_ADV_REMOTE_FAULT1 |
 5296			       MR_LP_ADV_REMOTE_FAULT2 |
 5297			       MR_LP_ADV_NEXT_PAGE |
 5298			       MR_TOGGLE_RX |
 5299			       MR_NP_RX);
 5300		if (ap->rxconfig & ANEG_CFG_FD)
 5301			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
 5302		if (ap->rxconfig & ANEG_CFG_HD)
 5303			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
 5304		if (ap->rxconfig & ANEG_CFG_PS1)
 5305			ap->flags |= MR_LP_ADV_SYM_PAUSE;
 5306		if (ap->rxconfig & ANEG_CFG_PS2)
 5307			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
 5308		if (ap->rxconfig & ANEG_CFG_RF1)
 5309			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
 5310		if (ap->rxconfig & ANEG_CFG_RF2)
 5311			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
 5312		if (ap->rxconfig & ANEG_CFG_NP)
 5313			ap->flags |= MR_LP_ADV_NEXT_PAGE;
 5314
 5315		ap->link_time = ap->cur_time;
 5316
 5317		ap->flags ^= (MR_TOGGLE_TX);
 5318		if (ap->rxconfig & 0x0008)
 5319			ap->flags |= MR_TOGGLE_RX;
 5320		if (ap->rxconfig & ANEG_CFG_NP)
 5321			ap->flags |= MR_NP_RX;
 5322		ap->flags |= MR_PAGE_RX;
 5323
 5324		ap->state = ANEG_STATE_COMPLETE_ACK;
 5325		ret = ANEG_TIMER_ENAB;
 5326		break;
 5327
 5328	case ANEG_STATE_COMPLETE_ACK:
 5329		if (ap->ability_match != 0 &&
 5330		    ap->rxconfig == 0) {
 5331			ap->state = ANEG_STATE_AN_ENABLE;
 5332			break;
 5333		}
 5334		delta = ap->cur_time - ap->link_time;
 5335		if (delta > ANEG_STATE_SETTLE_TIME) {
 5336			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
 5337				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
 5338			} else {
 5339				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
 5340				    !(ap->flags & MR_NP_RX)) {
 5341					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
 5342				} else {
 5343					ret = ANEG_FAILED;
 5344				}
 5345			}
 5346		}
 5347		break;
 5348
 5349	case ANEG_STATE_IDLE_DETECT_INIT:
 5350		ap->link_time = ap->cur_time;
 5351		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
 5352		tw32_f(MAC_MODE, tp->mac_mode);
 5353		udelay(40);
 5354
 5355		ap->state = ANEG_STATE_IDLE_DETECT;
 5356		ret = ANEG_TIMER_ENAB;
 5357		break;
 5358
 5359	case ANEG_STATE_IDLE_DETECT:
 5360		if (ap->ability_match != 0 &&
 5361		    ap->rxconfig == 0) {
 5362			ap->state = ANEG_STATE_AN_ENABLE;
 5363			break;
 5364		}
 5365		delta = ap->cur_time - ap->link_time;
 5366		if (delta > ANEG_STATE_SETTLE_TIME) {
 5367			/* XXX another gem from the Broadcom driver :( */
 5368			ap->state = ANEG_STATE_LINK_OK;
 5369		}
 5370		break;
 5371
 5372	case ANEG_STATE_LINK_OK:
 5373		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
 5374		ret = ANEG_DONE;
 5375		break;
 5376
 5377	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
 5378		/* ??? unimplemented */
 5379		break;
 5380
 5381	case ANEG_STATE_NEXT_PAGE_WAIT:
 5382		/* ??? unimplemented */
 5383		break;
 5384
 5385	default:
 5386		ret = ANEG_FAILED;
 5387		break;
 5388	}
 5389
 5390	return ret;
 5391}
 5392
 5393static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
 5394{
 5395	int res = 0;
 5396	struct tg3_fiber_aneginfo aninfo;
 5397	int status = ANEG_FAILED;
 5398	unsigned int tick;
 5399	u32 tmp;
 5400
 5401	tw32_f(MAC_TX_AUTO_NEG, 0);
 5402
 5403	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
 5404	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
 5405	udelay(40);
 5406
 5407	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
 5408	udelay(40);
 5409
 5410	memset(&aninfo, 0, sizeof(aninfo));
 5411	aninfo.flags |= MR_AN_ENABLE;
 5412	aninfo.state = ANEG_STATE_UNKNOWN;
 5413	aninfo.cur_time = 0;
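	/* Crank the state machine by hand; with a 1 us delay per
	 * iteration the 195000-tick limit bounds autoneg at roughly
	 * 195 ms.
	 */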
 5414	tick = 0;
 5415	while (++tick < 195000) {
 5416		status = tg3_fiber_aneg_smachine(tp, &aninfo);
 5417		if (status == ANEG_DONE || status == ANEG_FAILED)
 5418			break;
 5419
 5420		udelay(1);
 5421	}
 5422
 5423	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
 5424	tw32_f(MAC_MODE, tp->mac_mode);
 5425	udelay(40);
 5426
 5427	*txflags = aninfo.txconfig;
 5428	*rxflags = aninfo.flags;
 5429
 5430	if (status == ANEG_DONE &&
 5431	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
 5432			     MR_LP_ADV_FULL_DUPLEX)))
 5433		res = 1;
 5434
 5435	return res;
 5436}
 5437
 5438static void tg3_init_bcm8002(struct tg3 *tp)
 5439{
 5440	u32 mac_status = tr32(MAC_STATUS);
 5441	int i;
 5442
 5443	/* Reset when initializing the first time or when we have a link. */
 5444	if (tg3_flag(tp, INIT_COMPLETE) &&
 5445	    !(mac_status & MAC_STATUS_PCS_SYNCED))
 5446		return;
 5447
 5448	/* Set PLL lock range. */
 5449	tg3_writephy(tp, 0x16, 0x8007);
 5450
 5451	/* SW reset */
 5452	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
 5453
 5454	/* Wait for reset to complete. */
 5455	/* XXX schedule_timeout() ... */
 5456	for (i = 0; i < 500; i++)
 5457		udelay(10);
 5458
 5459	/* Config mode; select PMA/Ch 1 regs. */
 5460	tg3_writephy(tp, 0x10, 0x8411);
 5461
 5462	/* Enable auto-lock and comdet, select txclk for tx. */
 5463	tg3_writephy(tp, 0x11, 0x0a10);
 5464
 5465	tg3_writephy(tp, 0x18, 0x00a0);
 5466	tg3_writephy(tp, 0x16, 0x41ff);
 5467
 5468	/* Assert and deassert POR. */
 5469	tg3_writephy(tp, 0x13, 0x0400);
 5470	udelay(40);
 5471	tg3_writephy(tp, 0x13, 0x0000);
 5472
 5473	tg3_writephy(tp, 0x11, 0x0a50);
 5474	udelay(40);
 5475	tg3_writephy(tp, 0x11, 0x0a10);
 5476
 5477	/* Wait for signal to stabilize */
 5478	/* XXX schedule_timeout() ... */
 5479	for (i = 0; i < 15000; i++)
 5480		udelay(10);
 5481
 5482	/* Deselect the channel register so we can read the PHYID
 5483	 * later.
 5484	 */
 5485	tg3_writephy(tp, 0x10, 0x8011);
 5486}
 5487
 5488static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
 5489{
 5490	u16 flowctrl;
 5491	bool current_link_up;
 5492	u32 sg_dig_ctrl, sg_dig_status;
 5493	u32 serdes_cfg, expected_sg_dig_ctrl;
 5494	int workaround, port_a;
 5495
 5496	serdes_cfg = 0;
 5497	workaround = 0;
 5498	port_a = 1;
 5499	current_link_up = false;
 5500
 5501	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
 5502	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
 5503		workaround = 1;
 5504		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
 5505			port_a = 0;
 5506
 5507		/* Preserve bits 0-11, 13 and 14 for signal pre-emphasis
 5508		 * and bits 20-23 for the voltage regulator. */
 5509		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
 5510	}
 5511
 5512	sg_dig_ctrl = tr32(SG_DIG_CTRL);
 5513
 5514	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
 5515		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
 5516			if (workaround) {
 5517				u32 val = serdes_cfg;
 5518
 5519				if (port_a)
 5520					val |= 0xc010000;
 5521				else
 5522					val |= 0x4010000;
 5523				tw32_f(MAC_SERDES_CFG, val);
 5524			}
 5525
 5526			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
 5527		}
 5528		if (mac_status & MAC_STATUS_PCS_SYNCED) {
 5529			tg3_setup_flow_control(tp, 0, 0);
 5530			current_link_up = true;
 5531		}
 5532		goto out;
 5533	}
 5534
 5535	/* Want auto-negotiation.  */
 5536	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
 5537
 5538	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
 5539	if (flowctrl & ADVERTISE_1000XPAUSE)
 5540		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
 5541	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
 5542		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
 5543
 5544	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
 5545		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
 5546		    tp->serdes_counter &&
 5547		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
 5548				    MAC_STATUS_RCVD_CFG)) ==
 5549		     MAC_STATUS_PCS_SYNCED)) {
 5550			tp->serdes_counter--;
 5551			current_link_up = true;
 5552			goto out;
 5553		}
 5554restart_autoneg:
 5555		if (workaround)
 5556			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
 5557		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
 5558		udelay(5);
 5559		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
 5560
 5561		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
 5562		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
 5563	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
 5564				 MAC_STATUS_SIGNAL_DET)) {
 5565		sg_dig_status = tr32(SG_DIG_STATUS);
 5566		mac_status = tr32(MAC_STATUS);
 5567
 5568		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
 5569		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
 5570			u32 local_adv = 0, remote_adv = 0;
 5571
 5572			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
 5573				local_adv |= ADVERTISE_1000XPAUSE;
 5574			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
 5575				local_adv |= ADVERTISE_1000XPSE_ASYM;
 5576
 5577			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
 5578				remote_adv |= LPA_1000XPAUSE;
 5579			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
 5580				remote_adv |= LPA_1000XPAUSE_ASYM;
 5581
 5582			tp->link_config.rmt_adv =
 5583					   mii_adv_to_ethtool_adv_x(remote_adv);
 5584
 5585			tg3_setup_flow_control(tp, local_adv, remote_adv);
 5586			current_link_up = true;
 5587			tp->serdes_counter = 0;
 5588			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
 5589		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
 5590			if (tp->serdes_counter)
 5591				tp->serdes_counter--;
 5592			else {
 5593				if (workaround) {
 5594					u32 val = serdes_cfg;
 5595
 5596					if (port_a)
 5597						val |= 0xc010000;
 5598					else
 5599						val |= 0x4010000;
 5600
 5601					tw32_f(MAC_SERDES_CFG, val);
 5602				}
 5603
 5604				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
 5605				udelay(40);
 5606
 5607				/* Link parallel detection: link is up only
 5608				 * if we have PCS_SYNC and are not
 5609				 * receiving config code words. */
 5610				mac_status = tr32(MAC_STATUS);
 5611				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
 5612				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
 5613					tg3_setup_flow_control(tp, 0, 0);
 5614					current_link_up = true;
 5615					tp->phy_flags |=
 5616						TG3_PHYFLG_PARALLEL_DETECT;
 5617					tp->serdes_counter =
 5618						SERDES_PARALLEL_DET_TIMEOUT;
 5619				} else
 5620					goto restart_autoneg;
 5621			}
 5622		}
 5623	} else {
 5624		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
 5625		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
 5626	}
 5627
 5628out:
 5629	return current_link_up;
 5630}
 5631
 5632static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
 5633{
 5634	bool current_link_up = false;
 5635
 5636	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
 5637		goto out;
 5638
 5639	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
 5640		u32 txflags, rxflags;
 5641		int i;
 5642
 5643		if (fiber_autoneg(tp, &txflags, &rxflags)) {
 5644			u32 local_adv = 0, remote_adv = 0;
 5645
 5646			if (txflags & ANEG_CFG_PS1)
 5647				local_adv |= ADVERTISE_1000XPAUSE;
 5648			if (txflags & ANEG_CFG_PS2)
 5649				local_adv |= ADVERTISE_1000XPSE_ASYM;
 5650
 5651			if (rxflags & MR_LP_ADV_SYM_PAUSE)
 5652				remote_adv |= LPA_1000XPAUSE;
 5653			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
 5654				remote_adv |= LPA_1000XPAUSE_ASYM;
 5655
 5656			tp->link_config.rmt_adv =
 5657					   mii_adv_to_ethtool_adv_x(remote_adv);
 5658
 5659			tg3_setup_flow_control(tp, local_adv, remote_adv);
 5660
 5661			current_link_up = true;
 5662		}
 5663		for (i = 0; i < 30; i++) {
 5664			udelay(20);
 5665			tw32_f(MAC_STATUS,
 5666			       (MAC_STATUS_SYNC_CHANGED |
 5667				MAC_STATUS_CFG_CHANGED));
 5668			udelay(40);
 5669			if ((tr32(MAC_STATUS) &
 5670			     (MAC_STATUS_SYNC_CHANGED |
 5671			      MAC_STATUS_CFG_CHANGED)) == 0)
 5672				break;
 5673		}
 5674
 5675		mac_status = tr32(MAC_STATUS);
 5676		if (!current_link_up &&
 5677		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
 5678		    !(mac_status & MAC_STATUS_RCVD_CFG))
 5679			current_link_up = true;
 5680	} else {
 5681		tg3_setup_flow_control(tp, 0, 0);
 5682
 5683		/* Forcing 1000FD link up. */
 5684		current_link_up = true;
 5685
 5686		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
 5687		udelay(40);
 5688
 5689		tw32_f(MAC_MODE, tp->mac_mode);
 5690		udelay(40);
 5691	}
 5692
 5693out:
 5694	return current_link_up;
 5695}
 5696
 5697static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
 5698{
 5699	u32 orig_pause_cfg;
 5700	u32 orig_active_speed;
 5701	u8 orig_active_duplex;
 5702	u32 mac_status;
 5703	bool current_link_up;
 5704	int i;
 5705
 5706	orig_pause_cfg = tp->link_config.active_flowctrl;
 5707	orig_active_speed = tp->link_config.active_speed;
 5708	orig_active_duplex = tp->link_config.active_duplex;
 5709
 5710	if (!tg3_flag(tp, HW_AUTONEG) &&
 5711	    tp->link_up &&
 5712	    tg3_flag(tp, INIT_COMPLETE)) {
 5713		mac_status = tr32(MAC_STATUS);
 5714		mac_status &= (MAC_STATUS_PCS_SYNCED |
 5715			       MAC_STATUS_SIGNAL_DET |
 5716			       MAC_STATUS_CFG_CHANGED |
 5717			       MAC_STATUS_RCVD_CFG);
 5718		if (mac_status == (MAC_STATUS_PCS_SYNCED |
 5719				   MAC_STATUS_SIGNAL_DET)) {
 5720			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
 5721					    MAC_STATUS_CFG_CHANGED));
 5722			return 0;
 5723		}
 5724	}
 5725
 5726	tw32_f(MAC_TX_AUTO_NEG, 0);
 5727
 5728	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
 5729	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
 5730	tw32_f(MAC_MODE, tp->mac_mode);
 5731	udelay(40);
 5732
 5733	if (tp->phy_id == TG3_PHY_ID_BCM8002)
 5734		tg3_init_bcm8002(tp);
 5735
 5736	/* Enable link change event even when serdes polling.  */
 5737	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
 5738	udelay(40);
 5739
 5740	tp->link_config.rmt_adv = 0;
 5741	mac_status = tr32(MAC_STATUS);
 5742
 5743	if (tg3_flag(tp, HW_AUTONEG))
 5744		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
 5745	else
 5746		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
 5747
 5748	tp->napi[0].hw_status->status =
 5749		(SD_STATUS_UPDATED |
 5750		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
 5751
 5752	for (i = 0; i < 100; i++) {
 5753		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
 5754				    MAC_STATUS_CFG_CHANGED));
 5755		udelay(5);
 5756		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
 5757					 MAC_STATUS_CFG_CHANGED |
 5758					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
 5759			break;
 5760	}
 5761
 5762	mac_status = tr32(MAC_STATUS);
 5763	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
 5764		current_link_up = false;
 5765		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
 5766		    tp->serdes_counter == 0) {
 5767			tw32_f(MAC_MODE, (tp->mac_mode |
 5768					  MAC_MODE_SEND_CONFIGS));
 5769			udelay(1);
 5770			tw32_f(MAC_MODE, tp->mac_mode);
 5771		}
 5772	}
 5773
 5774	if (current_link_up) {
 5775		tp->link_config.active_speed = SPEED_1000;
 5776		tp->link_config.active_duplex = DUPLEX_FULL;
 5777		tw32(MAC_LED_CTRL, (tp->led_ctrl |
 5778				    LED_CTRL_LNKLED_OVERRIDE |
 5779				    LED_CTRL_1000MBPS_ON));
 5780	} else {
 5781		tp->link_config.active_speed = SPEED_UNKNOWN;
 5782		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
 5783		tw32(MAC_LED_CTRL, (tp->led_ctrl |
 5784				    LED_CTRL_LNKLED_OVERRIDE |
 5785				    LED_CTRL_TRAFFIC_OVERRIDE));
 5786	}
 5787
 5788	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
 5789		u32 now_pause_cfg = tp->link_config.active_flowctrl;
 5790		if (orig_pause_cfg != now_pause_cfg ||
 5791		    orig_active_speed != tp->link_config.active_speed ||
 5792		    orig_active_duplex != tp->link_config.active_duplex)
 5793			tg3_link_report(tp);
 5794	}
 5795
 5796	return 0;
 5797}
 5798
 5799static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
 5800{
 5801	int err = 0;
 5802	u32 bmsr, bmcr;
 5803	u32 current_speed = SPEED_UNKNOWN;
 5804	u8 current_duplex = DUPLEX_UNKNOWN;
 5805	bool current_link_up = false;
 5806	u32 local_adv, remote_adv, sgsr;
 5807
 5808	if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
 5809	     tg3_asic_rev(tp) == ASIC_REV_5720) &&
 5810	     !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
 5811	     (sgsr & SERDES_TG3_SGMII_MODE)) {
 5812
 5813		if (force_reset)
 5814			tg3_phy_reset(tp);
 5815
 5816		tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
 5817
 5818		if (!(sgsr & SERDES_TG3_LINK_UP)) {
 5819			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
 5820		} else {
 5821			current_link_up = true;
 5822			if (sgsr & SERDES_TG3_SPEED_1000) {
 5823				current_speed = SPEED_1000;
 5824				tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
 5825			} else if (sgsr & SERDES_TG3_SPEED_100) {
 5826				current_speed = SPEED_100;
 5827				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
 5828			} else {
 5829				current_speed = SPEED_10;
 5830				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
 5831			}
 5832
 5833			if (sgsr & SERDES_TG3_FULL_DUPLEX)
 5834				current_duplex = DUPLEX_FULL;
 5835			else
 5836				current_duplex = DUPLEX_HALF;
 5837		}
 5838
 5839		tw32_f(MAC_MODE, tp->mac_mode);
 5840		udelay(40);
 5841
 5842		tg3_clear_mac_status(tp);
 5843
 5844		goto fiber_setup_done;
 5845	}
 5846
 5847	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
 5848	tw32_f(MAC_MODE, tp->mac_mode);
 5849	udelay(40);
 5850
 5851	tg3_clear_mac_status(tp);
 5852
 5853	if (force_reset)
 5854		tg3_phy_reset(tp);
 5855
 5856	tp->link_config.rmt_adv = 0;
 5857
 5858	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
 5859	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
 5860	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
 5861		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
 5862			bmsr |= BMSR_LSTATUS;
 5863		else
 5864			bmsr &= ~BMSR_LSTATUS;
 5865	}
 5866
 5867	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
 5868
 5869	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
 5870	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
 5871		/* do nothing, just check for link up at the end */
 5872	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
 5873		u32 adv, newadv;
 5874
 5875		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
 5876		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
 5877				 ADVERTISE_1000XPAUSE |
 5878				 ADVERTISE_1000XPSE_ASYM |
 5879				 ADVERTISE_SLCT);
 5880
 5881		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
 5882		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
 5883
 5884		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
 5885			tg3_writephy(tp, MII_ADVERTISE, newadv);
 5886			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
 5887			tg3_writephy(tp, MII_BMCR, bmcr);
 5888
 5889			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
 5890			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
 5891			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
 5892
 5893			return err;
 5894		}
 5895	} else {
 5896		u32 new_bmcr;
 5897
 5898		bmcr &= ~BMCR_SPEED1000;
 5899		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
 5900
 5901		if (tp->link_config.duplex == DUPLEX_FULL)
 5902			new_bmcr |= BMCR_FULLDPLX;
 5903
 5904		if (new_bmcr != bmcr) {
 5905			/* BMCR_SPEED1000 is a reserved bit that needs
 5906			 * to be set on write.
 5907			 */
 5908			new_bmcr |= BMCR_SPEED1000;
 5909
 5910			/* Force a linkdown */
 5911			if (tp->link_up) {
 5912				u32 adv;
 5913
 5914				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
 5915				adv &= ~(ADVERTISE_1000XFULL |
 5916					 ADVERTISE_1000XHALF |
 5917					 ADVERTISE_SLCT);
 5918				tg3_writephy(tp, MII_ADVERTISE, adv);
 5919				tg3_writephy(tp, MII_BMCR, bmcr |
 5920							   BMCR_ANRESTART |
 5921							   BMCR_ANENABLE);
 5922				udelay(10);
 5923				tg3_carrier_off(tp);
 5924			}
 5925			tg3_writephy(tp, MII_BMCR, new_bmcr);
 5926			bmcr = new_bmcr;
 5927			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
 5928			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
 5929			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
 5930				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
 5931					bmsr |= BMSR_LSTATUS;
 5932				else
 5933					bmsr &= ~BMSR_LSTATUS;
 5934			}
 5935			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
 5936		}
 5937	}
 5938
 5939	if (bmsr & BMSR_LSTATUS) {
 5940		current_speed = SPEED_1000;
 5941		current_link_up = true;
 5942		if (bmcr & BMCR_FULLDPLX)
 5943			current_duplex = DUPLEX_FULL;
 5944		else
 5945			current_duplex = DUPLEX_HALF;
 5946
 5947		local_adv = 0;
 5948		remote_adv = 0;
 5949
 5950		if (bmcr & BMCR_ANENABLE) {
 5951			u32 common;
 5952
 5953			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
 5954			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
 5955			common = local_adv & remote_adv;
 5956			if (common & (ADVERTISE_1000XHALF |
 5957				      ADVERTISE_1000XFULL)) {
 5958				if (common & ADVERTISE_1000XFULL)
 5959					current_duplex = DUPLEX_FULL;
 5960				else
 5961					current_duplex = DUPLEX_HALF;
 5962
 5963				tp->link_config.rmt_adv =
 5964					   mii_adv_to_ethtool_adv_x(remote_adv);
 5965			} else if (!tg3_flag(tp, 5780_CLASS)) {
 5966				/* Link is up via parallel detect */
 5967			} else {
 5968				current_link_up = false;
 5969			}
 5970		}
 5971	}
 5972
 5973fiber_setup_done:
 5974	if (current_link_up && current_duplex == DUPLEX_FULL)
 5975		tg3_setup_flow_control(tp, local_adv, remote_adv);
 5976
 5977	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
 5978	if (tp->link_config.active_duplex == DUPLEX_HALF)
 5979		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
 5980
 5981	tw32_f(MAC_MODE, tp->mac_mode);
 5982	udelay(40);
 5983
 5984	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
 5985
 5986	tp->link_config.active_speed = current_speed;
 5987	tp->link_config.active_duplex = current_duplex;
 5988
 5989	tg3_test_and_report_link_chg(tp, current_link_up);
 5990	return err;
 5991}
 5992
 5993static void tg3_serdes_parallel_detect(struct tg3 *tp)
 5994{
 5995	if (tp->serdes_counter) {
 5996		/* Give autoneg time to complete. */
 5997		tp->serdes_counter--;
 5998		return;
 5999	}
 6000
 6001	if (!tp->link_up &&
 6002	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
 6003		u32 bmcr;
 6004
 6005		tg3_readphy(tp, MII_BMCR, &bmcr);
 6006		if (bmcr & BMCR_ANENABLE) {
 6007			u32 phy1, phy2;
 6008
 6009			/* Select shadow register 0x1f */
 6010			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
 6011			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
 6012
 6013			/* Select expansion interrupt status register */
 6014			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
 6015					 MII_TG3_DSP_EXP1_INT_STAT);
 6016			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
 6017			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
 6018
 6019			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
 6020			/* We have signal detect and are not receiving
 6021			 * config code words; link is up by parallel
 6022			 * detection.
 6023			 */
 6024
 6025				bmcr &= ~BMCR_ANENABLE;
 6026				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
 6027				tg3_writephy(tp, MII_BMCR, bmcr);
 6028				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
 6029			}
 6030		}
 6031	} else if (tp->link_up &&
 6032		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
 6033		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
 6034		u32 phy2;
 6035
 6036		/* Select expansion interrupt status register */
 6037		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
 6038				 MII_TG3_DSP_EXP1_INT_STAT);
 6039		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
 6040		if (phy2 & 0x20) {
 6041			u32 bmcr;
 6042
 6043			/* Config code words received, turn on autoneg. */
 6044			tg3_readphy(tp, MII_BMCR, &bmcr);
 6045			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
 6046
 6047			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
 6048
 6049		}
 6050	}
 6051}
 6052
 6053static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
 6054{
 6055	u32 val;
 6056	int err;
 6057
 6058	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
 6059		err = tg3_setup_fiber_phy(tp, force_reset);
 6060	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
 6061		err = tg3_setup_fiber_mii_phy(tp, force_reset);
 6062	else
 6063		err = tg3_setup_copper_phy(tp, force_reset);
 6064
 6065	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
 6066		u32 scale;
 6067
 6068		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
 6069		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
 6070			scale = 65;
 6071		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
 6072			scale = 6;
 6073		else
 6074			scale = 12;
 6075
 6076		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
 6077		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
 6078		tw32(GRC_MISC_CFG, val);
 6079	}
 6080
 6081	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
 6082	      (6 << TX_LENGTHS_IPG_SHIFT);
 6083	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
 6084	    tg3_asic_rev(tp) == ASIC_REV_5762)
 6085		val |= tr32(MAC_TX_LENGTHS) &
 6086		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
 6087			TX_LENGTHS_CNT_DWN_VAL_MSK);
 6088
 6089	if (tp->link_config.active_speed == SPEED_1000 &&
 6090	    tp->link_config.active_duplex == DUPLEX_HALF)
 6091		tw32(MAC_TX_LENGTHS, val |
 6092		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
 6093	else
 6094		tw32(MAC_TX_LENGTHS, val |
 6095		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
 6096
 6097	if (!tg3_flag(tp, 5705_PLUS)) {
 6098		if (tp->link_up) {
 6099			tw32(HOSTCC_STAT_COAL_TICKS,
 6100			     tp->coal.stats_block_coalesce_usecs);
 6101		} else {
 6102			tw32(HOSTCC_STAT_COAL_TICKS, 0);
 6103		}
 6104	}
 6105
 6106	if (tg3_flag(tp, ASPM_WORKAROUND)) {
 6107		val = tr32(PCIE_PWR_MGMT_THRESH);
 6108		if (!tp->link_up)
 6109			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
 6110			      tp->pwrmgmt_thresh;
 6111		else
 6112			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
 6113		tw32(PCIE_PWR_MGMT_THRESH, val);
 6114	}
 6115
 6116	return err;
 6117}
 6118
 6119/* tp->lock must be held */
 6120static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
 6121{
 6122	u64 stamp;
 6123
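     	/* Note that only the LSB read is bracketed by the system
     	 * timestamps: that is the access the PHC-to-system offset is
     	 * measured against.
     	 */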
 6124	ptp_read_system_prets(sts);
 6125	stamp = tr32(TG3_EAV_REF_CLCK_LSB);
 6126	ptp_read_system_postts(sts);
 6127	stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
 6128
 6129	return stamp;
 6130}
 6131
 6132/* tp->lock must be held */
 6133static void tg3_refclk_write(struct tg3 *tp, u64 newval)
 6134{
 6135	u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
 6136
 6137	tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
 6138	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
 6139	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
 6140	tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
 6141}
 6142
 6143static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
 6144static inline void tg3_full_unlock(struct tg3 *tp);
 6145static int tg3_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info)
 6146{
 6147	struct tg3 *tp = netdev_priv(dev);
 6148
 6149	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
 6150
 6151	if (tg3_flag(tp, PTP_CAPABLE)) {
 6152		info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
 6153					SOF_TIMESTAMPING_RX_HARDWARE |
 6154					SOF_TIMESTAMPING_RAW_HARDWARE;
 6155	}
 6156
 6157	if (tp->ptp_clock)
 6158		info->phc_index = ptp_clock_index(tp->ptp_clock);
 6159
 6160	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
 6161
 6162	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
 6163			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
 6164			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
 6165			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
 6166	return 0;
 6167}
 6168
 6169static int tg3_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
 6170{
 6171	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
 6172	u64 correction;
 6173	bool neg_adj;
 6174
 6175	/* Frequency adjustment is performed in hardware with a 24-bit
 6176	 * accumulator and a programmable correction value.  On each clock
 6177	 * cycle the correction value is added to the accumulator, and when
 6178	 * it overflows the time counter is incremented/decremented.
 6179	 */
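     	/* A rough worked example, assuming diff_by_scaled_ppm() computes
     	 * base * |scaled_ppm| / (10^6 * 2^16): a request of +1 ppm is
     	 * scaled_ppm = 1 << 16, giving correction = 2^24 / 10^6 = 16, so
     	 * the 24-bit accumulator wraps about once every 2^24 / 16 (~1M)
     	 * clocks, nudging the time counter by one tick per wrap.
     	 */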
 6180	neg_adj = diff_by_scaled_ppm(1 << 24, scaled_ppm, &correction);
 6181
 6182	tg3_full_lock(tp, 0);
 6183
 6184	if (correction)
 6185		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
 6186		     TG3_EAV_REF_CLK_CORRECT_EN |
 6187		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) |
 6188		     ((u32)correction & TG3_EAV_REF_CLK_CORRECT_MASK));
 6189	else
 6190		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
 6191
 6192	tg3_full_unlock(tp);
 6193
 6194	return 0;
 6195}
 6196
 6197static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 6198{
 6199	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
 6200
 6201	tg3_full_lock(tp, 0);
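     	/* The offset is applied in software only: readers such as
     	 * tg3_ptp_gettimex() and tg3_hwclock_to_timestamp() add
     	 * ptp_adjust back in, so the hardware clock itself is left
     	 * free-running here.
     	 */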
 6202	tp->ptp_adjust += delta;
 6203	tg3_full_unlock(tp);
 6204
 6205	return 0;
 6206}
 6207
 6208static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
 6209			    struct ptp_system_timestamp *sts)
 6210{
 6211	u64 ns;
 6212	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
 6213
 6214	tg3_full_lock(tp, 0);
 6215	ns = tg3_refclk_read(tp, sts);
 6216	ns += tp->ptp_adjust;
 6217	tg3_full_unlock(tp);
 6218
 6219	*ts = ns_to_timespec64(ns);
 6220
 6221	return 0;
 6222}
 6223
 6224static int tg3_ptp_settime(struct ptp_clock_info *ptp,
 6225			   const struct timespec64 *ts)
 6226{
 6227	u64 ns;
 6228	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
 6229
 6230	ns = timespec64_to_ns(ts);
 6231
 6232	tg3_full_lock(tp, 0);
 6233	tg3_refclk_write(tp, ns);
 6234	tp->ptp_adjust = 0;
 6235	tg3_full_unlock(tp);
 6236
 6237	return 0;
 6238}
 6239
 6240static int tg3_ptp_enable(struct ptp_clock_info *ptp,
 6241			  struct ptp_clock_request *rq, int on)
 6242{
 6243	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
 6244	u32 clock_ctl;
 6245	int rval = 0;
 6246
 6247	switch (rq->type) {
 6248	case PTP_CLK_REQ_PEROUT:
 6249		/* Reject requests with unsupported flags */
 6250		if (rq->perout.flags)
 6251			return -EOPNOTSUPP;
 6252
 6253		if (rq->perout.index != 0)
 6254			return -EINVAL;
 6255
 6256		tg3_full_lock(tp, 0);
 6257		clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
 6258		clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
 6259
 6260		if (on) {
 6261			u64 nsec;
 6262
 6263			nsec = rq->perout.start.sec * 1000000000ULL +
 6264			       rq->perout.start.nsec;
 6265
 6266			if (rq->perout.period.sec || rq->perout.period.nsec) {
 6267				netdev_warn(tp->dev,
 6268					    "Device supports only a one-shot timesync output, period must be 0\n");
 6269				rval = -EINVAL;
 6270				goto err_out;
 6271			}
 6272
 6273			if (nsec & (1ULL << 63)) {
 6274				netdev_warn(tp->dev,
 6275					    "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
 6276				rval = -EINVAL;
 6277				goto err_out;
 6278			}
 6279
 6280			tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
 6281			tw32(TG3_EAV_WATCHDOG0_MSB,
 6282			     TG3_EAV_WATCHDOG0_EN |
 6283			     ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
 6284
 6285			tw32(TG3_EAV_REF_CLCK_CTL,
 6286			     clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
 6287		} else {
 6288			tw32(TG3_EAV_WATCHDOG0_MSB, 0);
 6289			tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
 6290		}
 6291
 6292err_out:
 6293		tg3_full_unlock(tp);
 6294		return rval;
 6295
 6296	default:
 6297		break;
 6298	}
 6299
 6300	return -EOPNOTSUPP;
 6301}
 6302
 6303static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
 6304				     struct skb_shared_hwtstamps *timestamp)
 6305{
 6306	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
 6307	timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
 6308					   tp->ptp_adjust);
 6309}
 6310
 6311static void tg3_read_tx_tstamp(struct tg3 *tp, u64 *hwclock)
 6312{
 6313	*hwclock = tr32(TG3_TX_TSTAMP_LSB);
 6314	*hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
 6315}
 6316
 6317static long tg3_ptp_ts_aux_work(struct ptp_clock_info *ptp)
 6318{
 6319	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
 6320	struct skb_shared_hwtstamps timestamp;
 6321	u64 hwclock;
 6322
 6323	if (tp->ptp_txts_retrycnt > 2)
 6324		goto done;
 6325
 6326	tg3_read_tx_tstamp(tp, &hwclock);
 6327
 6328	if (hwclock != tp->pre_tx_ts) {
 6329		tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
 6330		skb_tstamp_tx(tp->tx_tstamp_skb, &timestamp);
 6331		goto done;
 6332	}
 6333	tp->ptp_txts_retrycnt++;
 6334	return HZ / 10;
 6335done:
 6336	dev_consume_skb_any(tp->tx_tstamp_skb);
 6337	tp->tx_tstamp_skb = NULL;
 6338	tp->ptp_txts_retrycnt = 0;
 6339	tp->pre_tx_ts = 0;
 6340	return -1;
 6341}
 6342
 6343static const struct ptp_clock_info tg3_ptp_caps = {
 6344	.owner		= THIS_MODULE,
 6345	.name		= "tg3 clock",
 6346	.max_adj	= 250000000,
 6347	.n_alarm	= 0,
 6348	.n_ext_ts	= 0,
 6349	.n_per_out	= 1,
 6350	.n_pins		= 0,
 6351	.pps		= 0,
 6352	.adjfine	= tg3_ptp_adjfine,
 6353	.adjtime	= tg3_ptp_adjtime,
 6354	.do_aux_work	= tg3_ptp_ts_aux_work,
 6355	.gettimex64	= tg3_ptp_gettimex,
 6356	.settime64	= tg3_ptp_settime,
 6357	.enable		= tg3_ptp_enable,
 6358};
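     
     /* These callbacks reach the PTP core when the clock is registered
      * (done elsewhere in this driver), roughly:
      *
      *	tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
      *					   &tp->pdev->dev);
      *
      * after which userspace drives them through the /dev/ptpN chardev
      * and the clock_gettime()/clock_adjtime() syscalls.
      */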
 6359
 6360/* tp->lock must be held */
 6361static void tg3_ptp_init(struct tg3 *tp)
 6362{
 6363	if (!tg3_flag(tp, PTP_CAPABLE))
 6364		return;
 6365
 6366	/* Initialize the hardware clock to the system time. */
 6367	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
 6368	tp->ptp_adjust = 0;
 6369	tp->ptp_info = tg3_ptp_caps;
 6370}
 6371
 6372/* tp->lock must be held */
 6373static void tg3_ptp_resume(struct tg3 *tp)
 6374{
 6375	if (!tg3_flag(tp, PTP_CAPABLE))
 6376		return;
 6377
 6378	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
 6379	tp->ptp_adjust = 0;
 6380}
 6381
 6382static void tg3_ptp_fini(struct tg3 *tp)
 6383{
 6384	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
 6385		return;
 6386
 6387	ptp_clock_unregister(tp->ptp_clock);
 6388	tp->ptp_clock = NULL;
 6389	tp->ptp_adjust = 0;
 6390	dev_consume_skb_any(tp->tx_tstamp_skb);
 6391	tp->tx_tstamp_skb = NULL;
 6392}
 6393
 6394static inline int tg3_irq_sync(struct tg3 *tp)
 6395{
 6396	return tp->irq_sync;
 6397}
 6398
 6399static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
 6400{
 6401	int i;
 6402
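     
     	/* Offset the destination pointer so each register lands in the
     	 * dump buffer at its own register address; tg3_dump_state() can
     	 * then print the buffer indexed by that address.
     	 */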
 6403	dst = (u32 *)((u8 *)dst + off);
 6404	for (i = 0; i < len; i += sizeof(u32))
 6405		*dst++ = tr32(off + i);
 6406}
 6407
 6408static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
 6409{
 6410	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
 6411	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
 6412	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
 6413	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
 6414	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
 6415	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
 6416	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
 6417	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
 6418	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
 6419	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
 6420	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
 6421	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
 6422	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
 6423	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
 6424	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
 6425	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
 6426	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
 6427	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
 6428	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
 6429
 6430	if (tg3_flag(tp, SUPPORT_MSIX))
 6431		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
 6432
 6433	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
 6434	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
 6435	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
 6436	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
 6437	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
 6438	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
 6439	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
 6440	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
 6441
 6442	if (!tg3_flag(tp, 5705_PLUS)) {
 6443		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
 6444		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
 6445		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
 6446	}
 6447
 6448	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
 6449	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
 6450	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
 6451	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
 6452	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
 6453
 6454	if (tg3_flag(tp, NVRAM))
 6455		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
 6456}
 6457
 6458static void tg3_dump_state(struct tg3 *tp)
 6459{
 6460	int i;
 6461	u32 *regs;
 6462
 6463	/* If it is a PCI error, all registers will read 0xffff;
 6464	 * don't dump them out, just report the error and return.
 6465	 */
 6466	if (tp->pdev->error_state != pci_channel_io_normal) {
 6467		netdev_err(tp->dev, "PCI channel ERROR!\n");
 6468		return;
 6469	}
 6470
 6471	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
 6472	if (!regs)
 6473		return;
 6474
 6475	if (tg3_flag(tp, PCI_EXPRESS)) {
 6476		/* Read up to but not including private PCI registers */
 6477		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
 6478			regs[i / sizeof(u32)] = tr32(i);
 6479	} else
 6480		tg3_dump_legacy_regs(tp, regs);
 6481
 6482	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
 6483		if (!regs[i + 0] && !regs[i + 1] &&
 6484		    !regs[i + 2] && !regs[i + 3])
 6485			continue;
 6486
 6487		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
 6488			   i * 4,
 6489			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
 6490	}
 6491
 6492	kfree(regs);
 6493
 6494	for (i = 0; i < tp->irq_cnt; i++) {
 6495		struct tg3_napi *tnapi = &tp->napi[i];
 6496
 6497		/* SW status block */
 6498		netdev_err(tp->dev,
 6499			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
 6500			   i,
 6501			   tnapi->hw_status->status,
 6502			   tnapi->hw_status->status_tag,
 6503			   tnapi->hw_status->rx_jumbo_consumer,
 6504			   tnapi->hw_status->rx_consumer,
 6505			   tnapi->hw_status->rx_mini_consumer,
 6506			   tnapi->hw_status->idx[0].rx_producer,
 6507			   tnapi->hw_status->idx[0].tx_consumer);
 6508
 6509		netdev_err(tp->dev,
 6510		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
 6511			   i,
 6512			   tnapi->last_tag, tnapi->last_irq_tag,
 6513			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
 6514			   tnapi->rx_rcb_ptr,
 6515			   tnapi->prodring.rx_std_prod_idx,
 6516			   tnapi->prodring.rx_std_cons_idx,
 6517			   tnapi->prodring.rx_jmb_prod_idx,
 6518			   tnapi->prodring.rx_jmb_cons_idx);
 6519	}
 6520}
 6521
 6522/* This is called whenever we suspect that the system chipset is re-
 6523 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 6524 * is bogus tx completions. We try to recover by setting the
 6525 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 6526 * in the workqueue.
 6527 */
 6528static void tg3_tx_recover(struct tg3 *tp)
 6529{
 6530	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
 6531	       tp->write32_tx_mbox == tg3_write_indirect_mbox);
 6532
 6533	netdev_warn(tp->dev,
 6534		    "The system may be re-ordering memory-mapped I/O "
 6535		    "cycles to the network device, attempting to recover. "
 6536		    "Please report the problem to the driver maintainer "
 6537		    "and include system chipset information.\n");
 6538
 6539	tg3_flag_set(tp, TX_RECOVERY_PENDING);
 6540}
 6541
 6542static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
 6543{
 6544	/* Tell compiler to fetch tx indices from memory. */
 6545	barrier();
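     	/* The producer/consumer difference is taken modulo the ring
     	 * size, so the unsigned subtraction is wrap-safe: e.g. in a
     	 * 512-entry ring, prod = 5 and cons = 510 leave
     	 * (5 - 510) & 511 = 7 descriptors still in flight.
     	 */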
 6546	return tnapi->tx_pending -
 6547	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
 6548}
 6549
 6550/* Tigon3 never reports partial packet sends.  So we do not
 6551 * need special logic to handle SKBs that have not had all
 6552 * of their frags sent yet, like SunGEM does.
 6553 */
 6554static void tg3_tx(struct tg3_napi *tnapi)
 6555{
 6556	struct tg3 *tp = tnapi->tp;
 6557	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
 6558	u32 sw_idx = tnapi->tx_cons;
 6559	struct netdev_queue *txq;
 6560	int index = tnapi - tp->napi;
 6561	unsigned int pkts_compl = 0, bytes_compl = 0;
 6562
 6563	if (tg3_flag(tp, ENABLE_TSS))
 6564		index--;
 6565
 6566	txq = netdev_get_tx_queue(tp->dev, index);
 6567
 6568	while (sw_idx != hw_idx) {
 6569		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
 6570		bool complete_skb_later = false;
 6571		struct sk_buff *skb = ri->skb;
 6572		int i, tx_bug = 0;
 6573
 6574		if (unlikely(skb == NULL)) {
 6575			tg3_tx_recover(tp);
 6576			return;
 6577		}
 6578
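     		/* If the TX timestamp register still holds the previous
     		 * value (pre_tx_ts), the hardware has not latched this
     		 * packet's timestamp yet: park the skb and let the PTP
     		 * aux worker (tg3_ptp_ts_aux_work) poll for it instead
     		 * of spinning here.
     		 */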
 6579		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
 6580			struct skb_shared_hwtstamps timestamp;
 6581			u64 hwclock;
 6582
 6583			tg3_read_tx_tstamp(tp, &hwclock);
 6584			if (hwclock != tp->pre_tx_ts) {
 6585				tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
 6586				skb_tstamp_tx(skb, &timestamp);
 6587				tp->pre_tx_ts = 0;
 6588			} else {
 6589				tp->tx_tstamp_skb = skb;
 6590				complete_skb_later = true;
 6591			}
 6592		}
 6593
 6594		dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping),
 6595				 skb_headlen(skb), DMA_TO_DEVICE);
 6596
 6597		ri->skb = NULL;
 6598
 6599		while (ri->fragmented) {
 6600			ri->fragmented = false;
 6601			sw_idx = NEXT_TX(sw_idx);
 6602			ri = &tnapi->tx_buffers[sw_idx];
 6603		}
 6604
 6605		sw_idx = NEXT_TX(sw_idx);
 6606
 6607		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 6608			ri = &tnapi->tx_buffers[sw_idx];
 6609			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
 6610				tx_bug = 1;
 6611
 6612			dma_unmap_page(&tp->pdev->dev,
 6613				       dma_unmap_addr(ri, mapping),
 6614				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
 6615				       DMA_TO_DEVICE);
 6616
 6617			while (ri->fragmented) {
 6618				ri->fragmented = false;
 6619				sw_idx = NEXT_TX(sw_idx);
 6620				ri = &tnapi->tx_buffers[sw_idx];
 6621			}
 6622
 6623			sw_idx = NEXT_TX(sw_idx);
 6624		}
 6625
 6626		pkts_compl++;
 6627		bytes_compl += skb->len;
 6628
 6629		if (!complete_skb_later)
 6630			dev_consume_skb_any(skb);
 6631		else
 6632			ptp_schedule_worker(tp->ptp_clock, 0);
 6633
 6634		if (unlikely(tx_bug)) {
 6635			tg3_tx_recover(tp);
 6636			return;
 6637		}
 6638	}
 6639
 6640	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
 6641
 6642	tnapi->tx_cons = sw_idx;
 6643
 6644	/* Need to make the tx_cons update visible to __tg3_start_xmit()
 6645	 * before checking for netif_queue_stopped().  Without the
 6646	 * memory barrier, there is a small possibility that __tg3_start_xmit()
 6647	 * will miss it and cause the queue to be stopped forever.
 6648	 */
 6649	smp_mb();
 6650
 6651	if (unlikely(netif_tx_queue_stopped(txq) &&
 6652		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
 6653		__netif_tx_lock(txq, smp_processor_id());
 6654		if (netif_tx_queue_stopped(txq) &&
 6655		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
 6656			netif_tx_wake_queue(txq);
 6657		__netif_tx_unlock(txq);
 6658	}
 6659}
 6660
 6661static void tg3_frag_free(bool is_frag, void *data)
 6662{
 6663	if (is_frag)
 6664		skb_free_frag(data);
 6665	else
 6666		kfree(data);
 6667}
 6668
 6669static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
 6670{
 6671	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
 6672		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 6673
 6674	if (!ri->data)
 6675		return;
 6676
 6677	dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz,
 6678			 DMA_FROM_DEVICE);
 6679	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
 6680	ri->data = NULL;
 6681}
 6682
 6683
 6684/* Returns size of skb allocated or < 0 on error.
 6685 *
 6686 * We only need to fill in the address because the other members
 6687 * of the RX descriptor are invariant, see tg3_init_rings.
 6688 *
 6689 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 6690 * posting buffers we only dirty the first cache line of the RX
 6691 * descriptor (containing the address).  Whereas for the RX status
 6692 * buffers the cpu only reads the last cacheline of the RX descriptor
 6693 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 6694 */
 6695static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
 6696			     u32 opaque_key, u32 dest_idx_unmasked,
 6697			     unsigned int *frag_size)
 6698{
 6699	struct tg3_rx_buffer_desc *desc;
 6700	struct ring_info *map;
 6701	u8 *data;
 6702	dma_addr_t mapping;
 6703	int skb_size, data_size, dest_idx;
 6704
 6705	switch (opaque_key) {
 6706	case RXD_OPAQUE_RING_STD:
 6707		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
 6708		desc = &tpr->rx_std[dest_idx];
 6709		map = &tpr->rx_std_buffers[dest_idx];
 6710		data_size = tp->rx_pkt_map_sz;
 6711		break;
 6712
 6713	case RXD_OPAQUE_RING_JUMBO:
 6714		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
 6715		desc = &tpr->rx_jmb[dest_idx].std;
 6716		map = &tpr->rx_jmb_buffers[dest_idx];
 6717		data_size = TG3_RX_JMB_MAP_SZ;
 6718		break;
 6719
 6720	default:
 6721		return -EINVAL;
 6722	}
 6723
 6724	/* Do not overwrite any of the map or rp information
 6725	 * until we are sure we can commit to a new buffer.
 6726	 *
 6727	 * Callers depend upon this behavior and assume that
 6728	 * we leave everything unchanged if we fail.
 6729	 */
 6730	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
 6731		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
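     	/* Buffers that fit in a page come from the per-cpu page-fragment
     	 * allocator; larger (e.g. jumbo) buffers fall back to kmalloc().
     	 * *frag_size doubles as the flag telling tg3_rx() and
     	 * tg3_frag_free() which allocator was used.
     	 */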
 6732	if (skb_size <= PAGE_SIZE) {
 6733		data = napi_alloc_frag(skb_size);
 6734		*frag_size = skb_size;
 6735	} else {
 6736		data = kmalloc(skb_size, GFP_ATOMIC);
 6737		*frag_size = 0;
 6738	}
 6739	if (!data)
 6740		return -ENOMEM;
 6741
 6742	mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp),
 6743				 data_size, DMA_FROM_DEVICE);
 6744	if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
 6745		tg3_frag_free(skb_size <= PAGE_SIZE, data);
 6746		return -EIO;
 6747	}
 6748
 6749	map->data = data;
 6750	dma_unmap_addr_set(map, mapping, mapping);
 6751
 6752	desc->addr_hi = ((u64)mapping >> 32);
 6753	desc->addr_lo = ((u64)mapping & 0xffffffff);
 6754
 6755	return data_size;
 6756}
 6757
 6758/* We only need to copy the address over because the other
 6759 * members of the RX descriptor are invariant.  See notes above
 6760 * tg3_alloc_rx_data for full details.
 6761 */
 6762static void tg3_recycle_rx(struct tg3_napi *tnapi,
 6763			   struct tg3_rx_prodring_set *dpr,
 6764			   u32 opaque_key, int src_idx,
 6765			   u32 dest_idx_unmasked)
 6766{
 6767	struct tg3 *tp = tnapi->tp;
 6768	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
 6769	struct ring_info *src_map, *dest_map;
 6770	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
 6771	int dest_idx;
 6772
 6773	switch (opaque_key) {
 6774	case RXD_OPAQUE_RING_STD:
 6775		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
 6776		dest_desc = &dpr->rx_std[dest_idx];
 6777		dest_map = &dpr->rx_std_buffers[dest_idx];
 6778		src_desc = &spr->rx_std[src_idx];
 6779		src_map = &spr->rx_std_buffers[src_idx];
 6780		break;
 6781
 6782	case RXD_OPAQUE_RING_JUMBO:
 6783		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
 6784		dest_desc = &dpr->rx_jmb[dest_idx].std;
 6785		dest_map = &dpr->rx_jmb_buffers[dest_idx];
 6786		src_desc = &spr->rx_jmb[src_idx].std;
 6787		src_map = &spr->rx_jmb_buffers[src_idx];
 6788		break;
 6789
 6790	default:
 6791		return;
 6792	}
 6793
 6794	dest_map->data = src_map->data;
 6795	dma_unmap_addr_set(dest_map, mapping,
 6796			   dma_unmap_addr(src_map, mapping));
 6797	dest_desc->addr_hi = src_desc->addr_hi;
 6798	dest_desc->addr_lo = src_desc->addr_lo;
 6799
 6800	/* Ensure that the update to the skb happens after the physical
 6801	 * addresses have been transferred to the new BD location.
 6802	 */
 6803	smp_wmb();
 6804
 6805	src_map->data = NULL;
 6806}
 6807
 6808/* The RX ring scheme is composed of multiple rings which post fresh
 6809 * buffers to the chip, and one special ring the chip uses to report
 6810 * status back to the host.
 6811 *
 6812 * The special ring reports the status of received packets to the
 6813 * host.  The chip does not write into the original descriptor the
 6814 * RX buffer was obtained from.  The chip simply takes the original
 6815 * descriptor as provided by the host, updates the status and length
 6816 * field, then writes this into the next status ring entry.
 6817 *
 6818 * Each ring the host uses to post buffers to the chip is described
 6819 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 6820 * it is first placed into the on-chip ram.  When the packet's length
 6821 * is known, it walks down the TG3_BDINFO entries to select the ring.
 6822 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 6823 * which is within the range of the new packet's length is chosen.
 6824 *
 6825 * The "separate ring for rx status" scheme may sound queer, but it makes
 6826 * sense from a cache coherency perspective.  If only the host writes
 6827 * to the buffer post rings, and only the chip writes to the rx status
 6828 * rings, then cache lines never move beyond shared-modified state.
 6829 * If both the host and chip were to write into the same ring, cache line
 6830 * eviction could occur since both entities want it in an exclusive state.
 6831 */
 6832static int tg3_rx(struct tg3_napi *tnapi, int budget)
 6833{
 6834	struct tg3 *tp = tnapi->tp;
 6835	u32 work_mask, rx_std_posted = 0;
 6836	u32 std_prod_idx, jmb_prod_idx;
 6837	u32 sw_idx = tnapi->rx_rcb_ptr;
 6838	u16 hw_idx;
 6839	int received;
 6840	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
 6841
 6842	hw_idx = *(tnapi->rx_rcb_prod_idx);
 6843	/*
 6844	 * We need to order the read of hw_idx and the read of
 6845	 * the opaque cookie.
 6846	 */
 6847	rmb();
 6848	work_mask = 0;
 6849	received = 0;
 6850	std_prod_idx = tpr->rx_std_prod_idx;
 6851	jmb_prod_idx = tpr->rx_jmb_prod_idx;
 6852	while (sw_idx != hw_idx && budget > 0) {
 6853		struct ring_info *ri;
 6854		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
 6855		unsigned int len;
 6856		struct sk_buff *skb;
 6857		dma_addr_t dma_addr;
 6858		u32 opaque_key, desc_idx, *post_ptr;
 6859		u8 *data;
 6860		u64 tstamp = 0;
 6861
 6862		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
 6863		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
 6864		if (opaque_key == RXD_OPAQUE_RING_STD) {
 6865			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
 6866			dma_addr = dma_unmap_addr(ri, mapping);
 6867			data = ri->data;
 6868			post_ptr = &std_prod_idx;
 6869			rx_std_posted++;
 6870		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
 6871			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
 6872			dma_addr = dma_unmap_addr(ri, mapping);
 6873			data = ri->data;
 6874			post_ptr = &jmb_prod_idx;
 6875		} else
 6876			goto next_pkt_nopost;
 6877
 6878		work_mask |= opaque_key;
 6879
 6880		if (desc->err_vlan & RXD_ERR_MASK) {
 6881		drop_it:
 6882			tg3_recycle_rx(tnapi, tpr, opaque_key,
 6883				       desc_idx, *post_ptr);
 6884		drop_it_no_recycle:
 6885			/* Other statistics are kept track of by the card. */
 6886			tnapi->rx_dropped++;
 6887			goto next_pkt;
 6888		}
 6889
 6890		prefetch(data + TG3_RX_OFFSET(tp));
 6891		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
 6892		      ETH_FCS_LEN;
 6893
 6894		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
 6895		     RXD_FLAG_PTPSTAT_PTPV1 ||
 6896		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
 6897		     RXD_FLAG_PTPSTAT_PTPV2) {
 6898			tstamp = tr32(TG3_RX_TSTAMP_LSB);
 6899			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
 6900		}
 6901
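     		/* Two delivery paths: a large packet donates its DMA
     		 * buffer to the stack via build_skb() and a fresh buffer
     		 * is posted in its place; a small packet is copied into a
     		 * freshly allocated skb so the existing DMA buffer can be
     		 * recycled in the producer ring without remapping.
     		 */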
 6902		if (len > TG3_RX_COPY_THRESH(tp)) {
 6903			int skb_size;
 6904			unsigned int frag_size;
 6905
 6906			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
 6907						    *post_ptr, &frag_size);
 6908			if (skb_size < 0)
 6909				goto drop_it;
 6910
 6911			dma_unmap_single(&tp->pdev->dev, dma_addr, skb_size,
 6912					 DMA_FROM_DEVICE);
 6913
 6914			/* Ensure that the update to the data happens
 6915			 * after the usage of the old DMA mapping.
 6916			 */
 6917			smp_wmb();
 6918
 6919			ri->data = NULL;
 6920
 6921			if (frag_size)
 6922				skb = build_skb(data, frag_size);
 6923			else
 6924				skb = slab_build_skb(data);
 6925			if (!skb) {
 6926				tg3_frag_free(frag_size != 0, data);
 6927				goto drop_it_no_recycle;
 6928			}
 6929			skb_reserve(skb, TG3_RX_OFFSET(tp));
 6930		} else {
 6931			tg3_recycle_rx(tnapi, tpr, opaque_key,
 6932				       desc_idx, *post_ptr);
 6933
 6934			skb = netdev_alloc_skb(tp->dev,
 6935					       len + TG3_RAW_IP_ALIGN);
 6936			if (skb == NULL)
 6937				goto drop_it_no_recycle;
 6938
 6939			skb_reserve(skb, TG3_RAW_IP_ALIGN);
 6940			dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len,
 6941						DMA_FROM_DEVICE);
 6942			memcpy(skb->data,
 6943			       data + TG3_RX_OFFSET(tp),
 6944			       len);
 6945			dma_sync_single_for_device(&tp->pdev->dev, dma_addr,
 6946						   len, DMA_FROM_DEVICE);
 6947		}
 6948
 6949		skb_put(skb, len);
 6950		if (tstamp)
 6951			tg3_hwclock_to_timestamp(tp, tstamp,
 6952						 skb_hwtstamps(skb));
 6953
 6954		if ((tp->dev->features & NETIF_F_RXCSUM) &&
 6955		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
 6956		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
 6957		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
 6958			skb->ip_summed = CHECKSUM_UNNECESSARY;
 6959		else
 6960			skb_checksum_none_assert(skb);
 6961
 6962		skb->protocol = eth_type_trans(skb, tp->dev);
 6963
 6964		if (len > (tp->dev->mtu + ETH_HLEN) &&
 6965		    skb->protocol != htons(ETH_P_8021Q) &&
 6966		    skb->protocol != htons(ETH_P_8021AD)) {
 6967			dev_kfree_skb_any(skb);
 6968			goto drop_it_no_recycle;
 6969		}
 6970
 6971		if (desc->type_flags & RXD_FLAG_VLAN &&
 6972		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
 6973			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
 6974					       desc->err_vlan & RXD_VLAN_MASK);
 6975
 6976		napi_gro_receive(&tnapi->napi, skb);
 6977
 6978		received++;
 6979		budget--;
 6980
 6981next_pkt:
 6982		(*post_ptr)++;
 6983
 6984		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
 6985			tpr->rx_std_prod_idx = std_prod_idx &
 6986					       tp->rx_std_ring_mask;
 6987			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
 6988				     tpr->rx_std_prod_idx);
 6989			work_mask &= ~RXD_OPAQUE_RING_STD;
 6990			rx_std_posted = 0;
 6991		}
 6992next_pkt_nopost:
 6993		sw_idx++;
 6994		sw_idx &= tp->rx_ret_ring_mask;
 6995
 6996		/* Refresh hw_idx to see if there is new work */
 6997		if (sw_idx == hw_idx) {
 6998			hw_idx = *(tnapi->rx_rcb_prod_idx);
 6999			rmb();
 7000		}
 7001	}
 7002
 7003	/* ACK the status ring. */
 7004	tnapi->rx_rcb_ptr = sw_idx;
 7005	tw32_rx_mbox(tnapi->consmbox, sw_idx);
 7006
 7007	/* Refill RX ring(s). */
 7008	if (!tg3_flag(tp, ENABLE_RSS)) {
 7009		/* Sync BD data before updating mailbox */
 7010		wmb();
 7011
 7012		if (work_mask & RXD_OPAQUE_RING_STD) {
 7013			tpr->rx_std_prod_idx = std_prod_idx &
 7014					       tp->rx_std_ring_mask;
 7015			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
 7016				     tpr->rx_std_prod_idx);
 7017		}
 7018		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
 7019			tpr->rx_jmb_prod_idx = jmb_prod_idx &
 7020					       tp->rx_jmb_ring_mask;
 7021			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
 7022				     tpr->rx_jmb_prod_idx);
 7023		}
 7024	} else if (work_mask) {
 7025		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
 7026		 * updated before the producer indices can be updated.
 7027		 */
 7028		smp_wmb();
 7029
 7030		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
 7031		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
 7032
 7033		if (tnapi != &tp->napi[1]) {
 7034			tp->rx_refill = true;
 7035			napi_schedule(&tp->napi[1].napi);
 7036		}
 7037	}
 7038
 7039	return received;
 7040}
 7041
 7042static void tg3_poll_link(struct tg3 *tp)
 7043{
 7044	/* handle link change and other phy events */
 7045	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
 7046		struct tg3_hw_status *sblk = tp->napi[0].hw_status;
 7047
 7048		if (sblk->status & SD_STATUS_LINK_CHG) {
 7049			sblk->status = SD_STATUS_UPDATED |
 7050				       (sblk->status & ~SD_STATUS_LINK_CHG);
 7051			spin_lock(&tp->lock);
 7052			if (tg3_flag(tp, USE_PHYLIB)) {
 7053				tw32_f(MAC_STATUS,
 7054				     (MAC_STATUS_SYNC_CHANGED |
 7055				      MAC_STATUS_CFG_CHANGED |
 7056				      MAC_STATUS_MI_COMPLETION |
 7057				      MAC_STATUS_LNKSTATE_CHANGED));
 7058				udelay(40);
 7059			} else
 7060				tg3_setup_phy(tp, false);
 7061			spin_unlock(&tp->lock);
 7062		}
 7063	}
 7064}
 7065
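     /* With RSS, the chip consumes buffers from a single producer ring
      * (tp->napi[0].prodring) while each vector recycles and refills into
      * its own shadow ring.  This helper drains a shadow ring back into
      * the hardware-visible one, bailing out with -ENOSPC when a
      * destination slot is still occupied.
      */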
 7066static int tg3_rx_prodring_xfer(struct tg3 *tp,
 7067				struct tg3_rx_prodring_set *dpr,
 7068				struct tg3_rx_prodring_set *spr)
 7069{
 7070	u32 si, di, cpycnt, src_prod_idx;
 7071	int i, err = 0;
 7072
 7073	while (1) {
 7074		src_prod_idx = spr->rx_std_prod_idx;
 7075
 7076		/* Make sure updates to the rx_std_buffers[] entries and the
 7077		 * standard producer index are seen in the correct order.
 7078		 */
 7079		smp_rmb();
 7080
 7081		if (spr->rx_std_cons_idx == src_prod_idx)
 7082			break;
 7083
 7084		if (spr->rx_std_cons_idx < src_prod_idx)
 7085			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
 7086		else
 7087			cpycnt = tp->rx_std_ring_mask + 1 -
 7088				 spr->rx_std_cons_idx;
 7089
 7090		cpycnt = min(cpycnt,
 7091			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
 7092
 7093		si = spr->rx_std_cons_idx;
 7094		di = dpr->rx_std_prod_idx;
 7095
 7096		for (i = di; i < di + cpycnt; i++) {
 7097			if (dpr->rx_std_buffers[i].data) {
 7098				cpycnt = i - di;
 7099				err = -ENOSPC;
 7100				break;
 7101			}
 7102		}
 7103
 7104		if (!cpycnt)
 7105			break;
 7106
 7107		/* Ensure that updates to the rx_std_buffers ring and the
 7108		 * shadowed hardware producer ring from tg3_recycle_skb() are
 7109		 * ordered correctly WRT the skb check above.
 7110		 */
 7111		smp_rmb();
 7112
 7113		memcpy(&dpr->rx_std_buffers[di],
 7114		       &spr->rx_std_buffers[si],
 7115		       cpycnt * sizeof(struct ring_info));
 7116
 7117		for (i = 0; i < cpycnt; i++, di++, si++) {
 7118			struct tg3_rx_buffer_desc *sbd, *dbd;
 7119			sbd = &spr->rx_std[si];
 7120			dbd = &dpr->rx_std[di];
 7121			dbd->addr_hi = sbd->addr_hi;
 7122			dbd->addr_lo = sbd->addr_lo;
 7123		}
 7124
 7125		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
 7126				       tp->rx_std_ring_mask;
 7127		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
 7128				       tp->rx_std_ring_mask;
 7129	}
 7130
 7131	while (1) {
 7132		src_prod_idx = spr->rx_jmb_prod_idx;
 7133
 7134		/* Make sure updates to the rx_jmb_buffers[] entries and
 7135		 * the jumbo producer index are seen in the correct order.
 7136		 */
 7137		smp_rmb();
 7138
 7139		if (spr->rx_jmb_cons_idx == src_prod_idx)
 7140			break;
 7141
 7142		if (spr->rx_jmb_cons_idx < src_prod_idx)
 7143			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
 7144		else
 7145			cpycnt = tp->rx_jmb_ring_mask + 1 -
 7146				 spr->rx_jmb_cons_idx;
 7147
 7148		cpycnt = min(cpycnt,
 7149			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
 7150
 7151		si = spr->rx_jmb_cons_idx;
 7152		di = dpr->rx_jmb_prod_idx;
 7153
 7154		for (i = di; i < di + cpycnt; i++) {
 7155			if (dpr->rx_jmb_buffers[i].data) {
 7156				cpycnt = i - di;
 7157				err = -ENOSPC;
 7158				break;
 7159			}
 7160		}
 7161
 7162		if (!cpycnt)
 7163			break;
 7164
 7165		/* Ensure that updates to the rx_jmb_buffers ring and the
 7166		 * shadowed hardware producer ring from tg3_recycle_skb() are
 7167		 * ordered correctly WRT the skb check above.
 7168		 */
 7169		smp_rmb();
 7170
 7171		memcpy(&dpr->rx_jmb_buffers[di],
 7172		       &spr->rx_jmb_buffers[si],
 7173		       cpycnt * sizeof(struct ring_info));
 7174
 7175		for (i = 0; i < cpycnt; i++, di++, si++) {
 7176			struct tg3_rx_buffer_desc *sbd, *dbd;
 7177			sbd = &spr->rx_jmb[si].std;
 7178			dbd = &dpr->rx_jmb[di].std;
 7179			dbd->addr_hi = sbd->addr_hi;
 7180			dbd->addr_lo = sbd->addr_lo;
 7181		}
 7182
 7183		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
 7184				       tp->rx_jmb_ring_mask;
 7185		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
 7186				       tp->rx_jmb_ring_mask;
 7187	}
 7188
 7189	return err;
 7190}
 7191
 7192static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
 7193{
 7194	struct tg3 *tp = tnapi->tp;
 7195
 7196	/* run TX completion thread */
 7197	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
 7198		tg3_tx(tnapi);
 7199		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
 7200			return work_done;
 7201	}
 7202
 7203	if (!tnapi->rx_rcb_prod_idx)
 7204		return work_done;
 7205
 7206	/* run RX thread, within the bounds set by NAPI.
 7207	 * All RX "locking" is done by ensuring outside
 7208	 * code synchronizes with tg3->napi.poll()
 7209	 */
 7210	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
 7211		work_done += tg3_rx(tnapi, budget - work_done);
 7212
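     	/* Under RSS, vector 1 is the designated refiller: it gathers
     	 * the buffers recycled on every vector's shadow ring into ring
     	 * 0 and pokes the producer mailboxes only if the indices
     	 * actually moved.
     	 */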
 7213	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
 7214		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
 7215		int i, err = 0;
 7216		u32 std_prod_idx = dpr->rx_std_prod_idx;
 7217		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
 7218
 7219		tp->rx_refill = false;
 7220		for (i = 1; i <= tp->rxq_cnt; i++)
 7221			err |= tg3_rx_prodring_xfer(tp, dpr,
 7222						    &tp->napi[i].prodring);
 7223
 7224		wmb();
 7225
 7226		if (std_prod_idx != dpr->rx_std_prod_idx)
 7227			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
 7228				     dpr->rx_std_prod_idx);
 7229
 7230		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
 7231			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
 7232				     dpr->rx_jmb_prod_idx);
 7233
 7234		if (err)
 7235			tw32_f(HOSTCC_MODE, tp->coal_now);
 7236	}
 7237
 7238	return work_done;
 7239}
 7240
 7241static inline void tg3_reset_task_schedule(struct tg3 *tp)
 7242{
 7243	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
 7244		schedule_work(&tp->reset_task);
 7245}
 7246
 7247static inline void tg3_reset_task_cancel(struct tg3 *tp)
 7248{
 7249	if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
 7250		cancel_work_sync(&tp->reset_task);
 7251	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
 7252}
 7253
 7254static int tg3_poll_msix(struct napi_struct *napi, int budget)
 7255{
 7256	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
 7257	struct tg3 *tp = tnapi->tp;
 7258	int work_done = 0;
 7259	struct tg3_hw_status *sblk = tnapi->hw_status;
 7260
 7261	while (1) {
 7262		work_done = tg3_poll_work(tnapi, work_done, budget);
 7263
 7264		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
 7265			goto tx_recovery;
 7266
 7267		if (unlikely(work_done >= budget))
 7268			break;
 7269
 7270		/* tp->last_tag is used in tg3_int_reenable() below
 7271		 * to tell the hw how much work has been processed,
 7272		 * so we must read it before checking for more work.
 7273		 */
 7274		tnapi->last_tag = sblk->status_tag;
 7275		tnapi->last_irq_tag = tnapi->last_tag;
 7276		rmb();
 7277
 7278		/* check for RX/TX work to do */
 7279		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
 7280			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
 7281
 7282			/* This test is not race free, but looping again
 7283			 * here reduces the number of interrupts taken.
 7284			 */
 7285			if (tnapi == &tp->napi[1] && tp->rx_refill)
 7286				continue;
 7287
 7288			napi_complete_done(napi, work_done);
 7289			/* Reenable interrupts. */
 7290			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
 7291
 7292			/* This test is synchronized by napi_schedule()
 7293			 * and napi_complete() to close the race condition.
 7294			 */
 7295			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
 7296				tw32(HOSTCC_MODE, tp->coalesce_mode |
 7297						  HOSTCC_MODE_ENABLE |
 7298						  tnapi->coal_now);
 7299			}
 7300			break;
 7301		}
 7302	}
 7303
 7304	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
 7305	return work_done;
 7306
 7307tx_recovery:
 7308	/* work_done is guaranteed to be less than budget. */
 7309	napi_complete(napi);
 7310	tg3_reset_task_schedule(tp);
 7311	return work_done;
 7312}
 7313
 7314static void tg3_process_error(struct tg3 *tp)
 7315{
 7316	u32 val;
 7317	bool real_error = false;
 7318
 7319	if (tg3_flag(tp, ERROR_PROCESSED))
 7320		return;
 7321
 7322	/* Check Flow Attention register */
 7323	val = tr32(HOSTCC_FLOW_ATTN);
 7324	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
 7325		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
 7326		real_error = true;
 7327	}
 7328
 7329	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
 7330		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
 7331		real_error = true;
 7332	}
 7333
 7334	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
 7335		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
 7336		real_error = true;
 7337	}
 7338
 7339	if (!real_error)
 7340		return;
 7341
 7342	tg3_dump_state(tp);
 7343
 7344	tg3_flag_set(tp, ERROR_PROCESSED);
 7345	tg3_reset_task_schedule(tp);
 7346}
 7347
 7348static int tg3_poll(struct napi_struct *napi, int budget)
 7349{
 7350	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
 7351	struct tg3 *tp = tnapi->tp;
 7352	int work_done = 0;
 7353	struct tg3_hw_status *sblk = tnapi->hw_status;
 7354
 7355	while (1) {
 7356		if (sblk->status & SD_STATUS_ERROR)
 7357			tg3_process_error(tp);
 7358
 7359		tg3_poll_link(tp);
 7360
 7361		work_done = tg3_poll_work(tnapi, work_done, budget);
 7362
 7363		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
 7364			goto tx_recovery;
 7365
 7366		if (unlikely(work_done >= budget))
 7367			break;
 7368
 7369		if (tg3_flag(tp, TAGGED_STATUS)) {
 7370			/* tp->last_tag is used in tg3_int_reenable() below
 7371			 * to tell the hw how much work has been processed,
 7372			 * so we must read it before checking for more work.
 7373			 */
 7374			tnapi->last_tag = sblk->status_tag;
 7375			tnapi->last_irq_tag = tnapi->last_tag;
 7376			rmb();
 7377		} else
 7378			sblk->status &= ~SD_STATUS_UPDATED;
 7379
 7380		if (likely(!tg3_has_work(tnapi))) {
 7381			napi_complete_done(napi, work_done);
 7382			tg3_int_reenable(tnapi);
 7383			break;
 7384		}
 7385	}
 7386
 7387	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
 7388	return work_done;
 7389
 7390tx_recovery:
 7391	/* work_done is guaranteed to be less than budget. */
 7392	napi_complete(napi);
 7393	tg3_reset_task_schedule(tp);
 7394	return work_done;
 7395}
 7396
 7397static void tg3_napi_disable(struct tg3 *tp)
 7398{
 7399	int txq_idx = tp->txq_cnt - 1;
 7400	int rxq_idx = tp->rxq_cnt - 1;
 7401	struct tg3_napi *tnapi;
 7402	int i;
 7403
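	/* Walk the vectors in reverse so the queue/NAPI associations
	 * are unwound in the opposite order to tg3_napi_enable().
	 */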
 7404	for (i = tp->irq_cnt - 1; i >= 0; i--) {
 7405		tnapi = &tp->napi[i];
 7406		if (tnapi->tx_buffers) {
 7407			netif_queue_set_napi(tp->dev, txq_idx,
 7408					     NETDEV_QUEUE_TYPE_TX, NULL);
 7409			txq_idx--;
 7410		}
 7411		if (tnapi->rx_rcb) {
 7412			netif_queue_set_napi(tp->dev, rxq_idx,
 7413					     NETDEV_QUEUE_TYPE_RX, NULL);
 7414			rxq_idx--;
 7415		}
 7416		napi_disable(&tnapi->napi);
 7417	}
 7418}
 7419
 7420static void tg3_napi_enable(struct tg3 *tp)
 7421{
 7422	int txq_idx = 0, rxq_idx = 0;
 7423	struct tg3_napi *tnapi;
 7424	int i;
 7425
 7426	for (i = 0; i < tp->irq_cnt; i++) {
 7427		tnapi = &tp->napi[i];
 7428		napi_enable(&tnapi->napi);
 7429		if (tnapi->tx_buffers) {
 7430			netif_queue_set_napi(tp->dev, txq_idx,
 7431					     NETDEV_QUEUE_TYPE_TX,
 7432					     &tnapi->napi);
 7433			txq_idx++;
 7434		}
 7435		if (tnapi->rx_rcb) {
 7436			netif_queue_set_napi(tp->dev, rxq_idx,
 7437					     NETDEV_QUEUE_TYPE_RX,
 7438					     &tnapi->napi);
 7439			rxq_idx++;
 7440		}
 7441	}
 7442}
 7443
 7444static void tg3_napi_init(struct tg3 *tp)
 7445{
 7446	int i;
 7447
 7448	for (i = 0; i < tp->irq_cnt; i++) {
 7449		netif_napi_add(tp->dev, &tp->napi[i].napi,
 7450			       i ? tg3_poll_msix : tg3_poll);
 7451		netif_napi_set_irq(&tp->napi[i].napi, tp->napi[i].irq_vec);
 7452	}
 7453}
 7454
 7455static void tg3_napi_fini(struct tg3 *tp)
 7456{
 7457	int i;
 7458
 7459	for (i = 0; i < tp->irq_cnt; i++)
 7460		netif_napi_del(&tp->napi[i].napi);
 7461}
 7462
 7463static inline void tg3_netif_stop(struct tg3 *tp)
 7464{
 7465	netif_trans_update(tp->dev);	/* prevent tx timeout */
 7466	tg3_napi_disable(tp);
 7467	netif_carrier_off(tp->dev);
 7468	netif_tx_disable(tp->dev);
 7469}
 7470
 7471/* tp->lock must be held */
 7472static inline void tg3_netif_start(struct tg3 *tp)
 7473{
 7474	tg3_ptp_resume(tp);
 7475
 7476	/* NOTE: unconditional netif_tx_wake_all_queues is only
 7477	 * appropriate so long as all callers are assured to
 7478	 * have free tx slots (such as after tg3_init_hw)
 7479	 */
 7480	netif_tx_wake_all_queues(tp->dev);
 7481
 7482	if (tp->link_up)
 7483		netif_carrier_on(tp->dev);
 7484
 7485	tg3_napi_enable(tp);
 7486	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
 7487	tg3_enable_ints(tp);
 7488}
 7489
 7490static void tg3_irq_quiesce(struct tg3 *tp)
 7491	__releases(tp->lock)
 7492	__acquires(tp->lock)
 7493{
 7494	int i;
 7495
 7496	BUG_ON(tp->irq_sync);
 7497
 7498	tp->irq_sync = 1;
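	/* Make the irq_sync update visible to the IRQ handlers, which
	 * check it via tg3_irq_sync(), before we wait for any handlers
	 * already running to finish.
	 */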
 7499	smp_mb();
 7500
 7501	spin_unlock_bh(&tp->lock);
 7502
 7503	for (i = 0; i < tp->irq_cnt; i++)
 7504		synchronize_irq(tp->napi[i].irq_vec);
 7505
 7506	spin_lock_bh(&tp->lock);
 7507}
 7508
7509/* Fully shut down all tg3 driver activity elsewhere in the system.
7510 * If irq_sync is non-zero, then the IRQ handler must be synchronized
7511 * with it as well.  Most of the time this is only necessary when
 7512 * shutting down the device.
 7513 */
 7514static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
 7515{
 7516	spin_lock_bh(&tp->lock);
 7517	if (irq_sync)
 7518		tg3_irq_quiesce(tp);
 7519}
 7520
 7521static inline void tg3_full_unlock(struct tg3 *tp)
 7522{
 7523	spin_unlock_bh(&tp->lock);
 7524}
 7525
7526/* One-shot MSI handler - the chip automatically disables the
7527 * interrupt after sending the MSI, so the driver doesn't have to.
 7528 */
 7529static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
 7530{
 7531	struct tg3_napi *tnapi = dev_id;
 7532	struct tg3 *tp = tnapi->tp;
 7533
 7534	prefetch(tnapi->hw_status);
 7535	if (tnapi->rx_rcb)
 7536		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
 7537
 7538	if (likely(!tg3_irq_sync(tp)))
 7539		napi_schedule(&tnapi->napi);
 7540
 7541	return IRQ_HANDLED;
 7542}
 7543
 7544/* MSI ISR - No need to check for interrupt sharing and no need to
 7545 * flush status block and interrupt mailbox. PCI ordering rules
 7546 * guarantee that MSI will arrive after the status block.
 7547 */
 7548static irqreturn_t tg3_msi(int irq, void *dev_id)
 7549{
 7550	struct tg3_napi *tnapi = dev_id;
 7551	struct tg3 *tp = tnapi->tp;
 7552
 7553	prefetch(tnapi->hw_status);
 7554	if (tnapi->rx_rcb)
 7555		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
 7556	/*
 7557	 * Writing any value to intr-mbox-0 clears PCI INTA# and
 7558	 * chip-internal interrupt pending events.
7559	 * Writing non-zero to intr-mbox-0 additionally tells the
 7560	 * NIC to stop sending us irqs, engaging "in-intr-handler"
 7561	 * event coalescing.
 7562	 */
 7563	tw32_mailbox(tnapi->int_mbox, 0x00000001);
 7564	if (likely(!tg3_irq_sync(tp)))
 7565		napi_schedule(&tnapi->napi);
 7566
 7567	return IRQ_RETVAL(1);
 7568}
 7569
 7570static irqreturn_t tg3_interrupt(int irq, void *dev_id)
 7571{
 7572	struct tg3_napi *tnapi = dev_id;
 7573	struct tg3 *tp = tnapi->tp;
 7574	struct tg3_hw_status *sblk = tnapi->hw_status;
 7575	unsigned int handled = 1;
 7576
 7577	/* In INTx mode, it is possible for the interrupt to arrive at
7578	 * the CPU before the status block that was posted prior to it.
 7579	 * Reading the PCI State register will confirm whether the
 7580	 * interrupt is ours and will flush the status block.
 7581	 */
 7582	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
 7583		if (tg3_flag(tp, CHIP_RESETTING) ||
 7584		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
 7585			handled = 0;
 7586			goto out;
 7587		}
 7588	}
 7589
 7590	/*
 7591	 * Writing any value to intr-mbox-0 clears PCI INTA# and
 7592	 * chip-internal interrupt pending events.
7593	 * Writing non-zero to intr-mbox-0 additionally tells the
 7594	 * NIC to stop sending us irqs, engaging "in-intr-handler"
 7595	 * event coalescing.
 7596	 *
 7597	 * Flush the mailbox to de-assert the IRQ immediately to prevent
 7598	 * spurious interrupts.  The flush impacts performance but
 7599	 * excessive spurious interrupts can be worse in some cases.
 7600	 */
 7601	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
 7602	if (tg3_irq_sync(tp))
 7603		goto out;
 7604	sblk->status &= ~SD_STATUS_UPDATED;
 7605	if (likely(tg3_has_work(tnapi))) {
 7606		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
 7607		napi_schedule(&tnapi->napi);
 7608	} else {
 7609		/* No work, shared interrupt perhaps?  re-enable
 7610		 * interrupts, and flush that PCI write
 7611		 */
 7612		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
 7613			       0x00000000);
 7614	}
 7615out:
 7616	return IRQ_RETVAL(handled);
 7617}
 7618
 7619static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
 7620{
 7621	struct tg3_napi *tnapi = dev_id;
 7622	struct tg3 *tp = tnapi->tp;
 7623	struct tg3_hw_status *sblk = tnapi->hw_status;
 7624	unsigned int handled = 1;
 7625
 7626	/* In INTx mode, it is possible for the interrupt to arrive at
7627	 * the CPU before the status block that was posted prior to it.
 7628	 * Reading the PCI State register will confirm whether the
 7629	 * interrupt is ours and will flush the status block.
 7630	 */
 7631	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
 7632		if (tg3_flag(tp, CHIP_RESETTING) ||
 7633		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
 7634			handled = 0;
 7635			goto out;
 7636		}
 7637	}
 7638
 7639	/*
7640	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7641	 * chip-internal interrupt pending events.
7642	 * Writing non-zero to intr-mbox-0 additionally tells the
 7643	 * NIC to stop sending us irqs, engaging "in-intr-handler"
 7644	 * event coalescing.
 7645	 *
 7646	 * Flush the mailbox to de-assert the IRQ immediately to prevent
 7647	 * spurious interrupts.  The flush impacts performance but
 7648	 * excessive spurious interrupts can be worse in some cases.
 7649	 */
 7650	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
 7651
 7652	/*
 7653	 * In a shared interrupt configuration, sometimes other devices'
 7654	 * interrupts will scream.  We record the current status tag here
 7655	 * so that the above check can report that the screaming interrupts
 7656	 * are unhandled.  Eventually they will be silenced.
 7657	 */
 7658	tnapi->last_irq_tag = sblk->status_tag;
 7659
 7660	if (tg3_irq_sync(tp))
 7661		goto out;
 7662
 7663	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
 7664
 7665	napi_schedule(&tnapi->napi);
 7666
 7667out:
 7668	return IRQ_RETVAL(handled);
 7669}
 7670
 7671/* ISR for interrupt test */
 7672static irqreturn_t tg3_test_isr(int irq, void *dev_id)
 7673{
 7674	struct tg3_napi *tnapi = dev_id;
 7675	struct tg3 *tp = tnapi->tp;
 7676	struct tg3_hw_status *sblk = tnapi->hw_status;
 7677
 7678	if ((sblk->status & SD_STATUS_UPDATED) ||
 7679	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
 7680		tg3_disable_ints(tp);
 7681		return IRQ_RETVAL(1);
 7682	}
 7683	return IRQ_RETVAL(0);
 7684}
 7685
 7686#ifdef CONFIG_NET_POLL_CONTROLLER
 7687static void tg3_poll_controller(struct net_device *dev)
 7688{
 7689	int i;
 7690	struct tg3 *tp = netdev_priv(dev);
 7691
 7692	if (tg3_irq_sync(tp))
 7693		return;
 7694
 7695	for (i = 0; i < tp->irq_cnt; i++)
 7696		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
 7697}
 7698#endif
 7699
 7700static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue)
 7701{
 7702	struct tg3 *tp = netdev_priv(dev);
 7703
 7704	if (netif_msg_tx_err(tp)) {
 7705		netdev_err(dev, "transmit timed out, resetting\n");
 7706		tg3_dump_state(tp);
 7707	}
 7708
 7709	tg3_reset_task_schedule(tp);
 7710}
 7711
7712/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
 7713static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
 7714{
 7715	u32 base = (u32) mapping & 0xffffffff;
 7716
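	/* Unsigned wraparound of the low 32 bits means the buffer, plus
	 * an 8-byte margin (presumably for the DMA engine's read-ahead),
	 * crosses a 4GB boundary.
	 */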
 7717	return base + len + 8 < base;
 7718}
 7719
 7720/* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7721 * of any 4GB boundaries: 4G, 8G, etc.
 7722 */
 7723static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
 7724					   u32 len, u32 mss)
 7725{
 7726	if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
 7727		u32 base = (u32) mapping & 0xffffffff;
 7728
 7729		return ((base + len + (mss & 0x3fff)) < base);
 7730	}
 7731	return 0;
 7732}
 7733
 7734/* Test for DMA addresses > 40-bit */
 7735static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
 7736					  int len)
 7737{
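	/* The 40-bit overflow can only occur when the platform can hand
	 * out DMA addresses above 40 bits; on other configurations the
	 * test compiles away to a constant 0.
	 */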
 7738#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
 7739	if (tg3_flag(tp, 40BIT_DMA_BUG))
 7740		return ((u64) mapping + len) > DMA_BIT_MASK(40);
 7741	return 0;
 7742#else
 7743	return 0;
 7744#endif
 7745}
 7746
 7747static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
 7748				 dma_addr_t mapping, u32 len, u32 flags,
 7749				 u32 mss, u32 vlan)
 7750{
 7751	txbd->addr_hi = ((u64) mapping >> 32);
 7752	txbd->addr_lo = ((u64) mapping & 0xffffffff);
 7753	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
 7754	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
 7755}
 7756
 7757static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
 7758			    dma_addr_t map, u32 len, u32 flags,
 7759			    u32 mss, u32 vlan)
 7760{
 7761	struct tg3 *tp = tnapi->tp;
 7762	bool hwbug = false;
 7763
 7764	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
 7765		hwbug = true;
 7766
 7767	if (tg3_4g_overflow_test(map, len))
 7768		hwbug = true;
 7769
 7770	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
 7771		hwbug = true;
 7772
 7773	if (tg3_40bit_overflow_test(tp, map, len))
 7774		hwbug = true;
 7775
 7776	if (tp->dma_limit) {
 7777		u32 prvidx = *entry;
 7778		u32 tmp_flag = flags & ~TXD_FLAG_END;
 7779		while (len > tp->dma_limit && *budget) {
 7780			u32 frag_len = tp->dma_limit;
 7781			len -= tp->dma_limit;
 7782
7783			/* Avoid the 8-byte DMA problem by keeping the tail fragment larger than 8 bytes */
 7784			if (len <= 8) {
 7785				len += tp->dma_limit / 2;
 7786				frag_len = tp->dma_limit / 2;
 7787			}
 7788
 7789			tnapi->tx_buffers[*entry].fragmented = true;
 7790
 7791			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
 7792				      frag_len, tmp_flag, mss, vlan);
 7793			*budget -= 1;
 7794			prvidx = *entry;
 7795			*entry = NEXT_TX(*entry);
 7796
 7797			map += frag_len;
 7798		}
 7799
 7800		if (len) {
 7801			if (*budget) {
 7802				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
 7803					      len, flags, mss, vlan);
 7804				*budget -= 1;
 7805				*entry = NEXT_TX(*entry);
 7806			} else {
 7807				hwbug = true;
 7808				tnapi->tx_buffers[prvidx].fragmented = false;
 7809			}
 7810		}
 7811	} else {
 7812		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
 7813			      len, flags, mss, vlan);
 7814		*entry = NEXT_TX(*entry);
 7815	}
 7816
 7817	return hwbug;
 7818}
 7819
 7820static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
 7821{
 7822	int i;
 7823	struct sk_buff *skb;
 7824	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
 7825
 7826	skb = txb->skb;
 7827	txb->skb = NULL;
 7828
 7829	dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping),
 7830			 skb_headlen(skb), DMA_TO_DEVICE);
 7831
 7832	while (txb->fragmented) {
 7833		txb->fragmented = false;
 7834		entry = NEXT_TX(entry);
 7835		txb = &tnapi->tx_buffers[entry];
 7836	}
 7837
 7838	for (i = 0; i <= last; i++) {
 7839		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 7840
 7841		entry = NEXT_TX(entry);
 7842		txb = &tnapi->tx_buffers[entry];
 7843
 7844		dma_unmap_page(&tnapi->tp->pdev->dev,
 7845			       dma_unmap_addr(txb, mapping),
 7846			       skb_frag_size(frag), DMA_TO_DEVICE);
 7847
 7848		while (txb->fragmented) {
 7849			txb->fragmented = false;
 7850			entry = NEXT_TX(entry);
 7851			txb = &tnapi->tx_buffers[entry];
 7852		}
 7853	}
 7854}
 7855
 7856/* Workaround 4GB and 40-bit hardware DMA bugs. */
 7857static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
 7858				       struct sk_buff **pskb,
 7859				       u32 *entry, u32 *budget,
 7860				       u32 base_flags, u32 mss, u32 vlan)
 7861{
 7862	struct tg3 *tp = tnapi->tp;
 7863	struct sk_buff *new_skb, *skb = *pskb;
 7864	dma_addr_t new_addr = 0;
 7865	int ret = 0;
 7866
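	/* On the 5701 the replacement buffer apparently needs enough
	 * extra headroom for the copied data to be 4-byte aligned;
	 * other chips can use a plain copy.
	 */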
 7867	if (tg3_asic_rev(tp) != ASIC_REV_5701)
 7868		new_skb = skb_copy(skb, GFP_ATOMIC);
 7869	else {
 7870		int more_headroom = 4 - ((unsigned long)skb->data & 3);
 7871
 7872		new_skb = skb_copy_expand(skb,
 7873					  skb_headroom(skb) + more_headroom,
 7874					  skb_tailroom(skb), GFP_ATOMIC);
 7875	}
 7876
 7877	if (!new_skb) {
 7878		ret = -1;
 7879	} else {
 7880		/* New SKB is guaranteed to be linear. */
 7881		new_addr = dma_map_single(&tp->pdev->dev, new_skb->data,
 7882					  new_skb->len, DMA_TO_DEVICE);
 7883		/* Make sure the mapping succeeded */
 7884		if (dma_mapping_error(&tp->pdev->dev, new_addr)) {
 7885			dev_kfree_skb_any(new_skb);
 7886			ret = -1;
 7887		} else {
 7888			u32 save_entry = *entry;
 7889
 7890			base_flags |= TXD_FLAG_END;
 7891
 7892			tnapi->tx_buffers[*entry].skb = new_skb;
 7893			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
 7894					   mapping, new_addr);
 7895
 7896			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
 7897					    new_skb->len, base_flags,
 7898					    mss, vlan)) {
 7899				tg3_tx_skb_unmap(tnapi, save_entry, -1);
 7900				dev_kfree_skb_any(new_skb);
 7901				ret = -1;
 7902			}
 7903		}
 7904	}
 7905
 7906	dev_consume_skb_any(skb);
 7907	*pskb = new_skb;
 7908	return ret;
 7909}
 7910
 7911static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
 7912{
 7913	/* Check if we will never have enough descriptors,
7914	 * as gso_segs can exceed the current ring size
 7915	 */
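	/* The divisor of 3 mirrors the worst-case estimate of three
	 * descriptors per GSO segment used for frag_cnt_est in
	 * tg3_tso_bug() below.
	 */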
 7916	return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
 7917}
 7918
 7919static netdev_tx_t __tg3_start_xmit(struct sk_buff *, struct net_device *);
 7920
7921/* Use GSO to work around all TSO packets that meet HW bug conditions
 7922 * indicated in tg3_tx_frag_set()
 7923 */
 7924static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
 7925		       struct netdev_queue *txq, struct sk_buff *skb)
 7926{
 7927	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
 7928	struct sk_buff *segs, *seg, *next;
 7929
 7930	/* Estimate the number of fragments in the worst case */
 7931	if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
 7932		netif_tx_stop_queue(txq);
 7933
 7934		/* netif_tx_stop_queue() must be done before checking
7935		 * the tx index in tg3_tx_avail() below, because in
 7936		 * tg3_tx(), we update tx index before checking for
 7937		 * netif_tx_queue_stopped().
 7938		 */
 7939		smp_mb();
 7940		if (tg3_tx_avail(tnapi) <= frag_cnt_est)
 7941			return NETDEV_TX_BUSY;
 7942
 7943		netif_tx_wake_queue(txq);
 7944	}
 7945
 7946	segs = skb_gso_segment(skb, tp->dev->features &
 7947				    ~(NETIF_F_TSO | NETIF_F_TSO6));
 7948	if (IS_ERR(segs) || !segs) {
 7949		tnapi->tx_dropped++;
 7950		goto tg3_tso_bug_end;
 7951	}
 7952
 7953	skb_list_walk_safe(segs, seg, next) {
 7954		skb_mark_not_on_list(seg);
 7955		__tg3_start_xmit(seg, tp->dev);
 7956	}
 7957
 7958tg3_tso_bug_end:
 7959	dev_consume_skb_any(skb);
 7960
 7961	return NETDEV_TX_OK;
 7962}
 7963
 7964/* hard_start_xmit for all devices */
 7965static netdev_tx_t __tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 7966{
 7967	struct tg3 *tp = netdev_priv(dev);
 7968	u32 len, entry, base_flags, mss, vlan = 0;
 7969	u32 budget;
 7970	int i = -1, would_hit_hwbug;
 7971	dma_addr_t mapping;
 7972	struct tg3_napi *tnapi;
 7973	struct netdev_queue *txq;
 7974	unsigned int last;
 7975	struct iphdr *iph = NULL;
 7976	struct tcphdr *tcph = NULL;
 7977	__sum16 tcp_csum = 0, ip_csum = 0;
 7978	__be16 ip_tot_len = 0;
 7979
 7980	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 7981	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
 7982	if (tg3_flag(tp, ENABLE_TSS))
 7983		tnapi++;
 7984
 7985	budget = tg3_tx_avail(tnapi);
 7986
 7987	/* We are running in BH disabled context with netif_tx_lock
 7988	 * and TX reclaim runs via tp->napi.poll inside of a software
 7989	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
 7990	 * no IRQ context deadlocks to worry about either.  Rejoice!
 7991	 */
 7992	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
 7993		if (!netif_tx_queue_stopped(txq)) {
 7994			netif_tx_stop_queue(txq);
 7995
 7996			/* This is a hard error, log it. */
 7997			netdev_err(dev,
 7998				   "BUG! Tx Ring full when queue awake!\n");
 7999		}
 8000		return NETDEV_TX_BUSY;
 8001	}
 8002
 8003	entry = tnapi->tx_prod;
 8004	base_flags = 0;
 8005
 8006	mss = skb_shinfo(skb)->gso_size;
 8007	if (mss) {
 8008		u32 tcp_opt_len, hdr_len;
 8009
 8010		if (skb_cow_head(skb, 0))
 8011			goto drop;
 8012
 8013		iph = ip_hdr(skb);
 8014		tcp_opt_len = tcp_optlen(skb);
 8015
 8016		hdr_len = skb_tcp_all_headers(skb) - ETH_HLEN;
 8017
8018		/* HW/FW cannot correctly segment packets that have been
 8019		 * vlan encapsulated.
 8020		 */
 8021		if (skb->protocol == htons(ETH_P_8021Q) ||
 8022		    skb->protocol == htons(ETH_P_8021AD)) {
 8023			if (tg3_tso_bug_gso_check(tnapi, skb))
 8024				return tg3_tso_bug(tp, tnapi, txq, skb);
 8025			goto drop;
 8026		}
 8027
 8028		if (!skb_is_gso_v6(skb)) {
 8029			if (unlikely((ETH_HLEN + hdr_len) > 80) &&
 8030			    tg3_flag(tp, TSO_BUG)) {
 8031				if (tg3_tso_bug_gso_check(tnapi, skb))
 8032					return tg3_tso_bug(tp, tnapi, txq, skb);
 8033				goto drop;
 8034			}
 8035			ip_csum = iph->check;
 8036			ip_tot_len = iph->tot_len;
 8037			iph->check = 0;
 8038			iph->tot_len = htons(mss + hdr_len);
 8039		}
 8040
 8041		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
 8042			       TXD_FLAG_CPU_POST_DMA);
 8043
 8044		tcph = tcp_hdr(skb);
 8045		tcp_csum = tcph->check;
 8046
 8047		if (tg3_flag(tp, HW_TSO_1) ||
 8048		    tg3_flag(tp, HW_TSO_2) ||
 8049		    tg3_flag(tp, HW_TSO_3)) {
 8050			tcph->check = 0;
 8051			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
 8052		} else {
 8053			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
 8054							 0, IPPROTO_TCP, 0);
 8055		}
 8056
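		/* The TSO header length is scattered across chip-specific
		 * bit fields in the mss word and base_flags; each HW TSO
		 * generation below uses its own encoding.
		 */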
 8057		if (tg3_flag(tp, HW_TSO_3)) {
 8058			mss |= (hdr_len & 0xc) << 12;
 8059			if (hdr_len & 0x10)
 8060				base_flags |= 0x00000010;
 8061			base_flags |= (hdr_len & 0x3e0) << 5;
 8062		} else if (tg3_flag(tp, HW_TSO_2))
 8063			mss |= hdr_len << 9;
 8064		else if (tg3_flag(tp, HW_TSO_1) ||
 8065			 tg3_asic_rev(tp) == ASIC_REV_5705) {
 8066			if (tcp_opt_len || iph->ihl > 5) {
 8067				int tsflags;
 8068
 8069				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
 8070				mss |= (tsflags << 11);
 8071			}
 8072		} else {
 8073			if (tcp_opt_len || iph->ihl > 5) {
 8074				int tsflags;
 8075
 8076				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
 8077				base_flags |= tsflags << 12;
 8078			}
 8079		}
 8080	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8081	/* HW/FW cannot correctly checksum packets that have been
 8082		 * vlan encapsulated.
 8083		 */
 8084		if (skb->protocol == htons(ETH_P_8021Q) ||
 8085		    skb->protocol == htons(ETH_P_8021AD)) {
 8086			if (skb_checksum_help(skb))
 8087				goto drop;
 8088		} else  {
 8089			base_flags |= TXD_FLAG_TCPUDP_CSUM;
 8090		}
 8091	}
 8092
 8093	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
 8094	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
 8095		base_flags |= TXD_FLAG_JMB_PKT;
 8096
 8097	if (skb_vlan_tag_present(skb)) {
 8098		base_flags |= TXD_FLAG_VLAN;
 8099		vlan = skb_vlan_tag_get(skb);
 8100	}
 8101
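	/* Only one outstanding TX timestamp is tracked (tp->pre_tx_ts),
	 * so hardware timestamping is requested only when no other
	 * request is already pending.
	 */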
 8102	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
 8103	    tg3_flag(tp, TX_TSTAMP_EN)) {
 8104		tg3_full_lock(tp, 0);
 8105		if (!tp->pre_tx_ts) {
 8106			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
 8107			base_flags |= TXD_FLAG_HWTSTAMP;
 8108			tg3_read_tx_tstamp(tp, &tp->pre_tx_ts);
 8109		}
 8110		tg3_full_unlock(tp);
 8111	}
 8112
 8113	len = skb_headlen(skb);
 8114
 8115	mapping = dma_map_single(&tp->pdev->dev, skb->data, len,
 8116				 DMA_TO_DEVICE);
 8117	if (dma_mapping_error(&tp->pdev->dev, mapping))
 8118		goto drop;
 8119
 8120
 8121	tnapi->tx_buffers[entry].skb = skb;
 8122	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
 8123
 8124	would_hit_hwbug = 0;
 8125
 8126	if (tg3_flag(tp, 5701_DMA_BUG))
 8127		would_hit_hwbug = 1;
 8128
 8129	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
 8130			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
 8131			    mss, vlan)) {
 8132		would_hit_hwbug = 1;
 8133	} else if (skb_shinfo(skb)->nr_frags > 0) {
 8134		u32 tmp_mss = mss;
 8135
 8136		if (!tg3_flag(tp, HW_TSO_1) &&
 8137		    !tg3_flag(tp, HW_TSO_2) &&
 8138		    !tg3_flag(tp, HW_TSO_3))
 8139			tmp_mss = 0;
 8140
 8141		/* Now loop through additional data
 8142		 * fragments, and queue them.
 8143		 */
 8144		last = skb_shinfo(skb)->nr_frags - 1;
 8145		for (i = 0; i <= last; i++) {
 8146			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 8147
 8148			len = skb_frag_size(frag);
 8149			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
 8150						   len, DMA_TO_DEVICE);
 8151
 8152			tnapi->tx_buffers[entry].skb = NULL;
 8153			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
 8154					   mapping);
 8155			if (dma_mapping_error(&tp->pdev->dev, mapping))
 8156				goto dma_error;
 8157
 8158			if (!budget ||
 8159			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
 8160					    len, base_flags |
 8161					    ((i == last) ? TXD_FLAG_END : 0),
 8162					    tmp_mss, vlan)) {
 8163				would_hit_hwbug = 1;
 8164				break;
 8165			}
 8166		}
 8167	}
 8168
 8169	if (would_hit_hwbug) {
 8170		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
 8171
 8172		if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
 8173			/* If it's a TSO packet, do GSO instead of
 8174			 * allocating and copying to a large linear SKB
 8175			 */
 8176			if (ip_tot_len) {
 8177				iph->check = ip_csum;
 8178				iph->tot_len = ip_tot_len;
 8179			}
 8180			tcph->check = tcp_csum;
 8181			return tg3_tso_bug(tp, tnapi, txq, skb);
 8182		}
 8183
 8184		/* If the workaround fails due to memory/mapping
 8185		 * failure, silently drop this packet.
 8186		 */
 8187		entry = tnapi->tx_prod;
 8188		budget = tg3_tx_avail(tnapi);
 8189		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
 8190						base_flags, mss, vlan))
 8191			goto drop_nofree;
 8192	}
 8193
 8194	skb_tx_timestamp(skb);
 8195	netdev_tx_sent_queue(txq, skb->len);
 8196
 8197	/* Sync BD data before updating mailbox */
 8198	wmb();
 8199
 8200	tnapi->tx_prod = entry;
 8201	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
 8202		netif_tx_stop_queue(txq);
 8203
 8204		/* netif_tx_stop_queue() must be done before checking
8205		 * the tx index in tg3_tx_avail() below, because in
 8206		 * tg3_tx(), we update tx index before checking for
 8207		 * netif_tx_queue_stopped().
 8208		 */
 8209		smp_mb();
 8210		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
 8211			netif_tx_wake_queue(txq);
 8212	}
 8213
 8214	return NETDEV_TX_OK;
 8215
 8216dma_error:
 8217	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
 8218	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
 8219drop:
 8220	dev_kfree_skb_any(skb);
 8221drop_nofree:
 8222	tnapi->tx_dropped++;
 8223	return NETDEV_TX_OK;
 8224}
 8225
 8226static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 8227{
 8228	struct netdev_queue *txq;
 8229	u16 skb_queue_mapping;
 8230	netdev_tx_t ret;
 8231
 8232	skb_queue_mapping = skb_get_queue_mapping(skb);
 8233	txq = netdev_get_tx_queue(dev, skb_queue_mapping);
 8234
 8235	ret = __tg3_start_xmit(skb, dev);
 8236
 8237	/* Notify the hardware that packets are ready by updating the TX ring
 8238	 * tail pointer. We respect netdev_xmit_more() thus avoiding poking
 8239	 * the hardware for every packet. To guarantee forward progress the TX
 8240	 * ring must be drained when it is full as indicated by
 8241	 * netif_xmit_stopped(). This needs to happen even when the current
 8242	 * skb was dropped or rejected with NETDEV_TX_BUSY. Otherwise packets
 8243	 * queued by previous __tg3_start_xmit() calls might get stuck in
 8244	 * the queue forever.
 8245	 */
 8246	if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
 8247		struct tg3_napi *tnapi;
 8248		struct tg3 *tp;
 8249
 8250		tp = netdev_priv(dev);
 8251		tnapi = &tp->napi[skb_queue_mapping];
 8252
 8253		if (tg3_flag(tp, ENABLE_TSS))
 8254			tnapi++;
 8255
 8256		tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
 8257	}
 8258
 8259	return ret;
 8260}
 8261
 8262static void tg3_mac_loopback(struct tg3 *tp, bool enable)
 8263{
 8264	if (enable) {
 8265		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
 8266				  MAC_MODE_PORT_MODE_MASK);
 8267
 8268		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
 8269
 8270		if (!tg3_flag(tp, 5705_PLUS))
 8271			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
 8272
 8273		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
 8274			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
 8275		else
 8276			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
 8277	} else {
 8278		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
 8279
 8280		if (tg3_flag(tp, 5705_PLUS) ||
 8281		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
 8282		    tg3_asic_rev(tp) == ASIC_REV_5700)
 8283			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
 8284	}
 8285
 8286	tw32(MAC_MODE, tp->mac_mode);
 8287	udelay(40);
 8288}
 8289
 8290static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
 8291{
 8292	u32 val, bmcr, mac_mode, ptest = 0;
 8293
 8294	tg3_phy_toggle_apd(tp, false);
 8295	tg3_phy_toggle_automdix(tp, false);
 8296
 8297	if (extlpbk && tg3_phy_set_extloopbk(tp))
 8298		return -EIO;
 8299
 8300	bmcr = BMCR_FULLDPLX;
 8301	switch (speed) {
 8302	case SPEED_10:
 8303		break;
 8304	case SPEED_100:
 8305		bmcr |= BMCR_SPEED100;
 8306		break;
 8307	case SPEED_1000:
 8308	default:
 8309		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
 8310			speed = SPEED_100;
 8311			bmcr |= BMCR_SPEED100;
 8312		} else {
 8313			speed = SPEED_1000;
 8314			bmcr |= BMCR_SPEED1000;
 8315		}
 8316	}
 8317
 8318	if (extlpbk) {
 8319		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
 8320			tg3_readphy(tp, MII_CTRL1000, &val);
 8321			val |= CTL1000_AS_MASTER |
 8322			       CTL1000_ENABLE_MASTER;
 8323			tg3_writephy(tp, MII_CTRL1000, val);
 8324		} else {
 8325			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
 8326				MII_TG3_FET_PTEST_TRIM_2;
 8327			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
 8328		}
 8329	} else
 8330		bmcr |= BMCR_LOOPBACK;
 8331
 8332	tg3_writephy(tp, MII_BMCR, bmcr);
 8333
 8334	/* The write needs to be flushed for the FETs */
 8335	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
 8336		tg3_readphy(tp, MII_BMCR, &bmcr);
 8337
 8338	udelay(40);
 8339
 8340	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
 8341	    tg3_asic_rev(tp) == ASIC_REV_5785) {
 8342		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
 8343			     MII_TG3_FET_PTEST_FRC_TX_LINK |
 8344			     MII_TG3_FET_PTEST_FRC_TX_LOCK);
 8345
 8346		/* The write needs to be flushed for the AC131 */
 8347		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
 8348	}
 8349
8350	/* Reset to prevent intermittently losing the first rx packet */
 8351	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
 8352	    tg3_flag(tp, 5780_CLASS)) {
 8353		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
 8354		udelay(10);
 8355		tw32_f(MAC_RX_MODE, tp->rx_mode);
 8356	}
 8357
 8358	mac_mode = tp->mac_mode &
 8359		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
 8360	if (speed == SPEED_1000)
 8361		mac_mode |= MAC_MODE_PORT_MODE_GMII;
 8362	else
 8363		mac_mode |= MAC_MODE_PORT_MODE_MII;
 8364
 8365	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
 8366		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
 8367
 8368		if (masked_phy_id == TG3_PHY_ID_BCM5401)
 8369			mac_mode &= ~MAC_MODE_LINK_POLARITY;
 8370		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
 8371			mac_mode |= MAC_MODE_LINK_POLARITY;
 8372
 8373		tg3_writephy(tp, MII_TG3_EXT_CTRL,
 8374			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
 8375	}
 8376
 8377	tw32(MAC_MODE, mac_mode);
 8378	udelay(40);
 8379
 8380	return 0;
 8381}
 8382
 8383static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
 8384{
 8385	struct tg3 *tp = netdev_priv(dev);
 8386
 8387	if (features & NETIF_F_LOOPBACK) {
 8388		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
 8389			return;
 8390
 8391		spin_lock_bh(&tp->lock);
 8392		tg3_mac_loopback(tp, true);
 8393		netif_carrier_on(tp->dev);
 8394		spin_unlock_bh(&tp->lock);
 8395		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
 8396	} else {
 8397		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
 8398			return;
 8399
 8400		spin_lock_bh(&tp->lock);
 8401		tg3_mac_loopback(tp, false);
 8402		/* Force link status check */
 8403		tg3_setup_phy(tp, true);
 8404		spin_unlock_bh(&tp->lock);
 8405		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
 8406	}
 8407}
 8408
 8409static netdev_features_t tg3_fix_features(struct net_device *dev,
 8410	netdev_features_t features)
 8411{
 8412	struct tg3 *tp = netdev_priv(dev);
 8413
 8414	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
 8415		features &= ~NETIF_F_ALL_TSO;
 8416
 8417	return features;
 8418}
 8419
 8420static int tg3_set_features(struct net_device *dev, netdev_features_t features)
 8421{
 8422	netdev_features_t changed = dev->features ^ features;
 8423
 8424	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
 8425		tg3_set_loopback(dev, features);
 8426
 8427	return 0;
 8428}
 8429
 8430static void tg3_rx_prodring_free(struct tg3 *tp,
 8431				 struct tg3_rx_prodring_set *tpr)
 8432{
 8433	int i;
 8434
 8435	if (tpr != &tp->napi[0].prodring) {
 8436		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
 8437		     i = (i + 1) & tp->rx_std_ring_mask)
 8438			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
 8439					tp->rx_pkt_map_sz);
 8440
 8441		if (tg3_flag(tp, JUMBO_CAPABLE)) {
 8442			for (i = tpr->rx_jmb_cons_idx;
 8443			     i != tpr->rx_jmb_prod_idx;
 8444			     i = (i + 1) & tp->rx_jmb_ring_mask) {
 8445				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
 8446						TG3_RX_JMB_MAP_SZ);
 8447			}
 8448		}
 8449
 8450		return;
 8451	}
 8452
 8453	for (i = 0; i <= tp->rx_std_ring_mask; i++)
 8454		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
 8455				tp->rx_pkt_map_sz);
 8456
 8457	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
 8458		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
 8459			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
 8460					TG3_RX_JMB_MAP_SZ);
 8461	}
 8462}
 8463
 8464/* Initialize rx rings for packet processing.
 8465 *
 8466 * The chip has been shut down and the driver detached from
8467 * the networking stack, so no interrupts or new tx packets will
 8468 * end up in the driver.  tp->{tx,}lock are held and thus
 8469 * we may not sleep.
 8470 */
 8471static int tg3_rx_prodring_alloc(struct tg3 *tp,
 8472				 struct tg3_rx_prodring_set *tpr)
 8473{
 8474	u32 i, rx_pkt_dma_sz;
 8475
 8476	tpr->rx_std_cons_idx = 0;
 8477	tpr->rx_std_prod_idx = 0;
 8478	tpr->rx_jmb_cons_idx = 0;
 8479	tpr->rx_jmb_prod_idx = 0;
 8480
 8481	if (tpr != &tp->napi[0].prodring) {
 8482		memset(&tpr->rx_std_buffers[0], 0,
 8483		       TG3_RX_STD_BUFF_RING_SIZE(tp));
 8484		if (tpr->rx_jmb_buffers)
 8485			memset(&tpr->rx_jmb_buffers[0], 0,
 8486			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
 8487		goto done;
 8488	}
 8489
 8490	/* Zero out all descriptors. */
 8491	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
 8492
 8493	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
 8494	if (tg3_flag(tp, 5780_CLASS) &&
 8495	    tp->dev->mtu > ETH_DATA_LEN)
 8496		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
 8497	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
 8498
8499	/* Initialize invariants of the rings; we only set this
 8500	 * stuff once.  This works because the card does not
 8501	 * write into the rx buffer posting rings.
 8502	 */
 8503	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
 8504		struct tg3_rx_buffer_desc *rxd;
 8505
 8506		rxd = &tpr->rx_std[i];
 8507		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
 8508		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
 8509		rxd->opaque = (RXD_OPAQUE_RING_STD |
 8510			       (i << RXD_OPAQUE_INDEX_SHIFT));
 8511	}
 8512
 8513	/* Now allocate fresh SKBs for each rx ring. */
 8514	for (i = 0; i < tp->rx_pending; i++) {
 8515		unsigned int frag_size;
 8516
 8517		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
 8518				      &frag_size) < 0) {
 8519			netdev_warn(tp->dev,
 8520				    "Using a smaller RX standard ring. Only "
 8521				    "%d out of %d buffers were allocated "
 8522				    "successfully\n", i, tp->rx_pending);
 8523			if (i == 0)
 8524				goto initfail;
 8525			tp->rx_pending = i;
 8526			break;
 8527		}
 8528	}
 8529
 8530	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
 8531		goto done;
 8532
 8533	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
 8534
 8535	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
 8536		goto done;
 8537
 8538	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
 8539		struct tg3_rx_buffer_desc *rxd;
 8540
 8541		rxd = &tpr->rx_jmb[i].std;
 8542		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
 8543		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
 8544				  RXD_FLAG_JUMBO;
 8545		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
 8546		       (i << RXD_OPAQUE_INDEX_SHIFT));
 8547	}
 8548
 8549	for (i = 0; i < tp->rx_jumbo_pending; i++) {
 8550		unsigned int frag_size;
 8551
 8552		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
 8553				      &frag_size) < 0) {
 8554			netdev_warn(tp->dev,
 8555				    "Using a smaller RX jumbo ring. Only %d "
 8556				    "out of %d buffers were allocated "
 8557				    "successfully\n", i, tp->rx_jumbo_pending);
 8558			if (i == 0)
 8559				goto initfail;
 8560			tp->rx_jumbo_pending = i;
 8561			break;
 8562		}
 8563	}
 8564
 8565done:
 8566	return 0;
 8567
 8568initfail:
 8569	tg3_rx_prodring_free(tp, tpr);
 8570	return -ENOMEM;
 8571}
 8572
 8573static void tg3_rx_prodring_fini(struct tg3 *tp,
 8574				 struct tg3_rx_prodring_set *tpr)
 8575{
 8576	kfree(tpr->rx_std_buffers);
 8577	tpr->rx_std_buffers = NULL;
 8578	kfree(tpr->rx_jmb_buffers);
 8579	tpr->rx_jmb_buffers = NULL;
 8580	if (tpr->rx_std) {
 8581		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
 8582				  tpr->rx_std, tpr->rx_std_mapping);
 8583		tpr->rx_std = NULL;
 8584	}
 8585	if (tpr->rx_jmb) {
 8586		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
 8587				  tpr->rx_jmb, tpr->rx_jmb_mapping);
 8588		tpr->rx_jmb = NULL;
 8589	}
 8590}
 8591
 8592static int tg3_rx_prodring_init(struct tg3 *tp,
 8593				struct tg3_rx_prodring_set *tpr)
 8594{
 8595	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
 8596				      GFP_KERNEL);
 8597	if (!tpr->rx_std_buffers)
 8598		return -ENOMEM;
 8599
 8600	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
 8601					 TG3_RX_STD_RING_BYTES(tp),
 8602					 &tpr->rx_std_mapping,
 8603					 GFP_KERNEL);
 8604	if (!tpr->rx_std)
 8605		goto err_out;
 8606
 8607	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
 8608		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
 8609					      GFP_KERNEL);
 8610		if (!tpr->rx_jmb_buffers)
 8611			goto err_out;
 8612
 8613		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
 8614						 TG3_RX_JMB_RING_BYTES(tp),
 8615						 &tpr->rx_jmb_mapping,
 8616						 GFP_KERNEL);
 8617		if (!tpr->rx_jmb)
 8618			goto err_out;
 8619	}
 8620
 8621	return 0;
 8622
 8623err_out:
 8624	tg3_rx_prodring_fini(tp, tpr);
 8625	return -ENOMEM;
 8626}
 8627
 8628/* Free up pending packets in all rx/tx rings.
 8629 *
 8630 * The chip has been shut down and the driver detached from
8631 * the networking stack, so no interrupts or new tx packets will
 8632 * end up in the driver.  tp->{tx,}lock is not held and we are not
 8633 * in an interrupt context and thus may sleep.
 8634 */
 8635static void tg3_free_rings(struct tg3 *tp)
 8636{
 8637	int i, j;
 8638
 8639	for (j = 0; j < tp->irq_cnt; j++) {
 8640		struct tg3_napi *tnapi = &tp->napi[j];
 8641
 8642		tg3_rx_prodring_free(tp, &tnapi->prodring);
 8643
 8644		if (!tnapi->tx_buffers)
 8645			continue;
 8646
 8647		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
 8648			struct sk_buff *skb = tnapi->tx_buffers[i].skb;
 8649
 8650			if (!skb)
 8651				continue;
 8652
 8653			tg3_tx_skb_unmap(tnapi, i,
 8654					 skb_shinfo(skb)->nr_frags - 1);
 8655
 8656			dev_consume_skb_any(skb);
 8657		}
 8658		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
 8659	}
 8660}
 8661
 8662/* Initialize tx/rx rings for packet processing.
 8663 *
 8664 * The chip has been shut down and the driver detached from
8665 * the networking stack, so no interrupts or new tx packets will
 8666 * end up in the driver.  tp->{tx,}lock are held and thus
 8667 * we may not sleep.
 8668 */
 8669static int tg3_init_rings(struct tg3 *tp)
 8670{
 8671	int i;
 8672
 8673	/* Free up all the SKBs. */
 8674	tg3_free_rings(tp);
 8675
 8676	for (i = 0; i < tp->irq_cnt; i++) {
 8677		struct tg3_napi *tnapi = &tp->napi[i];
 8678
 8679		tnapi->last_tag = 0;
 8680		tnapi->last_irq_tag = 0;
 8681		tnapi->hw_status->status = 0;
 8682		tnapi->hw_status->status_tag = 0;
 8683		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
 8684
 8685		tnapi->tx_prod = 0;
 8686		tnapi->tx_cons = 0;
 8687		if (tnapi->tx_ring)
 8688			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
 8689
 8690		tnapi->rx_rcb_ptr = 0;
 8691		if (tnapi->rx_rcb)
 8692			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
 8693
 8694		if (tnapi->prodring.rx_std &&
 8695		    tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
 8696			tg3_free_rings(tp);
 8697			return -ENOMEM;
 8698		}
 8699	}
 8700
 8701	return 0;
 8702}
 8703
 8704static void tg3_mem_tx_release(struct tg3 *tp)
 8705{
 8706	int i;
 8707
 8708	for (i = 0; i < tp->irq_max; i++) {
 8709		struct tg3_napi *tnapi = &tp->napi[i];
 8710
 8711		if (tnapi->tx_ring) {
 8712			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
 8713				tnapi->tx_ring, tnapi->tx_desc_mapping);
 8714			tnapi->tx_ring = NULL;
 8715		}
 8716
 8717		kfree(tnapi->tx_buffers);
 8718		tnapi->tx_buffers = NULL;
 8719	}
 8720}
 8721
 8722static int tg3_mem_tx_acquire(struct tg3 *tp)
 8723{
 8724	int i;
 8725	struct tg3_napi *tnapi = &tp->napi[0];
 8726
 8727	/* If multivector TSS is enabled, vector 0 does not handle
 8728	 * tx interrupts.  Don't allocate any resources for it.
 8729	 */
 8730	if (tg3_flag(tp, ENABLE_TSS))
 8731		tnapi++;
 8732
 8733	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
 8734		tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
 8735					    sizeof(struct tg3_tx_ring_info),
 8736					    GFP_KERNEL);
 8737		if (!tnapi->tx_buffers)
 8738			goto err_out;
 8739
 8740		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
 8741						    TG3_TX_RING_BYTES,
 8742						    &tnapi->tx_desc_mapping,
 8743						    GFP_KERNEL);
 8744		if (!tnapi->tx_ring)
 8745			goto err_out;
 8746	}
 8747
 8748	return 0;
 8749
 8750err_out:
 8751	tg3_mem_tx_release(tp);
 8752	return -ENOMEM;
 8753}
 8754
 8755static void tg3_mem_rx_release(struct tg3 *tp)
 8756{
 8757	int i;
 8758
 8759	for (i = 0; i < tp->irq_max; i++) {
 8760		struct tg3_napi *tnapi = &tp->napi[i];
 8761
 8762		tg3_rx_prodring_fini(tp, &tnapi->prodring);
 8763
 8764		if (!tnapi->rx_rcb)
 8765			continue;
 8766
 8767		dma_free_coherent(&tp->pdev->dev,
 8768				  TG3_RX_RCB_RING_BYTES(tp),
 8769				  tnapi->rx_rcb,
 8770				  tnapi->rx_rcb_mapping);
 8771		tnapi->rx_rcb = NULL;
 8772	}
 8773}
 8774
 8775static int tg3_mem_rx_acquire(struct tg3 *tp)
 8776{
 8777	unsigned int i, limit;
 8778
 8779	limit = tp->rxq_cnt;
 8780
 8781	/* If RSS is enabled, we need a (dummy) producer ring
 8782	 * set on vector zero.  This is the true hw prodring.
 8783	 */
 8784	if (tg3_flag(tp, ENABLE_RSS))
 8785		limit++;
 8786
 8787	for (i = 0; i < limit; i++) {
 8788		struct tg3_napi *tnapi = &tp->napi[i];
 8789
 8790		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
 8791			goto err_out;
 8792
 8793		/* If multivector RSS is enabled, vector 0
 8794		 * does not handle rx or tx interrupts.
 8795		 * Don't allocate any resources for it.
 8796		 */
 8797		if (!i && tg3_flag(tp, ENABLE_RSS))
 8798			continue;
 8799
 8800		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
 8801						   TG3_RX_RCB_RING_BYTES(tp),
 8802						   &tnapi->rx_rcb_mapping,
 8803						   GFP_KERNEL);
 8804		if (!tnapi->rx_rcb)
 8805			goto err_out;
 8806	}
 8807
 8808	return 0;
 8809
 8810err_out:
 8811	tg3_mem_rx_release(tp);
 8812	return -ENOMEM;
 8813}
 8814
 8815/*
 8816 * Must not be invoked with interrupt sources disabled and
8817 * the hardware shut down.
 8818 */
 8819static void tg3_free_consistent(struct tg3 *tp)
 8820{
 8821	int i;
 8822
 8823	for (i = 0; i < tp->irq_cnt; i++) {
 8824		struct tg3_napi *tnapi = &tp->napi[i];
 8825
 8826		if (tnapi->hw_status) {
 8827			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
 8828					  tnapi->hw_status,
 8829					  tnapi->status_mapping);
 8830			tnapi->hw_status = NULL;
 8831		}
 8832	}
 8833
 8834	tg3_mem_rx_release(tp);
 8835	tg3_mem_tx_release(tp);
 8836
 8837	/* tp->hw_stats can be referenced safely:
 8838	 *     1. under rtnl_lock
 8839	 *     2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
 8840	 */
 8841	if (tp->hw_stats) {
 8842		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
 8843				  tp->hw_stats, tp->stats_mapping);
 8844		tp->hw_stats = NULL;
 8845	}
 8846}
 8847
 8848/*
 8849 * Must not be invoked with interrupt sources disabled and
8850 * the hardware shut down.  Can sleep.
 8851 */
 8852static int tg3_alloc_consistent(struct tg3 *tp)
 8853{
 8854	int i;
 8855
 8856	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
 8857					  sizeof(struct tg3_hw_stats),
 8858					  &tp->stats_mapping, GFP_KERNEL);
 8859	if (!tp->hw_stats)
 8860		goto err_out;
 8861
 8862	for (i = 0; i < tp->irq_cnt; i++) {
 8863		struct tg3_napi *tnapi = &tp->napi[i];
 8864		struct tg3_hw_status *sblk;
 8865
 8866		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
 8867						      TG3_HW_STATUS_SIZE,
 8868						      &tnapi->status_mapping,
 8869						      GFP_KERNEL);
 8870		if (!tnapi->hw_status)
 8871			goto err_out;
 8872
 8873		sblk = tnapi->hw_status;
 8874
 8875		if (tg3_flag(tp, ENABLE_RSS)) {
 8876			u16 *prodptr = NULL;
 8877
 8878			/*
 8879			 * When RSS is enabled, the status block format changes
 8880			 * slightly.  The "rx_jumbo_consumer", "reserved",
 8881			 * and "rx_mini_consumer" members get mapped to the
 8882			 * other three rx return ring producer indexes.
 8883			 */
 8884			switch (i) {
 8885			case 1:
 8886				prodptr = &sblk->idx[0].rx_producer;
 8887				break;
 8888			case 2:
 8889				prodptr = &sblk->rx_jumbo_consumer;
 8890				break;
 8891			case 3:
 8892				prodptr = &sblk->reserved;
 8893				break;
 8894			case 4:
 8895				prodptr = &sblk->rx_mini_consumer;
 8896				break;
 8897			}
 8898			tnapi->rx_rcb_prod_idx = prodptr;
 8899		} else {
 8900			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
 8901		}
 8902	}
 8903
 8904	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
 8905		goto err_out;
 8906
 8907	return 0;
 8908
 8909err_out:
 8910	tg3_free_consistent(tp);
 8911	return -ENOMEM;
 8912}
 8913
 8914#define MAX_WAIT_CNT 1000
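/* 1000 polls of udelay(100) bounds each wait loop at roughly 100 ms. */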
 8915
8916/* To stop a block, clear the enable bit and poll until it
 8917 * clears.  tp->lock is held.
 8918 */
 8919static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
 8920{
 8921	unsigned int i;
 8922	u32 val;
 8923
 8924	if (tg3_flag(tp, 5705_PLUS)) {
 8925		switch (ofs) {
 8926		case RCVLSC_MODE:
 8927		case DMAC_MODE:
 8928		case MBFREE_MODE:
 8929		case BUFMGR_MODE:
 8930		case MEMARB_MODE:
 8931			/* We can't enable/disable these bits of the
8932			 * 5705/5750, so just say success.
 8933			 */
 8934			return 0;
 8935
 8936		default:
 8937			break;
 8938		}
 8939	}
 8940
 8941	val = tr32(ofs);
 8942	val &= ~enable_bit;
 8943	tw32_f(ofs, val);
 8944
 8945	for (i = 0; i < MAX_WAIT_CNT; i++) {
 8946		if (pci_channel_offline(tp->pdev)) {
 8947			dev_err(&tp->pdev->dev,
 8948				"tg3_stop_block device offline, "
 8949				"ofs=%lx enable_bit=%x\n",
 8950				ofs, enable_bit);
 8951			return -ENODEV;
 8952		}
 8953
 8954		udelay(100);
 8955		val = tr32(ofs);
 8956		if ((val & enable_bit) == 0)
 8957			break;
 8958	}
 8959
 8960	if (i == MAX_WAIT_CNT && !silent) {
 8961		dev_err(&tp->pdev->dev,
 8962			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
 8963			ofs, enable_bit);
 8964		return -ENODEV;
 8965	}
 8966
 8967	return 0;
 8968}
 8969
 8970/* tp->lock is held. */
 8971static int tg3_abort_hw(struct tg3 *tp, bool silent)
 8972{
 8973	int i, err;
 8974
 8975	tg3_disable_ints(tp);
 8976
 8977	if (pci_channel_offline(tp->pdev)) {
 8978		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
 8979		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
 8980		err = -ENODEV;
 8981		goto err_no_dev;
 8982	}
 8983
 8984	tp->rx_mode &= ~RX_MODE_ENABLE;
 8985	tw32_f(MAC_RX_MODE, tp->rx_mode);
 8986	udelay(10);
 8987
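	/* Stop each block in turn, OR-ing the results so the remaining
	 * blocks are still shut down even if an earlier one times out.
	 */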
 8988	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
 8989	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
 8990	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
 8991	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
 8992	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
 8993	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
 8994
 8995	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
 8996	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
 8997	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
 8998	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
 8999	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
 9000	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
 9001	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
 9002
 9003	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
 9004	tw32_f(MAC_MODE, tp->mac_mode);
 9005	udelay(40);
 9006
 9007	tp->tx_mode &= ~TX_MODE_ENABLE;
 9008	tw32_f(MAC_TX_MODE, tp->tx_mode);
 9009
 9010	for (i = 0; i < MAX_WAIT_CNT; i++) {
 9011		udelay(100);
 9012		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
 9013			break;
 9014	}
 9015	if (i >= MAX_WAIT_CNT) {
 9016		dev_err(&tp->pdev->dev,
 9017			"%s timed out, TX_MODE_ENABLE will not clear "
 9018			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
 9019		err |= -ENODEV;
 9020	}
 9021
 9022	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
 9023	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
 9024	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
 9025
 9026	tw32(FTQ_RESET, 0xffffffff);
 9027	tw32(FTQ_RESET, 0x00000000);
 9028
 9029	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
 9030	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
 9031
 9032err_no_dev:
 9033	for (i = 0; i < tp->irq_cnt; i++) {
 9034		struct tg3_napi *tnapi = &tp->napi[i];
 9035		if (tnapi->hw_status)
 9036			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
 9037	}
 9038
 9039	return err;
 9040}
 9041
 9042/* Save PCI command register before chip reset */
 9043static void tg3_save_pci_state(struct tg3 *tp)
 9044{
 9045	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
 9046}
 9047
 9048/* Restore PCI state after chip reset */
 9049static void tg3_restore_pci_state(struct tg3 *tp)
 9050{
 9051	u32 val;
 9052
 9053	/* Re-enable indirect register accesses. */
 9054	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
 9055			       tp->misc_host_ctrl);
 9056
 9057	/* Set MAX PCI retry to zero. */
 9058	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
 9059	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
 9060	    tg3_flag(tp, PCIX_MODE))
 9061		val |= PCISTATE_RETRY_SAME_DMA;
 9062	/* Allow reads and writes to the APE register and memory space. */
 9063	if (tg3_flag(tp, ENABLE_APE))
 9064		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
 9065		       PCISTATE_ALLOW_APE_SHMEM_WR |
 9066		       PCISTATE_ALLOW_APE_PSPACE_WR;
 9067	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
 9068
 9069	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
 9070
 9071	if (!tg3_flag(tp, PCI_EXPRESS)) {
 9072		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
 9073				      tp->pci_cacheline_sz);
 9074		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
 9075				      tp->pci_lat_timer);
 9076	}
 9077
 9078	/* Make sure PCI-X relaxed ordering bit is clear. */
 9079	if (tg3_flag(tp, PCIX_MODE)) {
 9080		u16 pcix_cmd;
 9081
 9082		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
 9083				     &pcix_cmd);
 9084		pcix_cmd &= ~PCI_X_CMD_ERO;
 9085		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
 9086				      pcix_cmd);
 9087	}
 9088
 9089	if (tg3_flag(tp, 5780_CLASS)) {
 9090
 9091		/* Chip reset on 5780 will reset MSI enable bit,
9092		 * so we need to restore it.
 9093		 */
 9094		if (tg3_flag(tp, USING_MSI)) {
 9095			u16 ctrl;
 9096
 9097			pci_read_config_word(tp->pdev,
 9098					     tp->msi_cap + PCI_MSI_FLAGS,
 9099					     &ctrl);
 9100			pci_write_config_word(tp->pdev,
 9101					      tp->msi_cap + PCI_MSI_FLAGS,
 9102					      ctrl | PCI_MSI_FLAGS_ENABLE);
 9103			val = tr32(MSGINT_MODE);
 9104			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
 9105		}
 9106	}
 9107}
 9108
 9109static void tg3_override_clk(struct tg3 *tp)
 9110{
 9111	u32 val;
 9112
 9113	switch (tg3_asic_rev(tp)) {
 9114	case ASIC_REV_5717:
 9115		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
 9116		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
 9117		     TG3_CPMU_MAC_ORIDE_ENABLE);
 9118		break;
 9119
 9120	case ASIC_REV_5719:
 9121	case ASIC_REV_5720:
 9122		tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
 9123		break;
 9124
 9125	default:
 9126		return;
 9127	}
 9128}
 9129
 9130static void tg3_restore_clk(struct tg3 *tp)
 9131{
 9132	u32 val;
 9133
 9134	switch (tg3_asic_rev(tp)) {
 9135	case ASIC_REV_5717:
 9136		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
 9137		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
 9138		     val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
 9139		break;
 9140
 9141	case ASIC_REV_5719:
 9142	case ASIC_REV_5720:
 9143		val = tr32(TG3_CPMU_CLCK_ORIDE);
 9144		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
 9145		break;
 9146
 9147	default:
 9148		return;
 9149	}
 9150}
 9151
 9152/* tp->lock is held. */
 9153static int tg3_chip_reset(struct tg3 *tp)
 9154	__releases(tp->lock)
 9155	__acquires(tp->lock)
 9156{
 9157	u32 val;
 9158	void (*write_op)(struct tg3 *, u32, u32);
 9159	int i, err;
 9160
 9161	if (!pci_device_is_present(tp->pdev))
 9162		return -ENODEV;
 9163
 9164	tg3_nvram_lock(tp);
 9165
 9166	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
 9167
 9168	/* No matching tg3_nvram_unlock() after this because
 9169	 * chip reset below will undo the nvram lock.
 9170	 */
 9171	tp->nvram_lock_cnt = 0;
 9172
 9173	/* GRC_MISC_CFG core clock reset will clear the memory
 9174	 * enable bit in PCI register 4 and the MSI enable bit
 9175	 * on some chips, so we save relevant registers here.
 9176	 */
 9177	tg3_save_pci_state(tp);
 9178
 9179	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
 9180	    tg3_flag(tp, 5755_PLUS))
 9181		tw32(GRC_FASTBOOT_PC, 0);
 9182
 9183	/*
 9184	 * We must avoid the readl() that normally takes place.
9185	 * It can lock machines, cause machine checks, and do other
 9186	 * fun things.  So, temporarily disable the 5701
 9187	 * hardware workaround, while we do the reset.
 9188	 */
 9189	write_op = tp->write32;
 9190	if (write_op == tg3_write_flush_reg32)
 9191		tp->write32 = tg3_write32;
 9192
 9193	/* Prevent the irq handler from reading or writing PCI registers
 9194	 * during chip reset when the memory enable bit in the PCI command
 9195	 * register may be cleared.  The chip does not generate interrupt
 9196	 * at this time, but the irq handler may still be called due to irq
 9197	 * sharing or irqpoll.
 9198	 */
 9199	tg3_flag_set(tp, CHIP_RESETTING);
 9200	for (i = 0; i < tp->irq_cnt; i++) {
 9201		struct tg3_napi *tnapi = &tp->napi[i];
 9202		if (tnapi->hw_status) {
 9203			tnapi->hw_status->status = 0;
 9204			tnapi->hw_status->status_tag = 0;
 9205		}
 9206		tnapi->last_tag = 0;
 9207		tnapi->last_irq_tag = 0;
 9208	}
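	/* Publish the CHIP_RESETTING flag and the cleared status blocks
	 * to the IRQ handlers before synchronizing with them below.
	 */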
 9209	smp_mb();
 9210
 9211	tg3_full_unlock(tp);
 9212
 9213	for (i = 0; i < tp->irq_cnt; i++)
 9214		synchronize_irq(tp->napi[i].irq_vec);
 9215
 9216	tg3_full_lock(tp, 0);
 9217
 9218	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
 9219		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
 9220		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
 9221	}
 9222
 9223	/* do the reset */
 9224	val = GRC_MISC_CFG_CORECLK_RESET;
 9225
 9226	if (tg3_flag(tp, PCI_EXPRESS)) {
 9227		/* Force PCIe 1.0a mode */
 9228		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
 9229		    !tg3_flag(tp, 57765_PLUS) &&
 9230		    tr32(TG3_PCIE_PHY_TSTCTL) ==
 9231		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
 9232			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
 9233
 9234		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
 9235			tw32(GRC_MISC_CFG, (1 << 29));
 9236			val |= (1 << 29);
 9237		}
 9238	}
 9239
 9240	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
 9241		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
 9242		tw32(GRC_VCPU_EXT_CTRL,
 9243		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
 9244	}
 9245
 9246	/* Set the clock to the highest frequency to avoid timeouts. With link
9247	 * aware mode, the clock speed could be slow and the bootcode may not
 9248	 * complete within the expected time. Override the clock to allow the
 9249	 * bootcode to finish sooner and then restore it.
 9250	 */
 9251	tg3_override_clk(tp);
 9252
9253	/* Manage gphy power for all CPMU-absent PCIe devices. */
 9254	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
 9255		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
 9256
 9257	tw32(GRC_MISC_CFG, val);
 9258
 9259	/* restore 5701 hardware bug workaround write method */
 9260	tp->write32 = write_op;
 9261
 9262	/* Unfortunately, we have to delay before the PCI read back.
9263	 * Some 575X chips will not even respond to a PCI cfg access
 9264	 * when the reset command is given to the chip.
 9265	 *
 9266	 * How do these hardware designers expect things to work
 9267	 * properly if the PCI write is posted for a long period
 9268	 * of time?  It is always necessary to have some method by
 9269	 * which a register read back can occur to push the write
 9270	 * out which does the reset.
 9271	 *
 9272	 * For most tg3 variants the trick below works.
 9273	 * Ho hum...
 9274	 */
 9275	udelay(120);
 9276
 9277	/* Flush PCI posted writes.  The normal MMIO registers
 9278	 * are inaccessible at this time so this is the only
 9279	 * way to do this reliably (actually, this is no longer
 9280	 * the case, see above).  I tried to use indirect
 9281	 * register read/write but this upset some 5701 variants.
 9282	 */
 9283	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
 9284
 9285	udelay(120);
 9286
 9287	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
 9288		u16 val16;
 9289
 9290		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
 9291			int j;
 9292			u32 cfg_val;
 9293
 9294			/* Wait for link training to complete.  */
 9295			for (j = 0; j < 5000; j++)
 9296				udelay(100);
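			/* (That is 5000 * 100 us, i.e. roughly half a
			 * second of settle time for link training.)
			 */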
 9297
 9298			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
 9299			pci_write_config_dword(tp->pdev, 0xc4,
 9300					       cfg_val | (1 << 15));
 9301		}
 9302
 9303		/* Clear the "no snoop" and "relaxed ordering" bits. */
 9304		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
 9305		/*
 9306		 * Older PCIe devices only support the 128 byte
 9307		 * MPS setting.  Enforce the restriction.
 9308		 */
 9309		if (!tg3_flag(tp, CPMU_PRESENT))
 9310			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
 9311		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
 9312
 9313		/* Clear error status */
 9314		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
 9315				      PCI_EXP_DEVSTA_CED |
 9316				      PCI_EXP_DEVSTA_NFED |
 9317				      PCI_EXP_DEVSTA_FED |
 9318				      PCI_EXP_DEVSTA_URD);
 9319	}
 9320
 9321	tg3_restore_pci_state(tp);
 9322
 9323	tg3_flag_clear(tp, CHIP_RESETTING);
 9324	tg3_flag_clear(tp, ERROR_PROCESSED);
 9325
 9326	val = 0;
 9327	if (tg3_flag(tp, 5780_CLASS))
 9328		val = tr32(MEMARB_MODE);
 9329	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
 9330
 9331	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
 9332		tg3_stop_fw(tp);
 9333		tw32(0x5000, 0x400);
 9334	}
 9335
 9336	if (tg3_flag(tp, IS_SSB_CORE)) {
 9337		/*
 9338		 * BCM4785: In order to avoid repercussions from using
 9339		 * potentially defective internal ROM, stop the Rx RISC CPU,
 9340		 * which is not required for normal operation.
 9341		 */
 9342		tg3_stop_fw(tp);
 9343		tg3_halt_cpu(tp, RX_CPU_BASE);
 9344	}
 9345
 9346	err = tg3_poll_fw(tp);
 9347	if (err)
 9348		return err;
 9349
 9350	tw32(GRC_MODE, tp->grc_mode);
 9351
 9352	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
 9353		val = tr32(0xc4);
 9354
 9355		tw32(0xc4, val | (1 << 15));
 9356	}
 9357
 9358	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
 9359	    tg3_asic_rev(tp) == ASIC_REV_5705) {
 9360		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
 9361		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
 9362			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
 9363		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
 9364	}
 9365
 9366	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
 9367		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
 9368		val = tp->mac_mode;
 9369	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
 9370		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
 9371		val = tp->mac_mode;
 9372	} else
 9373		val = 0;
 9374
 9375	tw32_f(MAC_MODE, val);
 9376	udelay(40);
 9377
 9378	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
 9379
 9380	tg3_mdio_start(tp);
 9381
 9382	if (tg3_flag(tp, PCI_EXPRESS) &&
 9383	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
 9384	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
 9385	    !tg3_flag(tp, 57765_PLUS)) {
 9386		val = tr32(0x7c00);
 9387
 9388		tw32(0x7c00, val | (1 << 25));
 9389	}
 9390
 9391	tg3_restore_clk(tp);
 9392
 9393	/* Increase the core clock speed to fix the tx timeout issue seen on
 9394	 * the 5762 at 100Mbps link speed.
 9395	 */
 9396	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
 9397		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
 9398		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
 9399		     TG3_CPMU_MAC_ORIDE_ENABLE);
 9400	}
 9401
 9402	/* Reprobe ASF enable state.  */
 9403	tg3_flag_clear(tp, ENABLE_ASF);
 9404	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
 9405			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
 9406
 9407	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
 9408	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
 9409	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
 9410		u32 nic_cfg;
 9411
 9412		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
 9413		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
 9414			tg3_flag_set(tp, ENABLE_ASF);
 9415			tp->last_event_jiffies = jiffies;
 9416			if (tg3_flag(tp, 5750_PLUS))
 9417				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
 9418
 9419			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
 9420			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
 9421				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
 9422			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
 9423				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
 9424		}
 9425	}
 9426
 9427	return 0;
 9428}
 9429
 9430static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
 9431static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
 9432static void __tg3_set_rx_mode(struct net_device *);
 9433
 9434/* tp->lock is held. */
 9435static int tg3_halt(struct tg3 *tp, int kind, bool silent)
 9436{
 9437	int err, i;
 9438
 9439	tg3_stop_fw(tp);
 9440
 9441	tg3_write_sig_pre_reset(tp, kind);
 9442
 9443	tg3_abort_hw(tp, silent);
 9444	err = tg3_chip_reset(tp);
 9445
 9446	__tg3_set_mac_addr(tp, false);
 9447
 9448	tg3_write_sig_legacy(tp, kind);
 9449	tg3_write_sig_post_reset(tp, kind);
 9450
 9451	if (tp->hw_stats) {
 9452		/* Save the stats across chip resets... */
 9453		tg3_get_nstats(tp, &tp->net_stats_prev);
 9454		tg3_get_estats(tp, &tp->estats_prev);
 9455
 9456		/* And make sure the next sample is new data */
 9457		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
 9458
 9459		for (i = 0; i < TG3_IRQ_MAX_VECS; ++i) {
 9460			struct tg3_napi *tnapi = &tp->napi[i];
 9461
 9462			tnapi->rx_dropped = 0;
 9463			tnapi->tx_dropped = 0;
 9464		}
 9465	}
 9466
 9467	return err;
 9468}
 9469
 9470static int tg3_set_mac_addr(struct net_device *dev, void *p)
 9471{
 9472	struct tg3 *tp = netdev_priv(dev);
 9473	struct sockaddr *addr = p;
 9474	int err = 0;
 9475	bool skip_mac_1 = false;
 9476
 9477	if (!is_valid_ether_addr(addr->sa_data))
 9478		return -EADDRNOTAVAIL;
 9479
 9480	eth_hw_addr_set(dev, addr->sa_data);
 9481
 9482	if (!netif_running(dev))
 9483		return 0;
 9484
 9485	if (tg3_flag(tp, ENABLE_ASF)) {
 9486		u32 addr0_high, addr0_low, addr1_high, addr1_low;
 9487
 9488		addr0_high = tr32(MAC_ADDR_0_HIGH);
 9489		addr0_low = tr32(MAC_ADDR_0_LOW);
 9490		addr1_high = tr32(MAC_ADDR_1_HIGH);
 9491		addr1_low = tr32(MAC_ADDR_1_LOW);
 9492
 9493		/* Skip MAC addr 1 if ASF is using it. */
 9494		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
 9495		    !(addr1_high == 0 && addr1_low == 0))
 9496			skip_mac_1 = true;
 9497	}
 9498	spin_lock_bh(&tp->lock);
 9499	__tg3_set_mac_addr(tp, skip_mac_1);
 9500	__tg3_set_rx_mode(dev);
 9501	spin_unlock_bh(&tp->lock);
 9502
 9503	return err;
 9504}
 9505
 9506/* tp->lock is held. */
 9507static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
 9508			   dma_addr_t mapping, u32 maxlen_flags,
 9509			   u32 nic_addr)
 9510{
 9511	tg3_write_mem(tp,
 9512		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
 9513		      ((u64) mapping >> 32));
 9514	tg3_write_mem(tp,
 9515		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
 9516		      ((u64) mapping & 0xffffffff));
 9517	tg3_write_mem(tp,
 9518		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
 9519		       maxlen_flags);
 9520
 9521	if (!tg3_flag(tp, 5705_PLUS))
 9522		tg3_write_mem(tp,
 9523			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
 9524			      nic_addr);
 9525}
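
/* Layout note: a BDINFO ring control block in NIC SRAM is four 32-bit
 * words -- the high and low halves of the host DMA address of the ring,
 * a (max length << 16) | flags word, and the NIC SRAM address of the
 * ring's descriptors (not used on 5705_PLUS parts, hence the skip above).
 */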
 9526
 9527
 9528static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
 9529{
 9530	int i = 0;
 9531
 9532	if (!tg3_flag(tp, ENABLE_TSS)) {
 9533		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
 9534		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
 9535		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
 9536	} else {
 9537		tw32(HOSTCC_TXCOL_TICKS, 0);
 9538		tw32(HOSTCC_TXMAX_FRAMES, 0);
 9539		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
 9540
 9541		for (; i < tp->txq_cnt; i++) {
 9542			u32 reg;
 9543
 9544			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
 9545			tw32(reg, ec->tx_coalesce_usecs);
 9546			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
 9547			tw32(reg, ec->tx_max_coalesced_frames);
 9548			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
 9549			tw32(reg, ec->tx_max_coalesced_frames_irq);
 9550		}
 9551	}
 9552
 9553	for (; i < tp->irq_max - 1; i++) {
 9554		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
 9555		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
 9556		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
 9557	}
 9558}
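
/* The per-vector host coalescing registers sit in blocks 0x18 bytes
 * apart, so HOSTCC_*_VEC1 + i * 0x18 addresses vector i + 1.  The
 * trailing loops here and in tg3_coal_rx_init() below zero the vectors
 * beyond the configured queue count so stale settings cannot linger.
 */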
 9559
 9560static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
 9561{
 9562	int i = 0;
 9563	u32 limit = tp->rxq_cnt;
 9564
 9565	if (!tg3_flag(tp, ENABLE_RSS)) {
 9566		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
 9567		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
 9568		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
 9569		limit--;
 9570	} else {
 9571		tw32(HOSTCC_RXCOL_TICKS, 0);
 9572		tw32(HOSTCC_RXMAX_FRAMES, 0);
 9573		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
 9574	}
 9575
 9576	for (; i < limit; i++) {
 9577		u32 reg;
 9578
 9579		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
 9580		tw32(reg, ec->rx_coalesce_usecs);
 9581		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
 9582		tw32(reg, ec->rx_max_coalesced_frames);
 9583		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
 9584		tw32(reg, ec->rx_max_coalesced_frames_irq);
 9585	}
 9586
 9587	for (; i < tp->irq_max - 1; i++) {
 9588		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
 9589		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
 9590		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
 9591	}
 9592}
 9593
 9594static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
 9595{
 9596	tg3_coal_tx_init(tp, ec);
 9597	tg3_coal_rx_init(tp, ec);
 9598
 9599	if (!tg3_flag(tp, 5705_PLUS)) {
 9600		u32 val = ec->stats_block_coalesce_usecs;
 9601
 9602		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
 9603		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
 9604
 9605		if (!tp->link_up)
 9606			val = 0;
 9607
 9608		tw32(HOSTCC_STAT_COAL_TICKS, val);
 9609	}
 9610}
 9611
 9612/* tp->lock is held. */
 9613static void tg3_tx_rcbs_disable(struct tg3 *tp)
 9614{
 9615	u32 txrcb, limit;
 9616
 9617	/* Disable all transmit rings but the first. */
 9618	if (!tg3_flag(tp, 5705_PLUS))
 9619		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
 9620	else if (tg3_flag(tp, 5717_PLUS))
 9621		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
 9622	else if (tg3_flag(tp, 57765_CLASS) ||
 9623		 tg3_asic_rev(tp) == ASIC_REV_5762)
 9624		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
 9625	else
 9626		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
 9627
 9628	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
 9629	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
 9630		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
 9631			      BDINFO_FLAGS_DISABLED);
 9632}
 9633
 9634/* tp->lock is held. */
 9635static void tg3_tx_rcbs_init(struct tg3 *tp)
 9636{
 9637	int i = 0;
 9638	u32 txrcb = NIC_SRAM_SEND_RCB;
 9639
 9640	if (tg3_flag(tp, ENABLE_TSS))
 9641		i++;
 9642
 9643	for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
 9644		struct tg3_napi *tnapi = &tp->napi[i];
 9645
 9646		if (!tnapi->tx_ring)
 9647			continue;
 9648
 9649		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
 9650			       (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
 9651			       NIC_SRAM_TX_BUFFER_DESC);
 9652	}
 9653}
 9654
 9655/* tp->lock is held. */
 9656static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
 9657{
 9658	u32 rxrcb, limit;
 9659
 9660	/* Disable all receive return rings but the first. */
 9661	if (tg3_flag(tp, 5717_PLUS))
 9662		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
 9663	else if (!tg3_flag(tp, 5705_PLUS))
 9664		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
 9665	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
 9666		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
 9667		 tg3_flag(tp, 57765_CLASS))
 9668		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
 9669	else
 9670		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
 9671
 9672	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
 9673	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
 9674		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
 9675			      BDINFO_FLAGS_DISABLED);
 9676}
 9677
 9678/* tp->lock is held. */
 9679static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
 9680{
 9681	int i = 0;
 9682	u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
 9683
 9684	if (tg3_flag(tp, ENABLE_RSS))
 9685		i++;
 9686
 9687	for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
 9688		struct tg3_napi *tnapi = &tp->napi[i];
 9689
 9690		if (!tnapi->rx_rcb)
 9691			continue;
 9692
 9693		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
 9694			       (tp->rx_ret_ring_mask + 1) <<
 9695				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
 9696	}
 9697}
 9698
 9699/* tp->lock is held. */
 9700static void tg3_rings_reset(struct tg3 *tp)
 9701{
 9702	int i;
 9703	u32 stblk;
 9704	struct tg3_napi *tnapi = &tp->napi[0];
 9705
 9706	tg3_tx_rcbs_disable(tp);
 9707
 9708	tg3_rx_ret_rcbs_disable(tp);
 9709
 9710	/* Disable interrupts */
 9711	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
 9712	tp->napi[0].chk_msi_cnt = 0;
 9713	tp->napi[0].last_rx_cons = 0;
 9714	tp->napi[0].last_tx_cons = 0;
 9715
 9716	/* Zero mailbox registers. */
 9717	if (tg3_flag(tp, SUPPORT_MSIX)) {
 9718		for (i = 1; i < tp->irq_max; i++) {
 9719			tp->napi[i].tx_prod = 0;
 9720			tp->napi[i].tx_cons = 0;
 9721			if (tg3_flag(tp, ENABLE_TSS))
 9722				tw32_mailbox(tp->napi[i].prodmbox, 0);
 9723			tw32_rx_mbox(tp->napi[i].consmbox, 0);
 9724			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
 9725			tp->napi[i].chk_msi_cnt = 0;
 9726			tp->napi[i].last_rx_cons = 0;
 9727			tp->napi[i].last_tx_cons = 0;
 9728		}
 9729		if (!tg3_flag(tp, ENABLE_TSS))
 9730			tw32_mailbox(tp->napi[0].prodmbox, 0);
 9731	} else {
 9732		tp->napi[0].tx_prod = 0;
 9733		tp->napi[0].tx_cons = 0;
 9734		tw32_mailbox(tp->napi[0].prodmbox, 0);
 9735		tw32_rx_mbox(tp->napi[0].consmbox, 0);
 9736	}
 9737
 9738	/* Make sure the NIC-based send BD rings are disabled. */
 9739	if (!tg3_flag(tp, 5705_PLUS)) {
 9740		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
 9741		for (i = 0; i < 16; i++)
 9742			tw32_tx_mbox(mbox + i * 8, 0);
 9743	}
 9744
 9745	/* Clear status block in ram. */
 9746	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
 9747
 9748	/* Set status block DMA address */
 9749	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
 9750	     ((u64) tnapi->status_mapping >> 32));
 9751	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
 9752	     ((u64) tnapi->status_mapping & 0xffffffff));
 9753
 9754	stblk = HOSTCC_STATBLCK_RING1;
 9755
 9756	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
 9757		u64 mapping = (u64)tnapi->status_mapping;
 9758		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
 9759		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
 9760		stblk += 8;
 9761
 9762		/* Clear status block in ram. */
 9763		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
 9764	}
 9765
 9766	tg3_tx_rcbs_init(tp);
 9767	tg3_rx_ret_rcbs_init(tp);
 9768}
 9769
 9770static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
 9771{
 9772	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
 9773
 9774	if (!tg3_flag(tp, 5750_PLUS) ||
 9775	    tg3_flag(tp, 5780_CLASS) ||
 9776	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
 9777	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
 9778	    tg3_flag(tp, 57765_PLUS))
 9779		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
 9780	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
 9781		 tg3_asic_rev(tp) == ASIC_REV_5787)
 9782		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
 9783	else
 9784		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
 9785
 9786	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
 9787	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
 9788
 9789	val = min(nic_rep_thresh, host_rep_thresh);
 9790	tw32(RCVBDI_STD_THRESH, val);
 9791
 9792	if (tg3_flag(tp, 57765_PLUS))
 9793		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
 9794
 9795	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
 9796		return;
 9797
 9798	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
 9799
 9800	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
 9801
 9802	val = min(bdcache_maxcnt / 2, host_rep_thresh);
 9803	tw32(RCVBDI_JUMBO_THRESH, val);
 9804
 9805	if (tg3_flag(tp, 57765_PLUS))
 9806		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
 9807}
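
/* Illustrative numbers for the thresholds above: with rx_pending == 200,
 * host_rep_thresh = max(200 / 8, 1) == 25, and the smaller of that and
 * the NIC-side limit min(bdcache_maxcnt / 2, rx_std_max_post) is
 * programmed.  The *_REPLENISH_LWM registers on 57765_PLUS parts appear
 * to be low-water marks for the on-chip BD caches.
 */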
 9808
 9809static inline u32 calc_crc(unsigned char *buf, int len)
 9810{
 9811	u32 reg;
 9812	u32 tmp;
 9813	int j, k;
 9814
 9815	reg = 0xffffffff;
 9816
 9817	for (j = 0; j < len; j++) {
 9818		reg ^= buf[j];
 9819
 9820		for (k = 0; k < 8; k++) {
 9821			tmp = reg & 0x01;
 9822
 9823			reg >>= 1;
 9824
 9825			if (tmp)
 9826				reg ^= CRC32_POLY_LE;
 9827		}
 9828	}
 9829
 9830	return ~reg;
 9831}
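
/* calc_crc() is a bitwise, reflected (little-endian) CRC-32 with an
 * all-ones seed and a final inversion -- the standard Ethernet CRC.
 * A minimal cross-check sketch (hypothetical helper, assuming
 * <linux/crc32.h> were included; the driver itself only pulls in
 * <linux/crc32poly.h> for the polynomial):
 */
#if 0
static u32 calc_crc_check(unsigned char *buf, int len)
{
	/* crc32_le() with a ~0 seed matches the bit loop above, and
	 * the leading ~ here mirrors the "return ~reg" there.
	 */
	return ~crc32_le(~0, buf, len);
}
#endif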
 9832
 9833static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
 9834{
 9835	/* accept or reject all multicast frames */
 9836	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
 9837	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
 9838	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
 9839	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
 9840}
 9841
 9842static void __tg3_set_rx_mode(struct net_device *dev)
 9843{
 9844	struct tg3 *tp = netdev_priv(dev);
 9845	u32 rx_mode;
 9846
 9847	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
 9848				  RX_MODE_KEEP_VLAN_TAG);
 9849
 9850#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
 9851	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
 9852	 * flag clear.
 9853	 */
 9854	if (!tg3_flag(tp, ENABLE_ASF))
 9855		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
 9856#endif
 9857
 9858	if (dev->flags & IFF_PROMISC) {
 9859		/* Promiscuous mode. */
 9860		rx_mode |= RX_MODE_PROMISC;
 9861	} else if (dev->flags & IFF_ALLMULTI) {
 9862		/* Accept all multicast. */
 9863		tg3_set_multi(tp, 1);
 9864	} else if (netdev_mc_empty(dev)) {
 9865		/* Reject all multicast. */
 9866		tg3_set_multi(tp, 0);
 9867	} else {
 9868		/* Accept one or more multicast addresses. */
 9869		struct netdev_hw_addr *ha;
 9870		u32 mc_filter[4] = { 0, };
 9871		u32 regidx;
 9872		u32 bit;
 9873		u32 crc;
 9874
 9875		netdev_for_each_mc_addr(ha, dev) {
 9876			crc = calc_crc(ha->addr, ETH_ALEN);
 9877			bit = ~crc & 0x7f;
 9878			regidx = (bit & 0x60) >> 5;
 9879			bit &= 0x1f;
 9880			mc_filter[regidx] |= (1 << bit);
 9881		}
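
		/* The 7-bit hash index (~crc & 0x7f) selects one of 128
		 * filter bits: bits 6:5 pick the MAC_HASH_REG_n register
		 * and bits 4:0 the bit within it.  E.g. index 0x6b sets
		 * bit 11 of MAC_HASH_REG_3.
		 */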
 9882
 9883		tw32(MAC_HASH_REG_0, mc_filter[0]);
 9884		tw32(MAC_HASH_REG_1, mc_filter[1]);
 9885		tw32(MAC_HASH_REG_2, mc_filter[2]);
 9886		tw32(MAC_HASH_REG_3, mc_filter[3]);
 9887	}
 9888
 9889	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
 9890		rx_mode |= RX_MODE_PROMISC;
 9891	} else if (!(dev->flags & IFF_PROMISC)) {
 9892		/* Add all entries to the MAC addr filter list */
 9893		int i = 0;
 9894		struct netdev_hw_addr *ha;
 9895
 9896		netdev_for_each_uc_addr(ha, dev) {
 9897			__tg3_set_one_mac_addr(tp, ha->addr,
 9898					       i + TG3_UCAST_ADDR_IDX(tp));
 9899			i++;
 9900		}
 9901	}
 9902
 9903	if (rx_mode != tp->rx_mode) {
 9904		tp->rx_mode = rx_mode;
 9905		tw32_f(MAC_RX_MODE, rx_mode);
 9906		udelay(10);
 9907	}
 9908}
 9909
 9910static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
 9911{
 9912	int i;
 9913
 9914	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
 9915		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
 9916}
 9917
 9918static void tg3_rss_check_indir_tbl(struct tg3 *tp)
 9919{
 9920	int i;
 9921
 9922	if (!tg3_flag(tp, SUPPORT_MSIX))
 9923		return;
 9924
 9925	if (tp->rxq_cnt == 1) {
 9926		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
 9927		return;
 9928	}
 9929
 9930	/* Validate table against current IRQ count */
 9931	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
 9932		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
 9933			break;
 9934	}
 9935
 9936	if (i != TG3_RSS_INDIR_TBL_SIZE)
 9937		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
 9938}
 9939
 9940static void tg3_rss_write_indir_tbl(struct tg3 *tp)
 9941{
 9942	int i = 0;
 9943	u32 reg = MAC_RSS_INDIR_TBL_0;
 9944
 9945	while (i < TG3_RSS_INDIR_TBL_SIZE) {
 9946		u32 val = tp->rss_ind_tbl[i];
 9947		i++;
 9948		for (; i % 8; i++) {
 9949			val <<= 4;
 9950			val |= tp->rss_ind_tbl[i];
 9951		}
 9952		tw32(reg, val);
 9953		reg += 4;
 9954	}
 9955}
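
/* Eight 4-bit queue indices are packed per 32-bit register above, with
 * the first entry of each group landing in the most significant nibble,
 * so the table occupies TG3_RSS_INDIR_TBL_SIZE / 8 consecutive registers
 * starting at MAC_RSS_INDIR_TBL_0.
 */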
 9956
 9957static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
 9958{
 9959	if (tg3_asic_rev(tp) == ASIC_REV_5719)
 9960		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
 9961	else
 9962		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
 9963}
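
/* The returned bit arms the 5719/5720 read-DMA LSO workaround: it is
 * set in tg3_reset_hw() when any RDMA length register exceeds
 * TG3_MAX_MTU(), and cleared again from tg3_periodic_fetch_stats()
 * once enough frames have gone out (see the 5719_5720_RDMA_BUG flag).
 */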
 9964
 9965/* tp->lock is held. */
 9966static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
 9967{
 9968	u32 val, rdmac_mode;
 9969	int i, err, limit;
 9970	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
 9971
 9972	tg3_disable_ints(tp);
 9973
 9974	tg3_stop_fw(tp);
 9975
 9976	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
 9977
 9978	if (tg3_flag(tp, INIT_COMPLETE))
 9979		tg3_abort_hw(tp, 1);
 9980
 9981	if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
 9982	    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
 9983		tg3_phy_pull_config(tp);
 9984		tg3_eee_pull_config(tp, NULL);
 9985		tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
 9986	}
 9987
 9988	/* Enable MAC control of LPI */
 9989	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
 9990		tg3_setup_eee(tp);
 9991
 9992	if (reset_phy)
 9993		tg3_phy_reset(tp);
 9994
 9995	err = tg3_chip_reset(tp);
 9996	if (err)
 9997		return err;
 9998
 9999	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
10000
10001	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
10002		val = tr32(TG3_CPMU_CTRL);
10003		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
10004		tw32(TG3_CPMU_CTRL, val);
10005
10006		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
10007		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
10008		val |= CPMU_LSPD_10MB_MACCLK_6_25;
10009		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
10010
10011		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
10012		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
10013		val |= CPMU_LNK_AWARE_MACCLK_6_25;
10014		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
10015
10016		val = tr32(TG3_CPMU_HST_ACC);
10017		val &= ~CPMU_HST_ACC_MACCLK_MASK;
10018		val |= CPMU_HST_ACC_MACCLK_6_25;
10019		tw32(TG3_CPMU_HST_ACC, val);
10020	}
10021
10022	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
10023		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
10024		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
10025		       PCIE_PWR_MGMT_L1_THRESH_4MS;
10026		tw32(PCIE_PWR_MGMT_THRESH, val);
10027
10028		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
10029		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
10030
10031		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
10032
10033		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
10034		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
10035	}
10036
10037	if (tg3_flag(tp, L1PLLPD_EN)) {
10038		u32 grc_mode = tr32(GRC_MODE);
10039
10040		/* Access the lower 1K of PL PCIE block registers. */
10041		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10042		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
10043
10044		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
10045		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
10046		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
10047
10048		tw32(GRC_MODE, grc_mode);
10049	}
10050
10051	if (tg3_flag(tp, 57765_CLASS)) {
10052		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
10053			u32 grc_mode = tr32(GRC_MODE);
10054
10055			/* Access the lower 1K of PL PCIE block registers. */
10056			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10057			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
10058
10059			val = tr32(TG3_PCIE_TLDLPL_PORT +
10060				   TG3_PCIE_PL_LO_PHYCTL5);
10061			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
10062			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
10063
10064			tw32(GRC_MODE, grc_mode);
10065		}
10066
10067		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
10068			u32 grc_mode;
10069
10070			/* Fix transmit hangs */
10071			val = tr32(TG3_CPMU_PADRNG_CTL);
10072			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
10073			tw32(TG3_CPMU_PADRNG_CTL, val);
10074
10075			grc_mode = tr32(GRC_MODE);
10076
10077			/* Access the lower 1K of DL PCIE block registers. */
10078			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10079			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
10080
10081			val = tr32(TG3_PCIE_TLDLPL_PORT +
10082				   TG3_PCIE_DL_LO_FTSMAX);
10083			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
10084			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
10085			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);
10086
10087			tw32(GRC_MODE, grc_mode);
10088		}
10089
10090		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
10091		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
10092		val |= CPMU_LSPD_10MB_MACCLK_6_25;
10093		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
10094	}
10095
10096	/* This works around an issue with Athlon chipsets on
10097	 * B3 tigon3 silicon.  This bit has no effect on any
10098	 * other revision.  But do not set this on PCI Express
10099	 * chips and don't even touch the clocks if the CPMU is present.
10100	 */
10101	if (!tg3_flag(tp, CPMU_PRESENT)) {
10102		if (!tg3_flag(tp, PCI_EXPRESS))
10103			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
10104		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
10105	}
10106
10107	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
10108	    tg3_flag(tp, PCIX_MODE)) {
10109		val = tr32(TG3PCI_PCISTATE);
10110		val |= PCISTATE_RETRY_SAME_DMA;
10111		tw32(TG3PCI_PCISTATE, val);
10112	}
10113
10114	if (tg3_flag(tp, ENABLE_APE)) {
10115		/* Allow reads and writes to the
10116		 * APE register and memory space.
10117		 */
10118		val = tr32(TG3PCI_PCISTATE);
10119		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10120		       PCISTATE_ALLOW_APE_SHMEM_WR |
10121		       PCISTATE_ALLOW_APE_PSPACE_WR;
10122		tw32(TG3PCI_PCISTATE, val);
10123	}
10124
10125	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10126		/* Enable some hw fixes.  */
10127		val = tr32(TG3PCI_MSI_DATA);
10128		val |= (1 << 26) | (1 << 28) | (1 << 29);
10129		tw32(TG3PCI_MSI_DATA, val);
10130	}
10131
10132	/* Descriptor ring init may make accesses to the
10133	 * NIC SRAM area to set up the TX descriptors, so we
10134	 * can only do this after the hardware has been
10135	 * successfully reset.
10136	 */
10137	err = tg3_init_rings(tp);
10138	if (err)
10139		return err;
10140
10141	if (tg3_flag(tp, 57765_PLUS)) {
10142		val = tr32(TG3PCI_DMA_RW_CTRL) &
10143		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10144		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10145			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10146		if (!tg3_flag(tp, 57765_CLASS) &&
10147		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
10148		    tg3_asic_rev(tp) != ASIC_REV_5762)
10149			val |= DMA_RWCTRL_TAGGED_STAT_WA;
10150		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10151	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10152		   tg3_asic_rev(tp) != ASIC_REV_5761) {
10153		/* This value is determined during the probe-time DMA
10154		 * engine test, tg3_test_dma().
10155		 */
10156		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10157	}
10158
10159	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10160			  GRC_MODE_4X_NIC_SEND_RINGS |
10161			  GRC_MODE_NO_TX_PHDR_CSUM |
10162			  GRC_MODE_NO_RX_PHDR_CSUM);
10163	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10164
10165	/* Pseudo-header checksum is done by hardware logic and not
10166	 * the offload processors, so make the chip do the pseudo-
10167	 * header checksums on receive.  For transmit it is more
10168	 * convenient to do the pseudo-header checksum in software
10169	 * as Linux does that on transmit for us in all cases.
10170	 */
10171	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10172
10173	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10174	if (tp->rxptpctl)
10175		tw32(TG3_RX_PTP_CTL,
10176		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10177
10178	if (tg3_flag(tp, PTP_CAPABLE))
10179		val |= GRC_MODE_TIME_SYNC_ENABLE;
10180
10181	tw32(GRC_MODE, tp->grc_mode | val);
10182
10183	/* On one AMD platform, MRRS is restricted to 4000 because of a
10184	 * south bridge limitation. As a workaround, the driver sets MRRS
10185	 * to 2048 instead of the default 4096.
10186	 */
10187	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10188	    tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10189		val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10190		tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10191	}
10192
10193	/* Set up the timer prescaler register.  The clock is always 66 MHz. */
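	/* (A prescaler value of 65 presumably divides the 66 MHz clock by
	 * N + 1 = 66, giving the timer a 1 us tick.)
	 */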
10194	val = tr32(GRC_MISC_CFG);
10195	val &= ~0xff;
10196	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10197	tw32(GRC_MISC_CFG, val);
10198
10199	/* Initialize MBUF/DESC pool. */
10200	if (tg3_flag(tp, 5750_PLUS)) {
10201		/* Do nothing.  */
10202	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10203		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10204		if (tg3_asic_rev(tp) == ASIC_REV_5704)
10205			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10206		else
10207			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10208		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10209		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10210	} else if (tg3_flag(tp, TSO_CAPABLE)) {
10211		int fw_len;
10212
10213		fw_len = tp->fw_len;
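		/* Round the firmware length up to a 128-byte boundary. */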
10214		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10215		tw32(BUFMGR_MB_POOL_ADDR,
10216		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10217		tw32(BUFMGR_MB_POOL_SIZE,
10218		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10219	}
10220
10221	if (tp->dev->mtu <= ETH_DATA_LEN) {
10222		tw32(BUFMGR_MB_RDMA_LOW_WATER,
10223		     tp->bufmgr_config.mbuf_read_dma_low_water);
10224		tw32(BUFMGR_MB_MACRX_LOW_WATER,
10225		     tp->bufmgr_config.mbuf_mac_rx_low_water);
10226		tw32(BUFMGR_MB_HIGH_WATER,
10227		     tp->bufmgr_config.mbuf_high_water);
10228	} else {
10229		tw32(BUFMGR_MB_RDMA_LOW_WATER,
10230		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10231		tw32(BUFMGR_MB_MACRX_LOW_WATER,
10232		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10233		tw32(BUFMGR_MB_HIGH_WATER,
10234		     tp->bufmgr_config.mbuf_high_water_jumbo);
10235	}
10236	tw32(BUFMGR_DMA_LOW_WATER,
10237	     tp->bufmgr_config.dma_low_water);
10238	tw32(BUFMGR_DMA_HIGH_WATER,
10239	     tp->bufmgr_config.dma_high_water);
10240
10241	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10242	if (tg3_asic_rev(tp) == ASIC_REV_5719)
10243		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10244	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10245	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
10246	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10247	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10248		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10249	tw32(BUFMGR_MODE, val);
10250	for (i = 0; i < 2000; i++) {
10251		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10252			break;
10253		udelay(10);
10254	}
10255	if (i >= 2000) {
10256		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10257		return -ENODEV;
10258	}
10259
10260	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10261		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10262
10263	tg3_setup_rxbd_thresholds(tp);
10264
10265	/* Initialize TG3_BDINFO's at:
10266	 *  RCVDBDI_STD_BD:	standard eth size rx ring
10267	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
10268	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
10269	 *
10270	 * like so:
10271	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
10272	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
10273	 *                              ring attribute flags
10274	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
10275	 *
10276	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10277	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10278	 *
10279	 * The size of each ring is fixed in the firmware, but the location is
10280	 * configurable.
10281	 */
10282	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10283	     ((u64) tpr->rx_std_mapping >> 32));
10284	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10285	     ((u64) tpr->rx_std_mapping & 0xffffffff));
10286	if (!tg3_flag(tp, 5717_PLUS))
10287		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10288		     NIC_SRAM_RX_BUFFER_DESC);
10289
10290	/* Disable the mini ring */
10291	if (!tg3_flag(tp, 5705_PLUS))
10292		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10293		     BDINFO_FLAGS_DISABLED);
10294
10295	/* Program the jumbo buffer descriptor ring control
10296	 * blocks on those devices that have them.
10297	 */
10298	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10299	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10300
10301		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10302			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10303			     ((u64) tpr->rx_jmb_mapping >> 32));
10304			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10305			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10306			val = TG3_RX_JMB_RING_SIZE(tp) <<
10307			      BDINFO_FLAGS_MAXLEN_SHIFT;
10308			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10309			     val | BDINFO_FLAGS_USE_EXT_RECV);
10310			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10311			    tg3_flag(tp, 57765_CLASS) ||
10312			    tg3_asic_rev(tp) == ASIC_REV_5762)
10313				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10314				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10315		} else {
10316			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10317			     BDINFO_FLAGS_DISABLED);
10318		}
10319
10320		if (tg3_flag(tp, 57765_PLUS)) {
10321			val = TG3_RX_STD_RING_SIZE(tp);
10322			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10323			val |= (TG3_RX_STD_DMA_SZ << 2);
10324		} else
10325			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10326	} else
10327		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10328
10329	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10330
10331	tpr->rx_std_prod_idx = tp->rx_pending;
10332	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10333
10334	tpr->rx_jmb_prod_idx =
10335		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10336	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10337
10338	tg3_rings_reset(tp);
10339
10340	/* Initialize MAC address and backoff seed. */
10341	__tg3_set_mac_addr(tp, false);
10342
10343	/* MTU + ethernet header + FCS + optional VLAN tag */
10344	tw32(MAC_RX_MTU_SIZE,
10345	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10346
10347	/* The slot time is changed by tg3_setup_phy if we
10348	 * run at gigabit with half duplex.
10349	 */
10350	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10351	      (6 << TX_LENGTHS_IPG_SHIFT) |
10352	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10353
10354	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10355	    tg3_asic_rev(tp) == ASIC_REV_5762)
10356		val |= tr32(MAC_TX_LENGTHS) &
10357		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
10358			TX_LENGTHS_CNT_DWN_VAL_MSK);
10359
10360	tw32(MAC_TX_LENGTHS, val);
10361
10362	/* Receive rules. */
10363	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10364	tw32(RCVLPC_CONFIG, 0x0181);
10365
10366	/* Calculate the RDMAC_MODE setting early; we need it to determine
10367	 * the RCVLPC_STATS_ENABLE mask.
10368	 */
10369	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10370		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10371		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10372		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10373		      RDMAC_MODE_LNGREAD_ENAB);
10374
10375	if (tg3_asic_rev(tp) == ASIC_REV_5717)
10376		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10377
10378	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10379	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10380	    tg3_asic_rev(tp) == ASIC_REV_57780)
10381		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10382			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10383			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10384
10385	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10386	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10387		if (tg3_flag(tp, TSO_CAPABLE)) {
10388			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10389		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10390			   !tg3_flag(tp, IS_5788)) {
10391			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10392		}
10393	}
10394
10395	if (tg3_flag(tp, PCI_EXPRESS))
10396		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10397
10398	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10399		tp->dma_limit = 0;
10400		if (tp->dev->mtu <= ETH_DATA_LEN) {
10401			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10402			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10403		}
10404	}
10405
10406	if (tg3_flag(tp, HW_TSO_1) ||
10407	    tg3_flag(tp, HW_TSO_2) ||
10408	    tg3_flag(tp, HW_TSO_3))
10409		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10410
10411	if (tg3_flag(tp, 57765_PLUS) ||
10412	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10413	    tg3_asic_rev(tp) == ASIC_REV_57780)
10414		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10415
10416	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10417	    tg3_asic_rev(tp) == ASIC_REV_5762)
10418		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10419
10420	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10421	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
10422	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10423	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
10424	    tg3_flag(tp, 57765_PLUS)) {
10425		u32 tgtreg;
10426
10427		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10428			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10429		else
10430			tgtreg = TG3_RDMA_RSRVCTRL_REG;
10431
10432		val = tr32(tgtreg);
10433		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10434		    tg3_asic_rev(tp) == ASIC_REV_5762) {
10435			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10436				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10437				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10438			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10439			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10440			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10441		}
10442		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10443	}
10444
10445	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10446	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
10447	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10448		u32 tgtreg;
10449
10450		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10451			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10452		else
10453			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10454
10455		val = tr32(tgtreg);
10456		tw32(tgtreg, val |
10457		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10458		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10459	}
10460
10461	/* Receive/send statistics. */
10462	if (tg3_flag(tp, 5750_PLUS)) {
10463		val = tr32(RCVLPC_STATS_ENABLE);
10464		val &= ~RCVLPC_STATSENAB_DACK_FIX;
10465		tw32(RCVLPC_STATS_ENABLE, val);
10466	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10467		   tg3_flag(tp, TSO_CAPABLE)) {
10468		val = tr32(RCVLPC_STATS_ENABLE);
10469		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10470		tw32(RCVLPC_STATS_ENABLE, val);
10471	} else {
10472		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10473	}
10474	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10475	tw32(SNDDATAI_STATSENAB, 0xffffff);
10476	tw32(SNDDATAI_STATSCTRL,
10477	     (SNDDATAI_SCTRL_ENABLE |
10478	      SNDDATAI_SCTRL_FASTUPD));
10479
10480	/* Setup host coalescing engine. */
10481	tw32(HOSTCC_MODE, 0);
10482	for (i = 0; i < 2000; i++) {
10483		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10484			break;
10485		udelay(10);
10486	}
10487
10488	__tg3_set_coalesce(tp, &tp->coal);
10489
10490	if (!tg3_flag(tp, 5705_PLUS)) {
10491		/* Status/statistics block address.  See tg3_timer,
10492		 * the tg3_periodic_fetch_stats call there, and
10493		 * tg3_get_stats to see how this works for 5705/5750 chips.
10494		 */
10495		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10496		     ((u64) tp->stats_mapping >> 32));
10497		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10498		     ((u64) tp->stats_mapping & 0xffffffff));
10499		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10500
10501		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10502
10503		/* Clear statistics and status block memory areas */
10504		for (i = NIC_SRAM_STATS_BLK;
10505		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10506		     i += sizeof(u32)) {
10507			tg3_write_mem(tp, i, 0);
10508			udelay(40);
10509		}
10510	}
10511
10512	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10513
10514	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10515	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10516	if (!tg3_flag(tp, 5705_PLUS))
10517		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10518
10519	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10520		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10521		/* Reset to prevent intermittently losing the 1st rx packet */
10522		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10523		udelay(10);
10524	}
10525
10526	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10527			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10528			MAC_MODE_FHDE_ENABLE;
10529	if (tg3_flag(tp, ENABLE_APE))
10530		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10531	if (!tg3_flag(tp, 5705_PLUS) &&
10532	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10533	    tg3_asic_rev(tp) != ASIC_REV_5700)
10534		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10535	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10536	udelay(40);
10537
10538	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10539	 * If TG3_FLAG_IS_NIC is zero, we should read the
10540	 * register to preserve the GPIO settings for LOMs. The GPIOs,
10541	 * whether used as inputs or outputs, are set by boot code after
10542	 * reset.
10543	 */
10544	if (!tg3_flag(tp, IS_NIC)) {
10545		u32 gpio_mask;
10546
10547		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10548			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10549			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10550
10551		if (tg3_asic_rev(tp) == ASIC_REV_5752)
10552			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10553				     GRC_LCLCTRL_GPIO_OUTPUT3;
10554
10555		if (tg3_asic_rev(tp) == ASIC_REV_5755)
10556			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10557
10558		tp->grc_local_ctrl &= ~gpio_mask;
10559		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10560
10561		/* GPIO1 must be driven high for eeprom write protect */
10562		if (tg3_flag(tp, EEPROM_WRITE_PROT))
10563			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10564					       GRC_LCLCTRL_GPIO_OUTPUT1);
10565	}
10566	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10567	udelay(100);
10568
10569	if (tg3_flag(tp, USING_MSIX)) {
10570		val = tr32(MSGINT_MODE);
10571		val |= MSGINT_MODE_ENABLE;
10572		if (tp->irq_cnt > 1)
10573			val |= MSGINT_MODE_MULTIVEC_EN;
10574		if (!tg3_flag(tp, 1SHOT_MSI))
10575			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10576		tw32(MSGINT_MODE, val);
10577	}
10578
10579	if (!tg3_flag(tp, 5705_PLUS)) {
10580		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10581		udelay(40);
10582	}
10583
10584	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10585	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10586	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10587	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10588	       WDMAC_MODE_LNGREAD_ENAB);
10589
10590	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10591	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10592		if (tg3_flag(tp, TSO_CAPABLE) &&
10593		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10594		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10595			/* nothing */
10596		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10597			   !tg3_flag(tp, IS_5788)) {
10598			val |= WDMAC_MODE_RX_ACCEL;
10599		}
10600	}
10601
10602	/* Enable host coalescing bug fix */
10603	if (tg3_flag(tp, 5755_PLUS))
10604		val |= WDMAC_MODE_STATUS_TAG_FIX;
10605
10606	if (tg3_asic_rev(tp) == ASIC_REV_5785)
10607		val |= WDMAC_MODE_BURST_ALL_DATA;
10608
10609	tw32_f(WDMAC_MODE, val);
10610	udelay(40);
10611
10612	if (tg3_flag(tp, PCIX_MODE)) {
10613		u16 pcix_cmd;
10614
10615		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10616				     &pcix_cmd);
10617		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10618			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10619			pcix_cmd |= PCI_X_CMD_READ_2K;
10620		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10621			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10622			pcix_cmd |= PCI_X_CMD_READ_2K;
10623		}
10624		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10625				      pcix_cmd);
10626	}
10627
10628	tw32_f(RDMAC_MODE, rdmac_mode);
10629	udelay(40);
10630
10631	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10632	    tg3_asic_rev(tp) == ASIC_REV_5720) {
10633		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10634			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10635				break;
10636		}
10637		if (i < TG3_NUM_RDMA_CHANNELS) {
10638			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10639			val |= tg3_lso_rd_dma_workaround_bit(tp);
10640			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10641			tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10642		}
10643	}
10644
10645	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10646	if (!tg3_flag(tp, 5705_PLUS))
10647		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10648
10649	if (tg3_asic_rev(tp) == ASIC_REV_5761)
10650		tw32(SNDDATAC_MODE,
10651		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10652	else
10653		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10654
10655	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10656	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10657	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10658	if (tg3_flag(tp, LRG_PROD_RING_CAP))
10659		val |= RCVDBDI_MODE_LRG_RING_SZ;
10660	tw32(RCVDBDI_MODE, val);
10661	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10662	if (tg3_flag(tp, HW_TSO_1) ||
10663	    tg3_flag(tp, HW_TSO_2) ||
10664	    tg3_flag(tp, HW_TSO_3))
10665		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10666	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10667	if (tg3_flag(tp, ENABLE_TSS))
10668		val |= SNDBDI_MODE_MULTI_TXQ_EN;
10669	tw32(SNDBDI_MODE, val);
10670	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10671
10672	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10673		err = tg3_load_5701_a0_firmware_fix(tp);
10674		if (err)
10675			return err;
10676	}
10677
10678	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10679		/* Ignore any errors from the firmware download. If the download
10680		 * fails, the device will operate with EEE disabled.
10681		 */
10682		tg3_load_57766_firmware(tp);
10683	}
10684
10685	if (tg3_flag(tp, TSO_CAPABLE)) {
10686		err = tg3_load_tso_firmware(tp);
10687		if (err)
10688			return err;
10689	}
10690
10691	tp->tx_mode = TX_MODE_ENABLE;
10692
10693	if (tg3_flag(tp, 5755_PLUS) ||
10694	    tg3_asic_rev(tp) == ASIC_REV_5906)
10695		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10696
10697	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10698	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10699		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10700		tp->tx_mode &= ~val;
10701		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10702	}
10703
10704	tw32_f(MAC_TX_MODE, tp->tx_mode);
10705	udelay(100);
10706
10707	if (tg3_flag(tp, ENABLE_RSS)) {
10708		u32 rss_key[10];
10709
10710		tg3_rss_write_indir_tbl(tp);
10711
10712		netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10713
10714		for (i = 0; i < 10 ; i++)
10715			tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10716	}
10717
10718	tp->rx_mode = RX_MODE_ENABLE;
10719	if (tg3_flag(tp, 5755_PLUS))
10720		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10721
10722	if (tg3_asic_rev(tp) == ASIC_REV_5762)
10723		tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10724
10725	if (tg3_flag(tp, ENABLE_RSS))
10726		tp->rx_mode |= RX_MODE_RSS_ENABLE |
10727			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
10728			       RX_MODE_RSS_IPV6_HASH_EN |
10729			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
10730			       RX_MODE_RSS_IPV4_HASH_EN |
10731			       RX_MODE_RSS_TCP_IPV4_HASH_EN;
10732
10733	tw32_f(MAC_RX_MODE, tp->rx_mode);
10734	udelay(10);
10735
10736	tw32(MAC_LED_CTRL, tp->led_ctrl);
10737
10738	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10739	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10740		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10741		udelay(10);
10742	}
10743	tw32_f(MAC_RX_MODE, tp->rx_mode);
10744	udelay(10);
10745
10746	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10747		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10748		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10749			/* Set drive transmission level to 1.2V, but only if
10750			 * the signal pre-emphasis bit is not set. */
10751			val = tr32(MAC_SERDES_CFG);
10752			val &= 0xfffff000;
10753			val |= 0x880;
10754			tw32(MAC_SERDES_CFG, val);
10755		}
10756		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10757			tw32(MAC_SERDES_CFG, 0x616000);
10758	}
10759
10760	/* Prevent chip from dropping frames when flow control
10761	 * is enabled.
10762	 */
10763	if (tg3_flag(tp, 57765_CLASS))
10764		val = 1;
10765	else
10766		val = 2;
10767	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10768
10769	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10770	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10771		/* Use hardware link auto-negotiation */
10772		tg3_flag_set(tp, HW_AUTONEG);
10773	}
10774
10775	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10776	    tg3_asic_rev(tp) == ASIC_REV_5714) {
10777		u32 tmp;
10778
10779		tmp = tr32(SERDES_RX_CTRL);
10780		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10781		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10782		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10783		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10784	}
10785
10786	if (!tg3_flag(tp, USE_PHYLIB)) {
10787		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10788			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10789
10790		err = tg3_setup_phy(tp, false);
10791		if (err)
10792			return err;
10793
10794		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10795		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10796			u32 tmp;
10797
10798			/* Clear CRC stats. */
10799			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10800				tg3_writephy(tp, MII_TG3_TEST1,
10801					     tmp | MII_TG3_TEST1_CRC_EN);
10802				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10803			}
10804		}
10805	}
10806
10807	__tg3_set_rx_mode(tp->dev);
10808
10809	/* Initialize receive rules. */
10810	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10811	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10812	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10813	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10814
10815	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10816		limit = 8;
10817	else
10818		limit = 16;
10819	if (tg3_flag(tp, ENABLE_ASF))
10820		limit -= 4;
10821	switch (limit) {
10822	case 16:
10823		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10824		fallthrough;
10825	case 15:
10826		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10827		fallthrough;
10828	case 14:
10829		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10830		fallthrough;
10831	case 13:
10832		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10833		fallthrough;
10834	case 12:
10835		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10836		fallthrough;
10837	case 11:
10838		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10839		fallthrough;
10840	case 10:
10841		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10842		fallthrough;
10843	case 9:
10844		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10845		fallthrough;
10846	case 8:
10847		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10848		fallthrough;
10849	case 7:
10850		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10851		fallthrough;
10852	case 6:
10853		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10854		fallthrough;
10855	case 5:
10856		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10857		fallthrough;
10858	case 4:
10859		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10860	case 3:
10861		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10862	case 2:
10863	case 1:
10864
10865	default:
10866		break;
10867	}
10868
10869	if (tg3_flag(tp, ENABLE_APE))
10870		/* Write our heartbeat update interval to APE. */
10871		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10872				APE_HOST_HEARTBEAT_INT_5SEC);
10873
10874	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10875
10876	return 0;
10877}
10878
10879/* Called at device open time to get the chip ready for
10880 * packet processing.  Invoked with tp->lock held.
10881 */
10882static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10883{
10884	/* The chip may have just been powered on. If so, the boot code may
10885	 * still be running initialization. Wait for it to finish to avoid races in
10886	 * accessing the hardware.
10887	 */
10888	tg3_enable_register_access(tp);
10889	tg3_poll_fw(tp);
10890
10891	tg3_switch_clocks(tp);
10892
10893	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10894
10895	return tg3_reset_hw(tp, reset_phy);
10896}
10897
10898#ifdef CONFIG_TIGON3_HWMON
10899static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10900{
10901	u32 off, len = TG3_OCIR_LEN;
10902	int i;
10903
10904	for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) {
10905		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10906
10907		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10908		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10909			memset(ocir, 0, len);
10910	}
10911}
10912
10913/* sysfs attributes for hwmon */
10914static ssize_t tg3_show_temp(struct device *dev,
10915			     struct device_attribute *devattr, char *buf)
10916{
10917	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10918	struct tg3 *tp = dev_get_drvdata(dev);
10919	u32 temperature;
10920
10921	spin_lock_bh(&tp->lock);
10922	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10923				sizeof(temperature));
10924	spin_unlock_bh(&tp->lock);
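	/* The hwmon ABI wants millidegrees Celsius; the APE reports whole
	 * degrees, hence the multiplication below.
	 */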
10925	return sprintf(buf, "%u\n", temperature * 1000);
10926}
10927
10928
10929static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10930			  TG3_TEMP_SENSOR_OFFSET);
10931static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10932			  TG3_TEMP_CAUTION_OFFSET);
10933static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10934			  TG3_TEMP_MAX_OFFSET);
10935
10936static struct attribute *tg3_attrs[] = {
10937	&sensor_dev_attr_temp1_input.dev_attr.attr,
10938	&sensor_dev_attr_temp1_crit.dev_attr.attr,
10939	&sensor_dev_attr_temp1_max.dev_attr.attr,
10940	NULL
10941};
10942ATTRIBUTE_GROUPS(tg3);
10943
10944static void tg3_hwmon_close(struct tg3 *tp)
10945{
10946	if (tp->hwmon_dev) {
10947		hwmon_device_unregister(tp->hwmon_dev);
10948		tp->hwmon_dev = NULL;
10949	}
10950}
10951
10952static void tg3_hwmon_open(struct tg3 *tp)
10953{
10954	int i;
10955	u32 size = 0;
10956	struct pci_dev *pdev = tp->pdev;
10957	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10958
10959	tg3_sd_scan_scratchpad(tp, ocirs);
10960
10961	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10962		if (!ocirs[i].src_data_length)
10963			continue;
10964
10965		size += ocirs[i].src_hdr_length;
10966		size += ocirs[i].src_data_length;
10967	}
10968
10969	if (!size)
10970		return;
10971
10972	tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10973							  tp, tg3_groups);
10974	if (IS_ERR(tp->hwmon_dev)) {
10975		tp->hwmon_dev = NULL;
10976		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10977	}
10978}
10979#else
10980static inline void tg3_hwmon_close(struct tg3 *tp) { }
10981static inline void tg3_hwmon_open(struct tg3 *tp) { }
10982#endif /* CONFIG_TIGON3_HWMON */
10983
10984
10985#define TG3_STAT_ADD32(PSTAT, REG) \
10986do {	u32 __val = tr32(REG); \
10987	(PSTAT)->low += __val; \
10988	if ((PSTAT)->low < __val) \
10989		(PSTAT)->high += 1; \
10990} while (0)
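
/* The MAC statistics registers are only 32 bits wide, so TG3_STAT_ADD32
 * accumulates them into the 64-bit high/low software counters of the
 * hw_stats block.  The post-add comparison is the usual carry test: if
 * low ended up smaller than the value just added, the addition wrapped.
 * A hypothetical function form of the same logic (assuming the
 * tg3_stat64_t high/low layout from tg3.h):
 */
#if 0
static void tg3_stat_add32(tg3_stat64_t *pstat, u32 val)
{
	pstat->low += val;
	if (pstat->low < val)		/* 32-bit wraparound => carry */
		pstat->high += 1;
}
#endif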
10991
10992static void tg3_periodic_fetch_stats(struct tg3 *tp)
10993{
10994	struct tg3_hw_stats *sp = tp->hw_stats;
10995
10996	if (!tp->link_up)
10997		return;
10998
10999	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
11000	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
11001	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
11002	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
11003	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
11004	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
11005	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
11006	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
11007	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
11008	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
11009	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
11010	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
11011	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
11012	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
11013		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
11014		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
11015		u32 val;
11016
11017		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
11018		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
11019		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
11020		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
11021	}
11022
11023	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
11024	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
11025	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
11026	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
11027	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
11028	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
11029	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
11030	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
11031	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
11032	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
11033	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
11034	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
11035	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
11036	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
11037
11038	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
11039	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
11040	    tg3_asic_rev(tp) != ASIC_REV_5762 &&
11041	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
11042	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
11043		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
11044	} else {
11045		u32 val = tr32(HOSTCC_FLOW_ATTN);
11046		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
11047		if (val) {
11048			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
11049			sp->rx_discards.low += val;
11050			if (sp->rx_discards.low < val)
11051				sp->rx_discards.high += 1;
11052		}
11053		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
11054	}
11055	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
11056}
11057
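/* Detect an MSI the chip signalled but the host never serviced: if a
 * NAPI context reports pending work while its rx/tx consumer indices
 * have not moved since the previous timer tick, give it one more tick
 * of grace and then invoke the MSI handler by hand.
 */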
11058static void tg3_chk_missed_msi(struct tg3 *tp)
11059{
11060	u32 i;
11061
11062	for (i = 0; i < tp->irq_cnt; i++) {
11063		struct tg3_napi *tnapi = &tp->napi[i];
11064
11065		if (tg3_has_work(tnapi)) {
11066			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
11067			    tnapi->last_tx_cons == tnapi->tx_cons) {
11068				if (tnapi->chk_msi_cnt < 1) {
11069					tnapi->chk_msi_cnt++;
11070					return;
11071				}
11072				tg3_msi(0, tnapi);
11073			}
11074		}
11075		tnapi->chk_msi_cnt = 0;
11076		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
11077		tnapi->last_tx_cons = tnapi->tx_cons;
11078	}
11079}
11080
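/* Driver heartbeat timer.  It fires every tp->timer_offset jiffies;
 * the once-per-second work is divided down from that rate by
 * tp->timer_counter, and the ASF heartbeat by tp->asf_counter.  Runs
 * under tp->lock and always re-arms itself.
 */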
11081static void tg3_timer(struct timer_list *t)
11082{
11083	struct tg3 *tp = from_timer(tp, t, timer);
11084
11085	spin_lock(&tp->lock);
11086
11087	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
11088		spin_unlock(&tp->lock);
11089		goto restart_timer;
11090	}
11091
11092	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
11093	    tg3_flag(tp, 57765_CLASS))
11094		tg3_chk_missed_msi(tp);
11095
11096	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
11097		/* BCM4785: Flush posted writes from GbE to host memory. */
11098		tr32(HOSTCC_MODE);
11099	}
11100
11101	if (!tg3_flag(tp, TAGGED_STATUS)) {
11102		/* All of this garbage exists because, when using non-tagged
11103		 * IRQ status, the mailbox/status_block protocol the chip
11104		 * uses with the CPU is race prone.
11105		 */
11106		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
11107			tw32(GRC_LOCAL_CTRL,
11108			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
11109		} else {
11110			tw32(HOSTCC_MODE, tp->coalesce_mode |
11111			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
11112		}
11113
11114		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11115			spin_unlock(&tp->lock);
11116			tg3_reset_task_schedule(tp);
11117			goto restart_timer;
11118		}
11119	}
11120
11121	/* This part only runs once per second. */
11122	if (!--tp->timer_counter) {
11123		if (tg3_flag(tp, 5705_PLUS))
11124			tg3_periodic_fetch_stats(tp);
11125
11126		if (tp->setlpicnt && !--tp->setlpicnt)
11127			tg3_phy_eee_enable(tp);
11128
11129		if (tg3_flag(tp, USE_LINKCHG_REG)) {
11130			u32 mac_stat;
11131			int phy_event;
11132
11133			mac_stat = tr32(MAC_STATUS);
11134
11135			phy_event = 0;
11136			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11137				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11138					phy_event = 1;
11139			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11140				phy_event = 1;
11141
11142			if (phy_event)
11143				tg3_setup_phy(tp, false);
11144		} else if (tg3_flag(tp, POLL_SERDES)) {
11145			u32 mac_stat = tr32(MAC_STATUS);
11146			int need_setup = 0;
11147
11148			if (tp->link_up &&
11149			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11150				need_setup = 1;
11151			}
11152			if (!tp->link_up &&
11153			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
11154					 MAC_STATUS_SIGNAL_DET))) {
11155				need_setup = 1;
11156			}
11157			if (need_setup) {
11158				if (!tp->serdes_counter) {
11159					tw32_f(MAC_MODE,
11160					     (tp->mac_mode &
11161					      ~MAC_MODE_PORT_MODE_MASK));
11162					udelay(40);
11163					tw32_f(MAC_MODE, tp->mac_mode);
11164					udelay(40);
11165				}
11166				tg3_setup_phy(tp, false);
11167			}
11168		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11169			   tg3_flag(tp, 5780_CLASS)) {
11170			tg3_serdes_parallel_detect(tp);
11171		} else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11172			u32 cpmu = tr32(TG3_CPMU_STATUS);
11173			bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11174					 TG3_CPMU_STATUS_LINK_MASK);
11175
11176			if (link_up != tp->link_up)
11177				tg3_setup_phy(tp, false);
11178		}
11179
11180		tp->timer_counter = tp->timer_multiplier;
11181	}
11182
11183	/* Heartbeat is only sent once every 2 seconds.
11184	 *
11185	 * The heartbeat is to tell the ASF firmware that the host
11186	 * driver is still alive.  In the event that the OS crashes,
11187	 * ASF needs to reset the hardware to free up the FIFO space
11188	 * that may be filled with rx packets destined for the host.
11189	 * If the FIFO is full, ASF will no longer function properly.
11190	 *
11191	 * Unintended resets have been reported on real-time kernels
11192	 * where the timer doesn't run on time.  Netpoll will also have
11193	 * the same problem.
11194	 *
11195	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11196	 * to check the ring condition when the heartbeat is expiring
11197	 * before doing the reset.  This will prevent most unintended
11198	 * resets.
11199	 */
11200	if (!--tp->asf_counter) {
11201		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11202			tg3_wait_for_event_ack(tp);
11203
11204			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11205				      FWCMD_NICDRV_ALIVE3);
11206			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11207			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11208				      TG3_FW_UPDATE_TIMEOUT_SEC);
11209
11210			tg3_generate_fw_event(tp);
11211		}
11212		tp->asf_counter = tp->asf_multiplier;
11213	}
11214
11215	/* Update the APE heartbeat every 5 seconds. */
11216	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11217
11218	spin_unlock(&tp->lock);
11219
11220restart_timer:
11221	tp->timer.expires = jiffies + tp->timer_offset;
11222	add_timer(&tp->timer);
11223}
11224
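/* Example (assuming HZ = 1000): with tagged status on a chip that is
 * neither a 5717 nor 57765-class, the timer fires once per second and
 * timer_multiplier = 1; otherwise it fires every HZ / 10 jiffies
 * (100 ms) and timer_multiplier = 10, so the once-per-second work in
 * tg3_timer() still runs at the same rate.
 */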
11225static void tg3_timer_init(struct tg3 *tp)
11226{
11227	if (tg3_flag(tp, TAGGED_STATUS) &&
11228	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
11229	    !tg3_flag(tp, 57765_CLASS))
11230		tp->timer_offset = HZ;
11231	else
11232		tp->timer_offset = HZ / 10;
11233
11234	BUG_ON(tp->timer_offset > HZ);
11235
11236	tp->timer_multiplier = (HZ / tp->timer_offset);
11237	tp->asf_multiplier = (HZ / tp->timer_offset) *
11238			     TG3_FW_UPDATE_FREQ_SEC;
11239
11240	timer_setup(&tp->timer, tg3_timer, 0);
11241}
11242
11243static void tg3_timer_start(struct tg3 *tp)
11244{
11245	tp->asf_counter   = tp->asf_multiplier;
11246	tp->timer_counter = tp->timer_multiplier;
11247
11248	tp->timer.expires = jiffies + tp->timer_offset;
11249	add_timer(&tp->timer);
11250}
11251
11252static void tg3_timer_stop(struct tg3 *tp)
11253{
11254	del_timer_sync(&tp->timer);
11255}
11256
11257/* Restart hardware after configuration changes, self-test, etc.
11258 * Invoked with tp->lock held.
11259 */
11260static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11261	__releases(tp->lock)
11262	__acquires(tp->lock)
11263{
11264	int err;
11265
11266	err = tg3_init_hw(tp, reset_phy);
11267	if (err) {
11268		netdev_err(tp->dev,
11269			   "Failed to re-initialize device, aborting\n");
11270		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11271		tg3_full_unlock(tp);
11272		tg3_timer_stop(tp);
11273		tp->irq_sync = 0;
11274		tg3_napi_enable(tp);
11275		dev_close(tp->dev);
11276		tg3_full_lock(tp, 0);
11277	}
11278	return err;
11279}
11280
11281static void tg3_reset_task(struct work_struct *work)
11282{
11283	struct tg3 *tp = container_of(work, struct tg3, reset_task);
11284	int err;
11285
11286	rtnl_lock();
11287	tg3_full_lock(tp, 0);
11288
11289	if (tp->pcierr_recovery || !netif_running(tp->dev) ||
11290	    tp->pdev->error_state != pci_channel_io_normal) {
11291		tg3_flag_clear(tp, RESET_TASK_PENDING);
11292		tg3_full_unlock(tp);
11293		rtnl_unlock();
11294		return;
11295	}
11296
11297	tg3_full_unlock(tp);
11298
11299	tg3_phy_stop(tp);
11300
11301	tg3_netif_stop(tp);
11302
11303	tg3_full_lock(tp, 1);
11304
11305	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11306		tp->write32_tx_mbox = tg3_write32_tx_mbox;
11307		tp->write32_rx_mbox = tg3_write_flush_reg32;
11308		tg3_flag_set(tp, MBOX_WRITE_REORDER);
11309		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11310	}
11311
11312	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11313	err = tg3_init_hw(tp, true);
11314	if (err) {
11315		tg3_full_unlock(tp);
11316		tp->irq_sync = 0;
11317		tg3_napi_enable(tp);
11318		/* Clear this flag so that tg3_reset_task_cancel() will not
11319		 * call cancel_work_sync() and wait forever.
11320		 */
11321		tg3_flag_clear(tp, RESET_TASK_PENDING);
11322		dev_close(tp->dev);
11323		goto out;
11324	}
11325
11326	tg3_netif_start(tp);
11327	tg3_full_unlock(tp);
11328	tg3_phy_start(tp);
11329	tg3_flag_clear(tp, RESET_TASK_PENDING);
11330out:
11331	rtnl_unlock();
11332}
11333
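/* Request the IRQ for one NAPI context.  With multiple vectors, the
 * label encodes what the vector services; e.g. vector 1 handling both
 * rings on a device named "eth0" (a hypothetical name, for
 * illustration) would be labelled "eth0-txrx-1".
 */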
11334static int tg3_request_irq(struct tg3 *tp, int irq_num)
11335{
11336	irq_handler_t fn;
11337	unsigned long flags;
11338	char *name;
11339	struct tg3_napi *tnapi = &tp->napi[irq_num];
11340
11341	if (tp->irq_cnt == 1)
11342		name = tp->dev->name;
11343	else {
11344		name = &tnapi->irq_lbl[0];
11345		if (tnapi->tx_buffers && tnapi->rx_rcb)
11346			snprintf(name, sizeof(tnapi->irq_lbl),
11347				 "%s-txrx-%d", tp->dev->name, irq_num);
11348		else if (tnapi->tx_buffers)
11349			snprintf(name, sizeof(tnapi->irq_lbl),
11350				 "%s-tx-%d", tp->dev->name, irq_num);
11351		else if (tnapi->rx_rcb)
11352			snprintf(name, sizeof(tnapi->irq_lbl),
11353				 "%s-rx-%d", tp->dev->name, irq_num);
11354		else
11355			snprintf(name, sizeof(tnapi->irq_lbl),
11356				 "%s-%d", tp->dev->name, irq_num);
11357	}
11358
11359	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11360		fn = tg3_msi;
11361		if (tg3_flag(tp, 1SHOT_MSI))
11362			fn = tg3_msi_1shot;
11363		flags = 0;
11364	} else {
11365		fn = tg3_interrupt;
11366		if (tg3_flag(tp, TAGGED_STATUS))
11367			fn = tg3_interrupt_tagged;
11368		flags = IRQF_SHARED;
11369	}
11370
11371	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11372}
11373
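/* Verify that interrupts are actually delivered: temporarily swap in
 * tg3_test_isr, force a coalescing event with HOSTCC_MODE_NOW, then
 * poll for up to ~50 ms for either a non-zero interrupt mailbox or a
 * masked PCI interrupt before restoring the normal handler.
 */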
11374static int tg3_test_interrupt(struct tg3 *tp)
11375{
11376	struct tg3_napi *tnapi = &tp->napi[0];
11377	struct net_device *dev = tp->dev;
11378	int err, i, intr_ok = 0;
11379	u32 val;
11380
11381	if (!netif_running(dev))
11382		return -ENODEV;
11383
11384	tg3_disable_ints(tp);
11385
11386	free_irq(tnapi->irq_vec, tnapi);
11387
11388	/*
11389	 * Turn off MSI one shot mode.  Otherwise this test has no
11390	 * observable way to know whether the interrupt was delivered.
11391	 */
11392	if (tg3_flag(tp, 57765_PLUS)) {
11393		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11394		tw32(MSGINT_MODE, val);
11395	}
11396
11397	err = request_irq(tnapi->irq_vec, tg3_test_isr,
11398			  IRQF_SHARED, dev->name, tnapi);
11399	if (err)
11400		return err;
11401
11402	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11403	tg3_enable_ints(tp);
11404
11405	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11406	       tnapi->coal_now);
11407
11408	for (i = 0; i < 5; i++) {
11409		u32 int_mbox, misc_host_ctrl;
11410
11411		int_mbox = tr32_mailbox(tnapi->int_mbox);
11412		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11413
11414		if ((int_mbox != 0) ||
11415		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11416			intr_ok = 1;
11417			break;
11418		}
11419
11420		if (tg3_flag(tp, 57765_PLUS) &&
11421		    tnapi->hw_status->status_tag != tnapi->last_tag)
11422			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11423
11424		msleep(10);
11425	}
11426
11427	tg3_disable_ints(tp);
11428
11429	free_irq(tnapi->irq_vec, tnapi);
11430
11431	err = tg3_request_irq(tp, 0);
11432
11433	if (err)
11434		return err;
11435
11436	if (intr_ok) {
11437		/* Reenable MSI one shot mode. */
11438		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11439			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11440			tw32(MSGINT_MODE, val);
11441		}
11442		return 0;
11443	}
11444
11445	return -EIO;
11446}
11447
11448/* Returns 0 if the MSI test succeeds, or if the MSI test fails but
11449 * INTx mode is successfully restored.
11450 */
11451static int tg3_test_msi(struct tg3 *tp)
11452{
11453	int err;
11454	u16 pci_cmd;
11455
11456	if (!tg3_flag(tp, USING_MSI))
11457		return 0;
11458
11459	/* Turn off SERR reporting in case MSI terminates with Master
11460	 * Abort.
11461	 */
11462	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11463	pci_write_config_word(tp->pdev, PCI_COMMAND,
11464			      pci_cmd & ~PCI_COMMAND_SERR);
11465
11466	err = tg3_test_interrupt(tp);
11467
11468	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11469
11470	if (!err)
11471		return 0;
11472
11473	/* other failures */
11474	if (err != -EIO)
11475		return err;
11476
11477	/* MSI test failed, go back to INTx mode */
11478	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11479		    "to INTx mode. Please report this failure to the PCI "
11480		    "maintainer and include system chipset information\n");
11481
11482	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11483
11484	pci_disable_msi(tp->pdev);
11485
11486	tg3_flag_clear(tp, USING_MSI);
11487	tp->napi[0].irq_vec = tp->pdev->irq;
11488
11489	err = tg3_request_irq(tp, 0);
11490	if (err)
11491		return err;
11492
11493	/* Need to reset the chip because the MSI cycle may have terminated
11494	 * with Master Abort.
11495	 */
11496	tg3_full_lock(tp, 1);
11497
11498	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11499	err = tg3_init_hw(tp, true);
11500
11501	tg3_full_unlock(tp);
11502
11503	if (err)
11504		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11505
11506	return err;
11507}
11508
11509static int tg3_request_firmware(struct tg3 *tp)
11510{
11511	const struct tg3_firmware_hdr *fw_hdr;
11512
11513	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11514		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11515			   tp->fw_needed);
11516		return -ENOENT;
11517	}
11518
11519	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11520
11521	/* Firmware blob starts with version numbers, followed by
11522	 * start address and _full_ length including BSS sections
11523	 * (which must be longer than the actual data, of course).
11524	 */
11525
11526	tp->fw_len = be32_to_cpu(fw_hdr->len);	/* includes bss */
11527	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11528		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11529			   tp->fw_len, tp->fw_needed);
11530		release_firmware(tp->fw);
11531		tp->fw = NULL;
11532		return -EINVAL;
11533	}
11534
11535	/* We no longer need firmware; we have it. */
11536	tp->fw_needed = NULL;
11537	return 0;
11538}
11539
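/* Example: with rxq_cnt = 4 and txq_cnt = 1 this requests
 * max(4, 1) + 1 = 5 vectors (capped at tp->irq_max), the extra vector
 * being the one reserved for link interrupts as the comment inside
 * the function explains.
 */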
11540static u32 tg3_irq_count(struct tg3 *tp)
11541{
11542	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11543
11544	if (irq_cnt > 1) {
11545		/* We want as many rx rings enabled as there are cpus.
11546		 * In multiqueue MSI-X mode, the first MSI-X vector
11547		 * only deals with link interrupts, etc, so we add
11548		 * one to the number of vectors we are requesting.
11549		 */
11550		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11551	}
11552
11553	return irq_cnt;
11554}
11555
11556static bool tg3_enable_msix(struct tg3 *tp)
11557{
11558	int i, rc;
11559	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11560
11561	tp->txq_cnt = tp->txq_req;
11562	tp->rxq_cnt = tp->rxq_req;
11563	if (!tp->rxq_cnt)
11564		tp->rxq_cnt = netif_get_num_default_rss_queues();
11565	if (tp->rxq_cnt > tp->rxq_max)
11566		tp->rxq_cnt = tp->rxq_max;
11567
11568	/* Disable multiple TX rings by default.  Simple round-robin hardware
11569	 * scheduling of the TX rings can cause starvation of rings with
11570	 * small packets when other rings have TSO or jumbo packets.
11571	 */
11572	if (!tp->txq_req)
11573		tp->txq_cnt = 1;
11574
11575	tp->irq_cnt = tg3_irq_count(tp);
11576
11577	for (i = 0; i < tp->irq_max; i++) {
11578		msix_ent[i].entry  = i;
11579		msix_ent[i].vector = 0;
11580	}
11581
11582	rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11583	if (rc < 0) {
11584		return false;
11585	} else if (rc < tp->irq_cnt) {
11586		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11587			      tp->irq_cnt, rc);
11588		tp->irq_cnt = rc;
11589		tp->rxq_cnt = max(rc - 1, 1);
11590		if (tp->txq_cnt)
11591			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11592	}
11593
11594	for (i = 0; i < tp->irq_max; i++)
11595		tp->napi[i].irq_vec = msix_ent[i].vector;
11596
11597	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11598		pci_disable_msix(tp->pdev);
11599		return false;
11600	}
11601
11602	if (tp->irq_cnt == 1)
11603		return true;
11604
11605	tg3_flag_set(tp, ENABLE_RSS);
11606
11607	if (tp->txq_cnt > 1)
11608		tg3_flag_set(tp, ENABLE_TSS);
11609
11610	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11611
11612	return true;
11613}
11614
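/* Pick an interrupt mode: try MSI-X first, then fall back to MSI, and
 * finally to legacy INTx (the defcfg path).  Both MSI modes require
 * tagged status; chips claiming MSI support without it are forced
 * down to INTx.
 */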
11615static void tg3_ints_init(struct tg3 *tp)
11616{
11617	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11618	    !tg3_flag(tp, TAGGED_STATUS)) {
11619		/* All MSI supporting chips should support tagged
11620		 * status.  Warn and fall back to INTx otherwise.
11621		 */
11622		netdev_warn(tp->dev,
11623			    "MSI without TAGGED_STATUS? Not using MSI\n");
11624		goto defcfg;
11625	}
11626
11627	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11628		tg3_flag_set(tp, USING_MSIX);
11629	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11630		tg3_flag_set(tp, USING_MSI);
11631
11632	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11633		u32 msi_mode = tr32(MSGINT_MODE);
11634		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11635			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11636		if (!tg3_flag(tp, 1SHOT_MSI))
11637			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11638		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11639	}
11640defcfg:
11641	if (!tg3_flag(tp, USING_MSIX)) {
11642		tp->irq_cnt = 1;
11643		tp->napi[0].irq_vec = tp->pdev->irq;
11644	}
11645
11646	if (tp->irq_cnt == 1) {
11647		tp->txq_cnt = 1;
11648		tp->rxq_cnt = 1;
11649		netif_set_real_num_tx_queues(tp->dev, 1);
11650		netif_set_real_num_rx_queues(tp->dev, 1);
11651	}
11652}
11653
11654static void tg3_ints_fini(struct tg3 *tp)
11655{
11656	if (tg3_flag(tp, USING_MSIX))
11657		pci_disable_msix(tp->pdev);
11658	else if (tg3_flag(tp, USING_MSI))
11659		pci_disable_msi(tp->pdev);
11660	tg3_flag_clear(tp, USING_MSI);
11661	tg3_flag_clear(tp, USING_MSIX);
11662	tg3_flag_clear(tp, ENABLE_RSS);
11663	tg3_flag_clear(tp, ENABLE_TSS);
11664}
11665
11666static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11667		     bool init)
11668{
11669	struct net_device *dev = tp->dev;
11670	int i, err;
11671
11672	/*
11673	 * Set up interrupts first so we know how
11674	 * many NAPI resources to allocate.
11675	 */
11676	tg3_ints_init(tp);
11677
11678	tg3_rss_check_indir_tbl(tp);
11679
11680	/* The placement of this call is tied
11681	 * to the setup and use of Host TX descriptors.
11682	 */
11683	err = tg3_alloc_consistent(tp);
11684	if (err)
11685		goto out_ints_fini;
11686
11687	tg3_napi_init(tp);
11688
11689	tg3_napi_enable(tp);
11690
11691	for (i = 0; i < tp->irq_cnt; i++) {
11692		err = tg3_request_irq(tp, i);
11693		if (err) {
11694			for (i--; i >= 0; i--) {
11695				struct tg3_napi *tnapi = &tp->napi[i];
11696
11697				free_irq(tnapi->irq_vec, tnapi);
11698			}
11699			goto out_napi_fini;
11700		}
11701	}
11702
11703	tg3_full_lock(tp, 0);
11704
11705	if (init)
11706		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11707
11708	err = tg3_init_hw(tp, reset_phy);
11709	if (err) {
11710		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11711		tg3_free_rings(tp);
11712	}
11713
11714	tg3_full_unlock(tp);
11715
11716	if (err)
11717		goto out_free_irq;
11718
11719	if (test_irq && tg3_flag(tp, USING_MSI)) {
11720		err = tg3_test_msi(tp);
11721
11722		if (err) {
11723			tg3_full_lock(tp, 0);
11724			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11725			tg3_free_rings(tp);
11726			tg3_full_unlock(tp);
11727
11728			goto out_napi_fini;
11729		}
11730
11731		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11732			u32 val = tr32(PCIE_TRANSACTION_CFG);
11733
11734			tw32(PCIE_TRANSACTION_CFG,
11735			     val | PCIE_TRANS_CFG_1SHOT_MSI);
11736		}
11737	}
11738
11739	tg3_phy_start(tp);
11740
11741	tg3_hwmon_open(tp);
11742
11743	tg3_full_lock(tp, 0);
11744
11745	tg3_timer_start(tp);
11746	tg3_flag_set(tp, INIT_COMPLETE);
11747	tg3_enable_ints(tp);
11748
11749	tg3_ptp_resume(tp);
11750
11751	tg3_full_unlock(tp);
11752
11753	netif_tx_start_all_queues(dev);
11754
11755	/*
11756	 * If the loopback feature was turned on while the device was
11757	 * down, make sure that it is reinstalled properly now.
11758	 */
11759	if (dev->features & NETIF_F_LOOPBACK)
11760		tg3_set_loopback(dev, dev->features);
11761
11762	return 0;
11763
11764out_free_irq:
11765	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11766		struct tg3_napi *tnapi = &tp->napi[i];
11767		free_irq(tnapi->irq_vec, tnapi);
11768	}
11769
11770out_napi_fini:
11771	tg3_napi_disable(tp);
11772	tg3_napi_fini(tp);
11773	tg3_free_consistent(tp);
11774
11775out_ints_fini:
11776	tg3_ints_fini(tp);
11777
11778	return err;
11779}
11780
11781static void tg3_stop(struct tg3 *tp)
11782{
11783	int i;
11784
11785	tg3_reset_task_cancel(tp);
11786	tg3_netif_stop(tp);
11787
11788	tg3_timer_stop(tp);
11789
11790	tg3_hwmon_close(tp);
11791
11792	tg3_phy_stop(tp);
11793
11794	tg3_full_lock(tp, 1);
11795
11796	tg3_disable_ints(tp);
11797
11798	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11799	tg3_free_rings(tp);
11800	tg3_flag_clear(tp, INIT_COMPLETE);
11801
11802	tg3_full_unlock(tp);
11803
11804	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11805		struct tg3_napi *tnapi = &tp->napi[i];
11806		free_irq(tnapi->irq_vec, tnapi);
11807	}
11808
11809	tg3_ints_fini(tp);
11810
11811	tg3_napi_fini(tp);
11812
11813	tg3_free_consistent(tp);
11814}
11815
11816static int tg3_open(struct net_device *dev)
11817{
11818	struct tg3 *tp = netdev_priv(dev);
11819	int err;
11820
11821	if (tp->pcierr_recovery) {
11822		netdev_err(dev, "Failed to open device. PCI error recovery "
11823			   "in progress\n");
11824		return -EAGAIN;
11825	}
11826
11827	if (tp->fw_needed) {
11828		err = tg3_request_firmware(tp);
11829		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11830			if (err) {
11831				netdev_warn(tp->dev, "EEE capability disabled\n");
11832				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11833			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11834				netdev_warn(tp->dev, "EEE capability restored\n");
11835				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11836			}
11837		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11838			if (err)
11839				return err;
11840		} else if (err) {
11841			netdev_warn(tp->dev, "TSO capability disabled\n");
11842			tg3_flag_clear(tp, TSO_CAPABLE);
11843		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
11844			netdev_notice(tp->dev, "TSO capability restored\n");
11845			tg3_flag_set(tp, TSO_CAPABLE);
11846		}
11847	}
11848
11849	tg3_carrier_off(tp);
11850
11851	err = tg3_power_up(tp);
11852	if (err)
11853		return err;
11854
11855	tg3_full_lock(tp, 0);
11856
11857	tg3_disable_ints(tp);
11858	tg3_flag_clear(tp, INIT_COMPLETE);
11859
11860	tg3_full_unlock(tp);
11861
11862	err = tg3_start(tp,
11863			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11864			true, true);
11865	if (err) {
11866		tg3_frob_aux_power(tp, false);
11867		pci_set_power_state(tp->pdev, PCI_D3hot);
11868	}
11869
11870	return err;
11871}
11872
11873static int tg3_close(struct net_device *dev)
11874{
11875	struct tg3 *tp = netdev_priv(dev);
11876
11877	if (tp->pcierr_recovery) {
11878		netdev_err(dev, "Failed to close device. PCI error recovery "
11879			   "in progress\n");
11880		return -EAGAIN;
11881	}
11882
11883	tg3_stop(tp);
11884
11885	if (pci_device_is_present(tp->pdev)) {
11886		tg3_power_down_prepare(tp);
11887
11888		tg3_carrier_off(tp);
11889	}
11890	return 0;
11891}
11892
11893static inline u64 get_stat64(tg3_stat64_t *val)
11894{
11895	return ((u64)val->high << 32) | ((u64)val->low);
11896}
11897
11898static u64 tg3_calc_crc_errors(struct tg3 *tp)
11899{
11900	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11901
11902	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11903	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11904	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
11905		u32 val;
11906
11907		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11908			tg3_writephy(tp, MII_TG3_TEST1,
11909				     val | MII_TG3_TEST1_CRC_EN);
11910			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11911		} else
11912			val = 0;
11913
11914		tp->phy_crc_errors += val;
11915
11916		return tp->phy_crc_errors;
11917	}
11918
11919	return get_stat64(&hw_stats->rx_fcs_errors);
11920}
11921
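/* The statistics are preserved across chip resets: each ethtool
 * counter reported is the snapshot saved before the last reset
 * (tp->estats_prev) plus the current hardware count.
 */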
11922#define ESTAT_ADD(member) \
11923	estats->member =	old_estats->member + \
11924				get_stat64(&hw_stats->member)
11925
11926static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11927{
11928	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11929	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11930
11931	ESTAT_ADD(rx_octets);
11932	ESTAT_ADD(rx_fragments);
11933	ESTAT_ADD(rx_ucast_packets);
11934	ESTAT_ADD(rx_mcast_packets);
11935	ESTAT_ADD(rx_bcast_packets);
11936	ESTAT_ADD(rx_fcs_errors);
11937	ESTAT_ADD(rx_align_errors);
11938	ESTAT_ADD(rx_xon_pause_rcvd);
11939	ESTAT_ADD(rx_xoff_pause_rcvd);
11940	ESTAT_ADD(rx_mac_ctrl_rcvd);
11941	ESTAT_ADD(rx_xoff_entered);
11942	ESTAT_ADD(rx_frame_too_long_errors);
11943	ESTAT_ADD(rx_jabbers);
11944	ESTAT_ADD(rx_undersize_packets);
11945	ESTAT_ADD(rx_in_length_errors);
11946	ESTAT_ADD(rx_out_length_errors);
11947	ESTAT_ADD(rx_64_or_less_octet_packets);
11948	ESTAT_ADD(rx_65_to_127_octet_packets);
11949	ESTAT_ADD(rx_128_to_255_octet_packets);
11950	ESTAT_ADD(rx_256_to_511_octet_packets);
11951	ESTAT_ADD(rx_512_to_1023_octet_packets);
11952	ESTAT_ADD(rx_1024_to_1522_octet_packets);
11953	ESTAT_ADD(rx_1523_to_2047_octet_packets);
11954	ESTAT_ADD(rx_2048_to_4095_octet_packets);
11955	ESTAT_ADD(rx_4096_to_8191_octet_packets);
11956	ESTAT_ADD(rx_8192_to_9022_octet_packets);
11957
11958	ESTAT_ADD(tx_octets);
11959	ESTAT_ADD(tx_collisions);
11960	ESTAT_ADD(tx_xon_sent);
11961	ESTAT_ADD(tx_xoff_sent);
11962	ESTAT_ADD(tx_flow_control);
11963	ESTAT_ADD(tx_mac_errors);
11964	ESTAT_ADD(tx_single_collisions);
11965	ESTAT_ADD(tx_mult_collisions);
11966	ESTAT_ADD(tx_deferred);
11967	ESTAT_ADD(tx_excessive_collisions);
11968	ESTAT_ADD(tx_late_collisions);
11969	ESTAT_ADD(tx_collide_2times);
11970	ESTAT_ADD(tx_collide_3times);
11971	ESTAT_ADD(tx_collide_4times);
11972	ESTAT_ADD(tx_collide_5times);
11973	ESTAT_ADD(tx_collide_6times);
11974	ESTAT_ADD(tx_collide_7times);
11975	ESTAT_ADD(tx_collide_8times);
11976	ESTAT_ADD(tx_collide_9times);
11977	ESTAT_ADD(tx_collide_10times);
11978	ESTAT_ADD(tx_collide_11times);
11979	ESTAT_ADD(tx_collide_12times);
11980	ESTAT_ADD(tx_collide_13times);
11981	ESTAT_ADD(tx_collide_14times);
11982	ESTAT_ADD(tx_collide_15times);
11983	ESTAT_ADD(tx_ucast_packets);
11984	ESTAT_ADD(tx_mcast_packets);
11985	ESTAT_ADD(tx_bcast_packets);
11986	ESTAT_ADD(tx_carrier_sense_errors);
11987	ESTAT_ADD(tx_discards);
11988	ESTAT_ADD(tx_errors);
11989
11990	ESTAT_ADD(dma_writeq_full);
11991	ESTAT_ADD(dma_write_prioq_full);
11992	ESTAT_ADD(rxbds_empty);
11993	ESTAT_ADD(rx_discards);
11994	ESTAT_ADD(rx_errors);
11995	ESTAT_ADD(rx_threshold_hit);
11996
11997	ESTAT_ADD(dma_readq_full);
11998	ESTAT_ADD(dma_read_prioq_full);
11999	ESTAT_ADD(tx_comp_queue_full);
12000
12001	ESTAT_ADD(ring_set_send_prod_index);
12002	ESTAT_ADD(ring_status_update);
12003	ESTAT_ADD(nic_irqs);
12004	ESTAT_ADD(nic_avoided_irqs);
12005	ESTAT_ADD(nic_tx_threshold_hit);
12006
12007	ESTAT_ADD(mbuf_lwm_thresh_hit);
12008}
12009
12010static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
12011{
12012	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
12013	struct tg3_hw_stats *hw_stats = tp->hw_stats;
12014	unsigned long rx_dropped;
12015	unsigned long tx_dropped;
12016	int i;
12017
12018	stats->rx_packets = old_stats->rx_packets +
12019		get_stat64(&hw_stats->rx_ucast_packets) +
12020		get_stat64(&hw_stats->rx_mcast_packets) +
12021		get_stat64(&hw_stats->rx_bcast_packets);
12022
12023	stats->tx_packets = old_stats->tx_packets +
12024		get_stat64(&hw_stats->tx_ucast_packets) +
12025		get_stat64(&hw_stats->tx_mcast_packets) +
12026		get_stat64(&hw_stats->tx_bcast_packets);
12027
12028	stats->rx_bytes = old_stats->rx_bytes +
12029		get_stat64(&hw_stats->rx_octets);
12030	stats->tx_bytes = old_stats->tx_bytes +
12031		get_stat64(&hw_stats->tx_octets);
12032
12033	stats->rx_errors = old_stats->rx_errors +
12034		get_stat64(&hw_stats->rx_errors);
12035	stats->tx_errors = old_stats->tx_errors +
12036		get_stat64(&hw_stats->tx_errors) +
12037		get_stat64(&hw_stats->tx_mac_errors) +
12038		get_stat64(&hw_stats->tx_carrier_sense_errors) +
12039		get_stat64(&hw_stats->tx_discards);
12040
12041	stats->multicast = old_stats->multicast +
12042		get_stat64(&hw_stats->rx_mcast_packets);
12043	stats->collisions = old_stats->collisions +
12044		get_stat64(&hw_stats->tx_collisions);
12045
12046	stats->rx_length_errors = old_stats->rx_length_errors +
12047		get_stat64(&hw_stats->rx_frame_too_long_errors) +
12048		get_stat64(&hw_stats->rx_undersize_packets);
12049
12050	stats->rx_frame_errors = old_stats->rx_frame_errors +
12051		get_stat64(&hw_stats->rx_align_errors);
12052	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
12053		get_stat64(&hw_stats->tx_discards);
12054	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
12055		get_stat64(&hw_stats->tx_carrier_sense_errors);
12056
12057	stats->rx_crc_errors = old_stats->rx_crc_errors +
12058		tg3_calc_crc_errors(tp);
12059
12060	stats->rx_missed_errors = old_stats->rx_missed_errors +
12061		get_stat64(&hw_stats->rx_discards);
12062
12063	/* Aggregate per-queue counters. The per-queue counters are updated
12064	 * by a single writer, race-free. The result computed by this loop
12065	 * might not be 100% accurate (counters can be updated in the middle of
12066	 * the loop) but the next tg3_get_nstats() will recompute the current
12067	 * value so it is acceptable.
12068	 *
12069	 * Note that these counters wrap around at 4G on 32-bit machines.
12070	 */
12071	rx_dropped = (unsigned long)(old_stats->rx_dropped);
12072	tx_dropped = (unsigned long)(old_stats->tx_dropped);
12073
12074	for (i = 0; i < tp->irq_cnt; i++) {
12075		struct tg3_napi *tnapi = &tp->napi[i];
12076
12077		rx_dropped += tnapi->rx_dropped;
12078		tx_dropped += tnapi->tx_dropped;
12079	}
12080
12081	stats->rx_dropped = rx_dropped;
12082	stats->tx_dropped = tx_dropped;
12083}
12084
12085static int tg3_get_regs_len(struct net_device *dev)
12086{
12087	return TG3_REG_BLK_SIZE;
12088}
12089
12090static void tg3_get_regs(struct net_device *dev,
12091		struct ethtool_regs *regs, void *_p)
12092{
12093	struct tg3 *tp = netdev_priv(dev);
12094
12095	regs->version = 0;
12096
12097	memset(_p, 0, TG3_REG_BLK_SIZE);
12098
12099	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12100		return;
12101
12102	tg3_full_lock(tp, 0);
12103
12104	tg3_dump_legacy_regs(tp, (u32 *)_p);
12105
12106	tg3_full_unlock(tp);
12107}
12108
12109static int tg3_get_eeprom_len(struct net_device *dev)
12110{
12111	struct tg3 *tp = netdev_priv(dev);
12112
12113	return tp->nvram_size;
12114}
12115
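/* NVRAM reads are done in whole 32-bit words, so the EEPROM read
 * below proceeds in up to three phases: leading bytes up to the first
 * 4-byte boundary, aligned words in the middle, and any trailing
 * bytes.  E.g. offset = 1, len = 2 is served entirely by one word
 * read.
 */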
12116static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12117{
12118	struct tg3 *tp = netdev_priv(dev);
12119	int ret, cpmu_restore = 0;
12120	u8  *pd;
12121	u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
12122	__be32 val;
12123
12124	if (tg3_flag(tp, NO_NVRAM))
12125		return -EINVAL;
12126
12127	offset = eeprom->offset;
12128	len = eeprom->len;
12129	eeprom->len = 0;
12130
12131	eeprom->magic = TG3_EEPROM_MAGIC;
12132
12133	/* Override clock, link aware and link idle modes */
12134	if (tg3_flag(tp, CPMU_PRESENT)) {
12135		cpmu_val = tr32(TG3_CPMU_CTRL);
12136		if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
12137				CPMU_CTRL_LINK_IDLE_MODE)) {
12138			tw32(TG3_CPMU_CTRL, cpmu_val &
12139					    ~(CPMU_CTRL_LINK_AWARE_MODE |
12140					     CPMU_CTRL_LINK_IDLE_MODE));
12141			cpmu_restore = 1;
12142		}
12143	}
12144	tg3_override_clk(tp);
12145
12146	if (offset & 3) {
12147		/* adjustments to start on required 4 byte boundary */
12148		b_offset = offset & 3;
12149		b_count = 4 - b_offset;
12150		if (b_count > len) {
12151			/* i.e. offset=1 len=2 */
12152			b_count = len;
12153		}
12154		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12155		if (ret)
12156			goto eeprom_done;
12157		memcpy(data, ((char *)&val) + b_offset, b_count);
12158		len -= b_count;
12159		offset += b_count;
12160		eeprom->len += b_count;
12161	}
12162
12163	/* read bytes up to the last 4 byte boundary */
12164	pd = &data[eeprom->len];
12165	for (i = 0; i < (len - (len & 3)); i += 4) {
12166		ret = tg3_nvram_read_be32(tp, offset + i, &val);
12167		if (ret) {
12168			if (i)
12169				i -= 4;
12170			eeprom->len += i;
12171			goto eeprom_done;
12172		}
12173		memcpy(pd + i, &val, 4);
12174		if (need_resched()) {
12175			if (signal_pending(current)) {
12176				eeprom->len += i;
12177				ret = -EINTR;
12178				goto eeprom_done;
12179			}
12180			cond_resched();
12181		}
12182	}
12183	eeprom->len += i;
12184
12185	if (len & 3) {
12186		/* read last bytes not ending on 4 byte boundary */
12187		pd = &data[eeprom->len];
12188		b_count = len & 3;
12189		b_offset = offset + len - b_count;
12190		ret = tg3_nvram_read_be32(tp, b_offset, &val);
12191		if (ret)
12192			goto eeprom_done;
12193		memcpy(pd, &val, b_count);
12194		eeprom->len += b_count;
12195	}
12196	ret = 0;
12197
12198eeprom_done:
12199	/* Restore clock, link aware and link idle modes */
12200	tg3_restore_clk(tp);
12201	if (cpmu_restore)
12202		tw32(TG3_CPMU_CTRL, cpmu_val);
12203
12204	return ret;
12205}
12206
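/* NVRAM writes must also be whole 32-bit words, so an unaligned
 * request is widened with a read-modify-write: the words containing
 * the first and/or last bytes are read and placed around the caller's
 * data in a bounce buffer before tg3_nvram_write_block() is called.
 */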
12207static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12208{
12209	struct tg3 *tp = netdev_priv(dev);
12210	int ret;
12211	u32 offset, len, b_offset, odd_len;
12212	u8 *buf;
12213	__be32 start = 0, end;
12214
12215	if (tg3_flag(tp, NO_NVRAM) ||
12216	    eeprom->magic != TG3_EEPROM_MAGIC)
12217		return -EINVAL;
12218
12219	offset = eeprom->offset;
12220	len = eeprom->len;
12221
12222	if ((b_offset = (offset & 3))) {
12223		/* adjustments to start on required 4 byte boundary */
12224		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12225		if (ret)
12226			return ret;
12227		len += b_offset;
12228		offset &= ~3;
12229		if (len < 4)
12230			len = 4;
12231	}
12232
12233	odd_len = 0;
12234	if (len & 3) {
12235		/* adjustments to end on required 4 byte boundary */
12236		odd_len = 1;
12237		len = (len + 3) & ~3;
12238		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12239		if (ret)
12240			return ret;
12241	}
12242
12243	buf = data;
12244	if (b_offset || odd_len) {
12245		buf = kmalloc(len, GFP_KERNEL);
12246		if (!buf)
12247			return -ENOMEM;
12248		if (b_offset)
12249			memcpy(buf, &start, 4);
12250		if (odd_len)
12251			memcpy(buf+len-4, &end, 4);
12252		memcpy(buf + b_offset, data, eeprom->len);
12253	}
12254
12255	ret = tg3_nvram_write_block(tp, offset, len, buf);
12256
12257	if (buf != data)
12258		kfree(buf);
12259
12260	return ret;
12261}
12262
12263static int tg3_get_link_ksettings(struct net_device *dev,
12264				  struct ethtool_link_ksettings *cmd)
12265{
12266	struct tg3 *tp = netdev_priv(dev);
12267	u32 supported, advertising;
12268
12269	if (tg3_flag(tp, USE_PHYLIB)) {
12270		struct phy_device *phydev;
12271		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12272			return -EAGAIN;
12273		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12274		phy_ethtool_ksettings_get(phydev, cmd);
12275
12276		return 0;
12277	}
12278
12279	supported = (SUPPORTED_Autoneg);
12280
12281	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12282		supported |= (SUPPORTED_1000baseT_Half |
12283			      SUPPORTED_1000baseT_Full);
12284
12285	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12286		supported |= (SUPPORTED_100baseT_Half |
12287			      SUPPORTED_100baseT_Full |
12288			      SUPPORTED_10baseT_Half |
12289			      SUPPORTED_10baseT_Full |
12290			      SUPPORTED_TP);
12291		cmd->base.port = PORT_TP;
12292	} else {
12293		supported |= SUPPORTED_FIBRE;
12294		cmd->base.port = PORT_FIBRE;
12295	}
12296	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12297						supported);
12298
12299	advertising = tp->link_config.advertising;
12300	if (tg3_flag(tp, PAUSE_AUTONEG)) {
12301		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12302			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12303				advertising |= ADVERTISED_Pause;
12304			} else {
12305				advertising |= ADVERTISED_Pause |
12306					ADVERTISED_Asym_Pause;
12307			}
12308		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12309			advertising |= ADVERTISED_Asym_Pause;
12310		}
12311	}
12312	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12313						advertising);
12314
12315	if (netif_running(dev) && tp->link_up) {
12316		cmd->base.speed = tp->link_config.active_speed;
12317		cmd->base.duplex = tp->link_config.active_duplex;
12318		ethtool_convert_legacy_u32_to_link_mode(
12319			cmd->link_modes.lp_advertising,
12320			tp->link_config.rmt_adv);
12321
12322		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12323			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12324				cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12325			else
12326				cmd->base.eth_tp_mdix = ETH_TP_MDI;
12327		}
12328	} else {
12329		cmd->base.speed = SPEED_UNKNOWN;
12330		cmd->base.duplex = DUPLEX_UNKNOWN;
12331		cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12332	}
12333	cmd->base.phy_address = tp->phy_addr;
12334	cmd->base.autoneg = tp->link_config.autoneg;
12335	return 0;
12336}
12337
12338static int tg3_set_link_ksettings(struct net_device *dev,
12339				  const struct ethtool_link_ksettings *cmd)
12340{
12341	struct tg3 *tp = netdev_priv(dev);
12342	u32 speed = cmd->base.speed;
12343	u32 advertising;
12344
12345	if (tg3_flag(tp, USE_PHYLIB)) {
12346		struct phy_device *phydev;
12347		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12348			return -EAGAIN;
12349		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12350		return phy_ethtool_ksettings_set(phydev, cmd);
12351	}
12352
12353	if (cmd->base.autoneg != AUTONEG_ENABLE &&
12354	    cmd->base.autoneg != AUTONEG_DISABLE)
12355		return -EINVAL;
12356
12357	if (cmd->base.autoneg == AUTONEG_DISABLE &&
12358	    cmd->base.duplex != DUPLEX_FULL &&
12359	    cmd->base.duplex != DUPLEX_HALF)
12360		return -EINVAL;
12361
12362	ethtool_convert_link_mode_to_legacy_u32(&advertising,
12363						cmd->link_modes.advertising);
12364
12365	if (cmd->base.autoneg == AUTONEG_ENABLE) {
12366		u32 mask = ADVERTISED_Autoneg |
12367			   ADVERTISED_Pause |
12368			   ADVERTISED_Asym_Pause;
12369
12370		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12371			mask |= ADVERTISED_1000baseT_Half |
12372				ADVERTISED_1000baseT_Full;
12373
12374		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12375			mask |= ADVERTISED_100baseT_Half |
12376				ADVERTISED_100baseT_Full |
12377				ADVERTISED_10baseT_Half |
12378				ADVERTISED_10baseT_Full |
12379				ADVERTISED_TP;
12380		else
12381			mask |= ADVERTISED_FIBRE;
12382
12383		if (advertising & ~mask)
12384			return -EINVAL;
12385
12386		mask &= (ADVERTISED_1000baseT_Half |
12387			 ADVERTISED_1000baseT_Full |
12388			 ADVERTISED_100baseT_Half |
12389			 ADVERTISED_100baseT_Full |
12390			 ADVERTISED_10baseT_Half |
12391			 ADVERTISED_10baseT_Full);
12392
12393		advertising &= mask;
12394	} else {
12395		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12396			if (speed != SPEED_1000)
12397				return -EINVAL;
12398
12399			if (cmd->base.duplex != DUPLEX_FULL)
12400				return -EINVAL;
12401		} else {
12402			if (speed != SPEED_100 &&
12403			    speed != SPEED_10)
12404				return -EINVAL;
12405		}
12406	}
12407
12408	tg3_full_lock(tp, 0);
12409
12410	tp->link_config.autoneg = cmd->base.autoneg;
12411	if (cmd->base.autoneg == AUTONEG_ENABLE) {
12412		tp->link_config.advertising = (advertising |
12413					      ADVERTISED_Autoneg);
12414		tp->link_config.speed = SPEED_UNKNOWN;
12415		tp->link_config.duplex = DUPLEX_UNKNOWN;
12416	} else {
12417		tp->link_config.advertising = 0;
12418		tp->link_config.speed = speed;
12419		tp->link_config.duplex = cmd->base.duplex;
12420	}
12421
12422	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12423
12424	tg3_warn_mgmt_link_flap(tp);
12425
12426	if (netif_running(dev))
12427		tg3_setup_phy(tp, true);
12428
12429	tg3_full_unlock(tp);
12430
12431	return 0;
12432}
12433
12434static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12435{
12436	struct tg3 *tp = netdev_priv(dev);
12437
12438	strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12439	strscpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12440	strscpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12441}
12442
12443static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12444{
12445	struct tg3 *tp = netdev_priv(dev);
12446
12447	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12448		wol->supported = WAKE_MAGIC;
12449	else
12450		wol->supported = 0;
12451	wol->wolopts = 0;
12452	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12453		wol->wolopts = WAKE_MAGIC;
12454	memset(&wol->sopass, 0, sizeof(wol->sopass));
12455}
12456
12457static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12458{
12459	struct tg3 *tp = netdev_priv(dev);
12460	struct device *dp = &tp->pdev->dev;
12461
12462	if (wol->wolopts & ~WAKE_MAGIC)
12463		return -EINVAL;
12464	if ((wol->wolopts & WAKE_MAGIC) &&
12465	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12466		return -EINVAL;
12467
12468	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12469
12470	if (device_may_wakeup(dp))
12471		tg3_flag_set(tp, WOL_ENABLE);
12472	else
12473		tg3_flag_clear(tp, WOL_ENABLE);
12474
12475	return 0;
12476}
12477
12478static u32 tg3_get_msglevel(struct net_device *dev)
12479{
12480	struct tg3 *tp = netdev_priv(dev);
12481	return tp->msg_enable;
12482}
12483
12484static void tg3_set_msglevel(struct net_device *dev, u32 value)
12485{
12486	struct tg3 *tp = netdev_priv(dev);
12487	tp->msg_enable = value;
12488}
12489
12490static int tg3_nway_reset(struct net_device *dev)
12491{
12492	struct tg3 *tp = netdev_priv(dev);
12493	int r;
12494
12495	if (!netif_running(dev))
12496		return -EAGAIN;
12497
12498	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12499		return -EINVAL;
12500
12501	tg3_warn_mgmt_link_flap(tp);
12502
12503	if (tg3_flag(tp, USE_PHYLIB)) {
12504		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12505			return -EAGAIN;
12506		r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12507	} else {
12508		u32 bmcr;
12509
12510		spin_lock_bh(&tp->lock);
12511		r = -EINVAL;
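		/* Dummy read; BMCR is deliberately read twice, apparently so
		 * that the value actually tested below is fresh rather than
		 * stale or latched.
		 */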
12512		tg3_readphy(tp, MII_BMCR, &bmcr);
12513		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12514		    ((bmcr & BMCR_ANENABLE) ||
12515		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12516			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12517						   BMCR_ANENABLE);
12518			r = 0;
12519		}
12520		spin_unlock_bh(&tp->lock);
12521	}
12522
12523	return r;
12524}
12525
12526static void tg3_get_ringparam(struct net_device *dev,
12527			      struct ethtool_ringparam *ering,
12528			      struct kernel_ethtool_ringparam *kernel_ering,
12529			      struct netlink_ext_ack *extack)
12530{
12531	struct tg3 *tp = netdev_priv(dev);
12532
12533	ering->rx_max_pending = tp->rx_std_ring_mask;
12534	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12535		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12536	else
12537		ering->rx_jumbo_max_pending = 0;
12538
12539	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12540
12541	ering->rx_pending = tp->rx_pending;
12542	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12543		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12544	else
12545		ering->rx_jumbo_pending = 0;
12546
12547	ering->tx_pending = tp->napi[0].tx_pending;
12548}
12549
12550static int tg3_set_ringparam(struct net_device *dev,
12551			     struct ethtool_ringparam *ering,
12552			     struct kernel_ethtool_ringparam *kernel_ering,
12553			     struct netlink_ext_ack *extack)
12554{
12555	struct tg3 *tp = netdev_priv(dev);
12556	int i, irq_sync = 0, err = 0;
12557	bool reset_phy = false;
12558
12559	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12560	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12561	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12562	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
12563	    (tg3_flag(tp, TSO_BUG) &&
12564	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12565		return -EINVAL;
12566
12567	if (netif_running(dev)) {
12568		tg3_phy_stop(tp);
12569		tg3_netif_stop(tp);
12570		irq_sync = 1;
12571	}
12572
12573	tg3_full_lock(tp, irq_sync);
12574
12575	tp->rx_pending = ering->rx_pending;
12576
12577	if (tg3_flag(tp, MAX_RXPEND_64) &&
12578	    tp->rx_pending > 63)
12579		tp->rx_pending = 63;
12580
12581	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12582		tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12583
12584	for (i = 0; i < tp->irq_max; i++)
12585		tp->napi[i].tx_pending = ering->tx_pending;
12586
12587	if (netif_running(dev)) {
12588		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12589		/* Reset PHY to avoid PHY lock up */
12590		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12591		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
12592		    tg3_asic_rev(tp) == ASIC_REV_5720)
12593			reset_phy = true;
12594
12595		err = tg3_restart_hw(tp, reset_phy);
12596		if (!err)
12597			tg3_netif_start(tp);
12598	}
12599
12600	tg3_full_unlock(tp);
12601
12602	if (irq_sync && !err)
12603		tg3_phy_start(tp);
12604
12605	return err;
12606}
12607
12608static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12609{
12610	struct tg3 *tp = netdev_priv(dev);
12611
12612	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12613
12614	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12615		epause->rx_pause = 1;
12616	else
12617		epause->rx_pause = 0;
12618
12619	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12620		epause->tx_pause = 1;
12621	else
12622		epause->tx_pause = 0;
12623}
12624
12625static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12626{
12627	struct tg3 *tp = netdev_priv(dev);
12628	int err = 0;
12629	bool reset_phy = false;
12630
12631	if (tp->link_config.autoneg == AUTONEG_ENABLE)
12632		tg3_warn_mgmt_link_flap(tp);
12633
12634	if (tg3_flag(tp, USE_PHYLIB)) {
12635		struct phy_device *phydev;
12636
12637		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12638
12639		if (!phy_validate_pause(phydev, epause))
12640			return -EINVAL;
12641
12642		tp->link_config.flowctrl = 0;
12643		phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
12644		if (epause->rx_pause) {
12645			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12646
12647			if (epause->tx_pause) {
12648				tp->link_config.flowctrl |= FLOW_CTRL_TX;
12649			}
12650		} else if (epause->tx_pause) {
12651			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12652		}
12653
12654		if (epause->autoneg)
12655			tg3_flag_set(tp, PAUSE_AUTONEG);
12656		else
12657			tg3_flag_clear(tp, PAUSE_AUTONEG);
12658
12659		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12660			if (phydev->autoneg) {
12661				/* phy_set_asym_pause() will
12662				 * renegotiate the link to inform our
12663				 * link partner of our flow control
12664				 * settings, even if the flow control
12665				 * is forced.  Let tg3_adjust_link()
12666				 * do the final flow control setup.
12667				 */
12668				return 0;
12669			}
12670
12671			if (!epause->autoneg)
12672				tg3_setup_flow_control(tp, 0, 0);
12673		}
12674	} else {
12675		int irq_sync = 0;
12676
12677		if (netif_running(dev)) {
12678			tg3_netif_stop(tp);
12679			irq_sync = 1;
12680		}
12681
12682		tg3_full_lock(tp, irq_sync);
12683
12684		if (epause->autoneg)
12685			tg3_flag_set(tp, PAUSE_AUTONEG);
12686		else
12687			tg3_flag_clear(tp, PAUSE_AUTONEG);
12688		if (epause->rx_pause)
12689			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12690		else
12691			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12692		if (epause->tx_pause)
12693			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12694		else
12695			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12696
12697		if (netif_running(dev)) {
12698			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12699			/* Reset PHY to avoid PHY lock up */
12700			if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12701			    tg3_asic_rev(tp) == ASIC_REV_5719 ||
12702			    tg3_asic_rev(tp) == ASIC_REV_5720)
12703				reset_phy = true;
12704
12705			err = tg3_restart_hw(tp, reset_phy);
12706			if (!err)
12707				tg3_netif_start(tp);
12708		}
12709
12710		tg3_full_unlock(tp);
12711	}
12712
12713	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12714
12715	return err;
12716}
12717
12718static int tg3_get_sset_count(struct net_device *dev, int sset)
12719{
12720	switch (sset) {
12721	case ETH_SS_TEST:
12722		return TG3_NUM_TEST;
12723	case ETH_SS_STATS:
12724		return TG3_NUM_STATS;
12725	default:
12726		return -EOPNOTSUPP;
12727	}
12728}
12729
12730static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12731			 u32 *rules __always_unused)
12732{
12733	struct tg3 *tp = netdev_priv(dev);
12734
12735	if (!tg3_flag(tp, SUPPORT_MSIX))
12736		return -EOPNOTSUPP;
12737
12738	switch (info->cmd) {
12739	case ETHTOOL_GRXRINGS:
12740		if (netif_running(tp->dev))
12741			info->data = tp->rxq_cnt;
12742		else {
12743			info->data = num_online_cpus();
12744			if (info->data > TG3_RSS_MAX_NUM_QS)
12745				info->data = TG3_RSS_MAX_NUM_QS;
12746		}
12747
12748		return 0;
12749
12750	default:
12751		return -EOPNOTSUPP;
12752	}
12753}
12754
12755static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12756{
12757	u32 size = 0;
12758	struct tg3 *tp = netdev_priv(dev);
12759
12760	if (tg3_flag(tp, SUPPORT_MSIX))
12761		size = TG3_RSS_INDIR_TBL_SIZE;
12762
12763	return size;
12764}
12765
12766static int tg3_get_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh)
12767{
12768	struct tg3 *tp = netdev_priv(dev);
12769	int i;
12770
12771	rxfh->hfunc = ETH_RSS_HASH_TOP;
12772	if (!rxfh->indir)
12773		return 0;
12774
12775	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12776		rxfh->indir[i] = tp->rss_ind_tbl[i];
12777
12778	return 0;
12779}
12780
12781static int tg3_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
12782			struct netlink_ext_ack *extack)
12783{
12784	struct tg3 *tp = netdev_priv(dev);
12785	size_t i;
12786
12787	/* We require at least one supported parameter to be changed and no
12788	 * change in any of the unsupported parameters.
12789	 */
12790	if (rxfh->key ||
12791	    (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
12792	     rxfh->hfunc != ETH_RSS_HASH_TOP))
12793		return -EOPNOTSUPP;
12794
12795	if (!rxfh->indir)
12796		return 0;
12797
12798	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12799		tp->rss_ind_tbl[i] = rxfh->indir[i];
12800
12801	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12802		return 0;
12803
12804	/* It is legal to write the indirection
12805	 * table while the device is running.
12806	 */
12807	tg3_full_lock(tp, 0);
12808	tg3_rss_write_indir_tbl(tp);
12809	tg3_full_unlock(tp);
12810
12811	return 0;
12812}
12813
12814static void tg3_get_channels(struct net_device *dev,
12815			     struct ethtool_channels *channel)
12816{
12817	struct tg3 *tp = netdev_priv(dev);
12818	u32 deflt_qs = netif_get_num_default_rss_queues();
12819
12820	channel->max_rx = tp->rxq_max;
12821	channel->max_tx = tp->txq_max;
12822
12823	if (netif_running(dev)) {
12824		channel->rx_count = tp->rxq_cnt;
12825		channel->tx_count = tp->txq_cnt;
12826	} else {
12827		if (tp->rxq_req)
12828			channel->rx_count = tp->rxq_req;
12829		else
12830			channel->rx_count = min(deflt_qs, tp->rxq_max);
12831
12832		if (tp->txq_req)
12833			channel->tx_count = tp->txq_req;
12834		else
12835			channel->tx_count = min(deflt_qs, tp->txq_max);
12836	}
12837}
12838
12839static int tg3_set_channels(struct net_device *dev,
12840			    struct ethtool_channels *channel)
12841{
12842	struct tg3 *tp = netdev_priv(dev);
12843
12844	if (!tg3_flag(tp, SUPPORT_MSIX))
12845		return -EOPNOTSUPP;
12846
12847	if (channel->rx_count > tp->rxq_max ||
12848	    channel->tx_count > tp->txq_max)
12849		return -EINVAL;
12850
12851	tp->rxq_req = channel->rx_count;
12852	tp->txq_req = channel->tx_count;
12853
12854	if (!netif_running(dev))
12855		return 0;
12856
12857	tg3_stop(tp);
12858
12859	tg3_carrier_off(tp);
12860
12861	tg3_start(tp, true, false, false);
12862
12863	return 0;
12864}
12865
12866static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12867{
12868	switch (stringset) {
12869	case ETH_SS_STATS:
12870		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12871		break;
12872	case ETH_SS_TEST:
12873		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12874		break;
12875	default:
12876		WARN_ON(1);	/* we need a WARN() */
12877		break;
12878	}
12879}
12880
12881static int tg3_set_phys_id(struct net_device *dev,
12882			    enum ethtool_phys_id_state state)
12883{
12884	struct tg3 *tp = netdev_priv(dev);
12885
12886	switch (state) {
12887	case ETHTOOL_ID_ACTIVE:
12888		return 1;	/* cycle on/off once per second */
12889
12890	case ETHTOOL_ID_ON:
12891		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12892		     LED_CTRL_1000MBPS_ON |
12893		     LED_CTRL_100MBPS_ON |
12894		     LED_CTRL_10MBPS_ON |
12895		     LED_CTRL_TRAFFIC_OVERRIDE |
12896		     LED_CTRL_TRAFFIC_BLINK |
12897		     LED_CTRL_TRAFFIC_LED);
12898		break;
12899
12900	case ETHTOOL_ID_OFF:
12901		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12902		     LED_CTRL_TRAFFIC_OVERRIDE);
12903		break;
12904
12905	case ETHTOOL_ID_INACTIVE:
12906		tw32(MAC_LED_CTRL, tp->led_ctrl);
12907		break;
12908	}
12909
12910	return 0;
12911}
12912
12913static void tg3_get_ethtool_stats(struct net_device *dev,
12914				   struct ethtool_stats *estats, u64 *tmp_stats)
12915{
12916	struct tg3 *tp = netdev_priv(dev);
12917
12918	if (tp->hw_stats)
12919		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12920	else
12921		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12922}
12923
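/* Fetch the VPD block.  For NVRAM with the standard tg3 magic, look
 * for an extended-VPD entry in the NVRAM directory, falling back to
 * the fixed TG3_NVM_VPD_OFF region; otherwise read it through the
 * PCI VPD capability.
 */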
12924static __be32 *tg3_vpd_readblock(struct tg3 *tp, unsigned int *vpdlen)
12925{
12926	int i;
12927	__be32 *buf;
12928	u32 offset = 0, len = 0;
12929	u32 magic, val;
12930
12931	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12932		return NULL;
12933
12934	if (magic == TG3_EEPROM_MAGIC) {
12935		for (offset = TG3_NVM_DIR_START;
12936		     offset < TG3_NVM_DIR_END;
12937		     offset += TG3_NVM_DIRENT_SIZE) {
12938			if (tg3_nvram_read(tp, offset, &val))
12939				return NULL;
12940
12941			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12942			    TG3_NVM_DIRTYPE_EXTVPD)
12943				break;
12944		}
12945
12946		if (offset != TG3_NVM_DIR_END) {
12947			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12948			if (tg3_nvram_read(tp, offset + 4, &offset))
12949				return NULL;
12950
12951			offset = tg3_nvram_logical_addr(tp, offset);
12952		}
12953
12954		if (!offset || !len) {
12955			offset = TG3_NVM_VPD_OFF;
12956			len = TG3_NVM_VPD_LEN;
12957		}
12958
12959		buf = kmalloc(len, GFP_KERNEL);
12960		if (!buf)
12961			return NULL;
12962
12963		for (i = 0; i < len; i += 4) {
12964			/* The data is in little-endian format in NVRAM.
12965			 * Use the big-endian read routines to preserve
12966			 * the byte order as it exists in NVRAM.
12967			 */
12968			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12969				goto error;
12970		}
12971		*vpdlen = len;
12972	} else {
12973		buf = pci_vpd_alloc(tp->pdev, vpdlen);
12974		if (IS_ERR(buf))
12975			return NULL;
12976	}
12977
12978	return buf;
12979
12980error:
12981	kfree(buf);
12982	return NULL;
12983}
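/* A sketch of the lookup done in tg3_vpd_readblock(), assuming the usual
 * NVRAM directory layout: each TG3_NVM_DIRENT_SIZE entry starts with a
 * type/length word (type above TG3_NVM_DIRTYPE_SHIFT, length counted in
 * 32-bit words, hence the "* 4" byte conversion), followed by the data
 * pointer read at offset + 4.  When no extended-VPD entry exists, the
 * fixed legacy window at TG3_NVM_VPD_OFF/TG3_NVM_VPD_LEN is used instead.
 * The copy deliberately uses tg3_nvram_read_be32() so the bytes land in
 * the buffer in exactly the order they appear in NVRAM.
 */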
12984
12985#define NVRAM_TEST_SIZE 0x100
12986#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
12987#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
12988#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
12989#define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
12990#define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
12991#define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
12992#define NVRAM_SELFBOOT_HW_SIZE 0x20
12993#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12994
12995static int tg3_test_nvram(struct tg3 *tp)
12996{
12997	u32 csum, magic;
12998	__be32 *buf;
12999	int i, j, k, err = 0, size;
13000	unsigned int len;
13001
13002	if (tg3_flag(tp, NO_NVRAM))
13003		return 0;
13004
13005	if (tg3_nvram_read(tp, 0, &magic) != 0)
13006		return -EIO;
13007
13008	if (magic == TG3_EEPROM_MAGIC)
13009		size = NVRAM_TEST_SIZE;
13010	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
13011		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
13012		    TG3_EEPROM_SB_FORMAT_1) {
13013			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
13014			case TG3_EEPROM_SB_REVISION_0:
13015				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
13016				break;
13017			case TG3_EEPROM_SB_REVISION_2:
13018				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
13019				break;
13020			case TG3_EEPROM_SB_REVISION_3:
13021				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
13022				break;
13023			case TG3_EEPROM_SB_REVISION_4:
13024				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
13025				break;
13026			case TG3_EEPROM_SB_REVISION_5:
13027				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
13028				break;
13029			case TG3_EEPROM_SB_REVISION_6:
13030				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
13031				break;
13032			default:
13033				return -EIO;
13034			}
13035		} else
13036			return 0;
13037	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13038		size = NVRAM_SELFBOOT_HW_SIZE;
13039	else
13040		return -EIO;
13041
13042	buf = kmalloc(size, GFP_KERNEL);
13043	if (buf == NULL)
13044		return -ENOMEM;
13045
13046	err = -EIO;
13047	for (i = 0, j = 0; i < size; i += 4, j++) {
13048		err = tg3_nvram_read_be32(tp, i, &buf[j]);
13049		if (err)
13050			break;
13051	}
13052	if (i < size)
13053		goto out;
13054
13055	/* Selfboot format */
13056	magic = be32_to_cpu(buf[0]);
13057	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
13058	    TG3_EEPROM_MAGIC_FW) {
13059		u8 *buf8 = (u8 *) buf, csum8 = 0;
13060
13061		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
13062		    TG3_EEPROM_SB_REVISION_2) {
13063			/* For rev 2, the csum doesn't include the MBA. */
13064			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
13065				csum8 += buf8[i];
13066			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
13067				csum8 += buf8[i];
13068		} else {
13069			for (i = 0; i < size; i++)
13070				csum8 += buf8[i];
13071		}
13072
13073		if (csum8 == 0) {
13074			err = 0;
13075			goto out;
13076		}
13077
13078		err = -EIO;
13079		goto out;
13080	}
13081
13082	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
13083	    TG3_EEPROM_MAGIC_HW) {
13084		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
13085		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
13086		u8 *buf8 = (u8 *) buf;
13087
13088		/* Separate the parity bits and the data bytes.  */
13089		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
13090			if ((i == 0) || (i == 8)) {
13091				int l;
13092				u8 msk;
13093
13094				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
13095					parity[k++] = buf8[i] & msk;
13096				i++;
13097			} else if (i == 16) {
13098				int l;
13099				u8 msk;
13100
13101				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
13102					parity[k++] = buf8[i] & msk;
13103				i++;
13104
13105				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
13106					parity[k++] = buf8[i] & msk;
13107				i++;
13108			}
13109			data[j++] = buf8[i];
13110		}
13111
13112		err = -EIO;
13113		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
13114			u8 hw8 = hweight8(data[i]);
13115
13116			if ((hw8 & 0x1) && parity[i])
13117				goto out;
13118			else if (!(hw8 & 0x1) && !parity[i])
13119				goto out;
13120		}
13121		err = 0;
13122		goto out;
13123	}
13124
13125	err = -EIO;
13126
13127	/* Bootstrap checksum at offset 0x10 */
13128	csum = calc_crc((unsigned char *) buf, 0x10);
13129
13130	/* The type of buf is __be32 *, but this value is __le32 */
13131	if (csum != le32_to_cpu((__force __le32)buf[0x10 / 4]))
13132		goto out;
13133
13134	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
13135	csum = calc_crc((unsigned char *)&buf[0x74 / 4], 0x88);
13136
13137	/* The type of buf is __be32 *, but this value is __le32 */
13138	if (csum != le32_to_cpu((__force __le32)buf[0xfc / 4]))
13139		goto out;
13140
13141	kfree(buf);
13142
13143	buf = tg3_vpd_readblock(tp, &len);
13144	if (!buf)
13145		return -ENOMEM;
13146
13147	err = pci_vpd_check_csum(buf, len);
13148	/* go on if no checksum found */
13149	if (err == 1)
13150		err = 0;
13151out:
13152	kfree(buf);
13153	return err;
13154}
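/* Summary of the three image formats tg3_test_nvram() accepts:
 *
 *  - legacy (TG3_EEPROM_MAGIC): a CRC of bytes 0x00-0x0f is stored
 *    little-endian at offset 0x10, and a CRC of the 0x88-byte
 *    manufacturing block at 0x74 is stored at 0xfc; the VPD block is
 *    then checked separately via pci_vpd_check_csum();
 *  - selfboot format 1: the byte-wise sum of the whole image (minus the
 *    word at TG3_EEPROM_SB_F1R2_MBA_OFF for rev 2) must be zero;
 *  - selfboot HW format: each data byte is stored with a parity bit, and
 *    the loop above fails the test unless hweight8(data[i]) plus the
 *    stored bit gives odd parity overall.
 */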
13155
13156#define TG3_SERDES_TIMEOUT_SEC	2
13157#define TG3_COPPER_TIMEOUT_SEC	6
13158
13159static int tg3_test_link(struct tg3 *tp)
13160{
13161	int i, max;
13162
13163	if (!netif_running(tp->dev))
13164		return -ENODEV;
13165
13166	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13167		max = TG3_SERDES_TIMEOUT_SEC;
13168	else
13169		max = TG3_COPPER_TIMEOUT_SEC;
13170
13171	for (i = 0; i < max; i++) {
13172		if (tp->link_up)
13173			return 0;
13174
13175		if (msleep_interruptible(1000))
13176			break;
13177	}
13178
13179	return -EIO;
13180}
13181
13182/* Only test the commonly used registers */
13183static int tg3_test_registers(struct tg3 *tp)
13184{
13185	int i, is_5705, is_5750;
13186	u32 offset, read_mask, write_mask, val, save_val, read_val;
13187	static struct {
13188		u16 offset;
13189		u16 flags;
13190#define TG3_FL_5705	0x1
13191#define TG3_FL_NOT_5705	0x2
13192#define TG3_FL_NOT_5788	0x4
13193#define TG3_FL_NOT_5750	0x8
13194		u32 read_mask;
13195		u32 write_mask;
13196	} reg_tbl[] = {
13197		/* MAC Control Registers */
13198		{ MAC_MODE, TG3_FL_NOT_5705,
13199			0x00000000, 0x00ef6f8c },
13200		{ MAC_MODE, TG3_FL_5705,
13201			0x00000000, 0x01ef6b8c },
13202		{ MAC_STATUS, TG3_FL_NOT_5705,
13203			0x03800107, 0x00000000 },
13204		{ MAC_STATUS, TG3_FL_5705,
13205			0x03800100, 0x00000000 },
13206		{ MAC_ADDR_0_HIGH, 0x0000,
13207			0x00000000, 0x0000ffff },
13208		{ MAC_ADDR_0_LOW, 0x0000,
13209			0x00000000, 0xffffffff },
13210		{ MAC_RX_MTU_SIZE, 0x0000,
13211			0x00000000, 0x0000ffff },
13212		{ MAC_TX_MODE, 0x0000,
13213			0x00000000, 0x00000070 },
13214		{ MAC_TX_LENGTHS, 0x0000,
13215			0x00000000, 0x00003fff },
13216		{ MAC_RX_MODE, TG3_FL_NOT_5705,
13217			0x00000000, 0x000007fc },
13218		{ MAC_RX_MODE, TG3_FL_5705,
13219			0x00000000, 0x000007dc },
13220		{ MAC_HASH_REG_0, 0x0000,
13221			0x00000000, 0xffffffff },
13222		{ MAC_HASH_REG_1, 0x0000,
13223			0x00000000, 0xffffffff },
13224		{ MAC_HASH_REG_2, 0x0000,
13225			0x00000000, 0xffffffff },
13226		{ MAC_HASH_REG_3, 0x0000,
13227			0x00000000, 0xffffffff },
13228
13229		/* Receive Data and Receive BD Initiator Control Registers. */
13230		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13231			0x00000000, 0xffffffff },
13232		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13233			0x00000000, 0xffffffff },
13234		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13235			0x00000000, 0x00000003 },
13236		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13237			0x00000000, 0xffffffff },
13238		{ RCVDBDI_STD_BD+0, 0x0000,
13239			0x00000000, 0xffffffff },
13240		{ RCVDBDI_STD_BD+4, 0x0000,
13241			0x00000000, 0xffffffff },
13242		{ RCVDBDI_STD_BD+8, 0x0000,
13243			0x00000000, 0xffff0002 },
13244		{ RCVDBDI_STD_BD+0xc, 0x0000,
13245			0x00000000, 0xffffffff },
13246
13247		/* Receive BD Initiator Control Registers. */
13248		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13249			0x00000000, 0xffffffff },
13250		{ RCVBDI_STD_THRESH, TG3_FL_5705,
13251			0x00000000, 0x000003ff },
13252		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13253			0x00000000, 0xffffffff },
13254
13255		/* Host Coalescing Control Registers. */
13256		{ HOSTCC_MODE, TG3_FL_NOT_5705,
13257			0x00000000, 0x00000004 },
13258		{ HOSTCC_MODE, TG3_FL_5705,
13259			0x00000000, 0x000000f6 },
13260		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13261			0x00000000, 0xffffffff },
13262		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13263			0x00000000, 0x000003ff },
13264		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13265			0x00000000, 0xffffffff },
13266		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13267			0x00000000, 0x000003ff },
13268		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13269			0x00000000, 0xffffffff },
13270		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13271			0x00000000, 0x000000ff },
13272		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13273			0x00000000, 0xffffffff },
13274		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13275			0x00000000, 0x000000ff },
13276		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13277			0x00000000, 0xffffffff },
13278		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13279			0x00000000, 0xffffffff },
13280		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13281			0x00000000, 0xffffffff },
13282		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13283			0x00000000, 0x000000ff },
13284		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13285			0x00000000, 0xffffffff },
13286		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13287			0x00000000, 0x000000ff },
13288		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13289			0x00000000, 0xffffffff },
13290		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13291			0x00000000, 0xffffffff },
13292		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13293			0x00000000, 0xffffffff },
13294		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13295			0x00000000, 0xffffffff },
13296		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13297			0x00000000, 0xffffffff },
13298		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13299			0xffffffff, 0x00000000 },
13300		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13301			0xffffffff, 0x00000000 },
13302
13303		/* Buffer Manager Control Registers. */
13304		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13305			0x00000000, 0x007fff80 },
13306		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13307			0x00000000, 0x007fffff },
13308		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13309			0x00000000, 0x0000003f },
13310		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13311			0x00000000, 0x000001ff },
13312		{ BUFMGR_MB_HIGH_WATER, 0x0000,
13313			0x00000000, 0x000001ff },
13314		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13315			0xffffffff, 0x00000000 },
13316		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13317			0xffffffff, 0x00000000 },
13318
13319		/* Mailbox Registers */
13320		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13321			0x00000000, 0x000001ff },
13322		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13323			0x00000000, 0x000001ff },
13324		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13325			0x00000000, 0x000007ff },
13326		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13327			0x00000000, 0x000001ff },
13328
13329		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
13330	};
13331
13332	is_5705 = is_5750 = 0;
13333	if (tg3_flag(tp, 5705_PLUS)) {
13334		is_5705 = 1;
13335		if (tg3_flag(tp, 5750_PLUS))
13336			is_5750 = 1;
13337	}
13338
13339	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13340		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13341			continue;
13342
13343		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13344			continue;
13345
13346		if (tg3_flag(tp, IS_5788) &&
13347		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
13348			continue;
13349
13350		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13351			continue;
13352
13353		offset = (u32) reg_tbl[i].offset;
13354		read_mask = reg_tbl[i].read_mask;
13355		write_mask = reg_tbl[i].write_mask;
13356
13357		/* Save the original register content */
13358		save_val = tr32(offset);
13359
13360		/* Determine the read-only value. */
13361		read_val = save_val & read_mask;
13362
13363		/* Write zero to the register, then make sure the read-only bits
13364		 * are not changed and the read/write bits are all zeros.
13365		 */
13366		tw32(offset, 0);
13367
13368		val = tr32(offset);
13369
13370		/* Test the read-only and read/write bits. */
13371		if (((val & read_mask) != read_val) || (val & write_mask))
13372			goto out;
13373
13374		/* Write ones to all the bits defined by read_mask and write_mask,
13375		 * then make sure the read-only bits are not changed and the
13376		 * read/write bits are all ones.
13377		 */
13378		tw32(offset, read_mask | write_mask);
13379
13380		val = tr32(offset);
13381
13382		/* Test the read-only bits. */
13383		if ((val & read_mask) != read_val)
13384			goto out;
13385
13386		/* Test the read/write bits. */
13387		if ((val & write_mask) != write_mask)
13388			goto out;
13389
13390		tw32(offset, save_val);
13391	}
13392
13393	return 0;
13394
13395out:
13396	if (netif_msg_hw(tp))
13397		netdev_err(tp->dev,
13398			   "Register test failed at offset %x\n", offset);
13399	tw32(offset, save_val);
13400	return -EIO;
13401}
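/* Worked example of one reg_tbl[] row: the TG3_FL_NOT_5705 MAC_MODE entry
 * has read_mask 0x00000000 and write_mask 0x00ef6f8c, so after
 * tw32(MAC_MODE, 0) every write_mask bit must read back as zero, and
 * after tw32(MAC_MODE, 0x00ef6f8c) the same bits must all read back as
 * ones.  Because save_val is rewritten on both the success and failure
 * paths, the test leaves the register contents intact.
 */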
13402
13403static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13404{
13405	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13406	int i;
13407	u32 j;
13408
13409	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13410		for (j = 0; j < len; j += 4) {
13411			u32 val;
13412
13413			tg3_write_mem(tp, offset + j, test_pattern[i]);
13414			tg3_read_mem(tp, offset + j, &val);
13415			if (val != test_pattern[i])
13416				return -EIO;
13417		}
13418	}
13419	return 0;
13420}
13421
13422static int tg3_test_memory(struct tg3 *tp)
13423{
13424	static struct mem_entry {
13425		u32 offset;
13426		u32 len;
13427	} mem_tbl_570x[] = {
13428		{ 0x00000000, 0x00b50},
13429		{ 0x00002000, 0x1c000},
13430		{ 0xffffffff, 0x00000}
13431	}, mem_tbl_5705[] = {
13432		{ 0x00000100, 0x0000c},
13433		{ 0x00000200, 0x00008},
13434		{ 0x00004000, 0x00800},
13435		{ 0x00006000, 0x01000},
13436		{ 0x00008000, 0x02000},
13437		{ 0x00010000, 0x0e000},
13438		{ 0xffffffff, 0x00000}
13439	}, mem_tbl_5755[] = {
13440		{ 0x00000200, 0x00008},
13441		{ 0x00004000, 0x00800},
13442		{ 0x00006000, 0x00800},
13443		{ 0x00008000, 0x02000},
13444		{ 0x00010000, 0x0c000},
13445		{ 0xffffffff, 0x00000}
13446	}, mem_tbl_5906[] = {
13447		{ 0x00000200, 0x00008},
13448		{ 0x00004000, 0x00400},
13449		{ 0x00006000, 0x00400},
13450		{ 0x00008000, 0x01000},
13451		{ 0x00010000, 0x01000},
13452		{ 0xffffffff, 0x00000}
13453	}, mem_tbl_5717[] = {
13454		{ 0x00000200, 0x00008},
13455		{ 0x00010000, 0x0a000},
13456		{ 0x00020000, 0x13c00},
13457		{ 0xffffffff, 0x00000}
13458	}, mem_tbl_57765[] = {
13459		{ 0x00000200, 0x00008},
13460		{ 0x00004000, 0x00800},
13461		{ 0x00006000, 0x09800},
13462		{ 0x00010000, 0x0a000},
13463		{ 0xffffffff, 0x00000}
13464	};
13465	struct mem_entry *mem_tbl;
13466	int err = 0;
13467	int i;
13468
13469	if (tg3_flag(tp, 5717_PLUS))
13470		mem_tbl = mem_tbl_5717;
13471	else if (tg3_flag(tp, 57765_CLASS) ||
13472		 tg3_asic_rev(tp) == ASIC_REV_5762)
13473		mem_tbl = mem_tbl_57765;
13474	else if (tg3_flag(tp, 5755_PLUS))
13475		mem_tbl = mem_tbl_5755;
13476	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13477		mem_tbl = mem_tbl_5906;
13478	else if (tg3_flag(tp, 5705_PLUS))
13479		mem_tbl = mem_tbl_5705;
13480	else
13481		mem_tbl = mem_tbl_570x;
13482
13483	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13484		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13485		if (err)
13486			break;
13487	}
13488
13489	return err;
13490}
13491
13492#define TG3_TSO_MSS		500
13493
13494#define TG3_TSO_IP_HDR_LEN	20
13495#define TG3_TSO_TCP_HDR_LEN	20
13496#define TG3_TSO_TCP_OPT_LEN	12
13497
13498static const u8 tg3_tso_header[] = {
134990x08, 0x00,
135000x45, 0x00, 0x00, 0x00,
135010x00, 0x00, 0x40, 0x00,
135020x40, 0x06, 0x00, 0x00,
135030x0a, 0x00, 0x00, 0x01,
135040x0a, 0x00, 0x00, 0x02,
135050x0d, 0x00, 0xe0, 0x00,
135060x00, 0x00, 0x01, 0x00,
135070x00, 0x00, 0x02, 0x00,
135080x80, 0x10, 0x10, 0x00,
135090x14, 0x09, 0x00, 0x00,
135100x01, 0x01, 0x08, 0x0a,
135110x11, 0x11, 0x11, 0x11,
135120x11, 0x11, 0x11, 0x11,
13513};
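/* Decoded, the template above is the Ethertype plus IPv4 and TCP headers
 * pasted in right after the two MAC addresses: 0x08 0x00 is ETH_P_IP;
 * the IPv4 header (IHL 5, DF set, TTL 64, proto TCP, tot_len and
 * checksum left zero to be filled in later) goes 10.0.0.1 -> 10.0.0.2;
 * the TCP header has a data offset of 8 words (20 bytes plus the 12-byte
 * NOP/NOP/timestamp option block, i.e. TG3_TSO_TCP_OPT_LEN) and the ACK
 * flag.  The pre-seeded TCP checksum 0x1409 is the 16-bit sum of the
 * pseudo-header constants: 0x0a00 + 0x0001 + 0x0a00 + 0x0002 + 0x0006.
 */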
13514
13515static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13516{
13517	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13518	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13519	u32 budget;
13520	struct sk_buff *skb;
13521	u8 *tx_data, *rx_data;
13522	dma_addr_t map;
13523	int num_pkts, tx_len, rx_len, i, err;
13524	struct tg3_rx_buffer_desc *desc;
13525	struct tg3_napi *tnapi, *rnapi;
13526	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13527
13528	tnapi = &tp->napi[0];
13529	rnapi = &tp->napi[0];
13530	if (tp->irq_cnt > 1) {
13531		if (tg3_flag(tp, ENABLE_RSS))
13532			rnapi = &tp->napi[1];
13533		if (tg3_flag(tp, ENABLE_TSS))
13534			tnapi = &tp->napi[1];
13535	}
13536	coal_now = tnapi->coal_now | rnapi->coal_now;
13537
13538	err = -EIO;
13539
13540	tx_len = pktsz;
13541	skb = netdev_alloc_skb(tp->dev, tx_len);
13542	if (!skb)
13543		return -ENOMEM;
13544
13545	tx_data = skb_put(skb, tx_len);
13546	memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13547	memset(tx_data + ETH_ALEN, 0x0, 8);
13548
13549	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13550
13551	if (tso_loopback) {
13552		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13553
13554		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13555			      TG3_TSO_TCP_OPT_LEN;
13556
13557		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13558		       sizeof(tg3_tso_header));
13559		mss = TG3_TSO_MSS;
13560
13561		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13562		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13563
13564		/* Set the total length field in the IP header */
13565		iph->tot_len = htons((u16)(mss + hdr_len));
13566
13567		base_flags = (TXD_FLAG_CPU_PRE_DMA |
13568			      TXD_FLAG_CPU_POST_DMA);
13569
13570		if (tg3_flag(tp, HW_TSO_1) ||
13571		    tg3_flag(tp, HW_TSO_2) ||
13572		    tg3_flag(tp, HW_TSO_3)) {
13573			struct tcphdr *th;
13574			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13575			th = (struct tcphdr *)&tx_data[val];
13576			th->check = 0;
13577		} else
13578			base_flags |= TXD_FLAG_TCPUDP_CSUM;
13579
13580		if (tg3_flag(tp, HW_TSO_3)) {
13581			mss |= (hdr_len & 0xc) << 12;
13582			if (hdr_len & 0x10)
13583				base_flags |= 0x00000010;
13584			base_flags |= (hdr_len & 0x3e0) << 5;
13585		} else if (tg3_flag(tp, HW_TSO_2))
13586			mss |= hdr_len << 9;
13587		else if (tg3_flag(tp, HW_TSO_1) ||
13588			 tg3_asic_rev(tp) == ASIC_REV_5705) {
13589			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13590		} else {
13591			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13592		}
13593
13594		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13595	} else {
13596		num_pkts = 1;
13597		data_off = ETH_HLEN;
13598
13599		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13600		    tx_len > VLAN_ETH_FRAME_LEN)
13601			base_flags |= TXD_FLAG_JMB_PKT;
13602	}
13603
13604	for (i = data_off; i < tx_len; i++)
13605		tx_data[i] = (u8) (i & 0xff);
13606
13607	map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE);
13608	if (dma_mapping_error(&tp->pdev->dev, map)) {
13609		dev_kfree_skb(skb);
13610		return -EIO;
13611	}
13612
13613	val = tnapi->tx_prod;
13614	tnapi->tx_buffers[val].skb = skb;
13615	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13616
13617	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13618	       rnapi->coal_now);
13619
13620	udelay(10);
13621
13622	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13623
13624	budget = tg3_tx_avail(tnapi);
13625	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13626			    base_flags | TXD_FLAG_END, mss, 0)) {
13627		tnapi->tx_buffers[val].skb = NULL;
13628		dev_kfree_skb(skb);
13629		return -EIO;
13630	}
13631
13632	tnapi->tx_prod++;
13633
13634	/* Sync BD data before updating mailbox */
13635	wmb();
13636
13637	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13638	tr32_mailbox(tnapi->prodmbox);
13639
13640	udelay(10);
13641
13642	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13643	for (i = 0; i < 35; i++) {
13644		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13645		       coal_now);
13646
13647		udelay(10);
13648
13649		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13650		rx_idx = rnapi->hw_status->idx[0].rx_producer;
13651		if ((tx_idx == tnapi->tx_prod) &&
13652		    (rx_idx == (rx_start_idx + num_pkts)))
13653			break;
13654	}
13655
13656	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13657	dev_kfree_skb(skb);
13658
13659	if (tx_idx != tnapi->tx_prod)
13660		goto out;
13661
13662	if (rx_idx != rx_start_idx + num_pkts)
13663		goto out;
13664
13665	val = data_off;
13666	while (rx_idx != rx_start_idx) {
13667		desc = &rnapi->rx_rcb[rx_start_idx++];
13668		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13669		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13670
13671		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13672		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13673			goto out;
13674
13675		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13676			 - ETH_FCS_LEN;
13677
13678		if (!tso_loopback) {
13679			if (rx_len != tx_len)
13680				goto out;
13681
13682			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13683				if (opaque_key != RXD_OPAQUE_RING_STD)
13684					goto out;
13685			} else {
13686				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13687					goto out;
13688			}
13689		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13690			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13691			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
13692			goto out;
13693		}
13694
13695		if (opaque_key == RXD_OPAQUE_RING_STD) {
13696			rx_data = tpr->rx_std_buffers[desc_idx].data;
13697			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13698					     mapping);
13699		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13700			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13701			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13702					     mapping);
13703		} else
13704			goto out;
13705
13706		dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len,
13707					DMA_FROM_DEVICE);
13708
13709		rx_data += TG3_RX_OFFSET(tp);
13710		for (i = data_off; i < rx_len; i++, val++) {
13711			if (*(rx_data + i) != (u8) (val & 0xff))
13712				goto out;
13713		}
13714	}
13715
13716	err = 0;
13717
13718	/* tg3_free_rings will unmap and free the rx_data */
13719out:
13720	return err;
13721}
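/* For the TSO case above, the 52-byte header (hdr_len = TG3_TSO_IP_HDR_LEN
 * + TG3_TSO_TCP_HDR_LEN + TG3_TSO_TCP_OPT_LEN) must be scattered across
 * the descriptor fields in a generation-specific way.  With HW_TSO_3,
 * e.g., bits 2-3 of hdr_len (here 0x4) land in the mss word at bit 12,
 * bit 4 becomes flag bit 4, and bits 5-9 (here 0x20) are shifted into
 * base_flags starting at bit 10; HW_TSO_2 instead stores the whole
 * hdr_len above the MSS at bit 9.  The payload itself is a simple
 * incrementing byte pattern, which lets the receive loop verify data
 * continuity across all num_pkts segments with a single running counter.
 */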
13722
13723#define TG3_STD_LOOPBACK_FAILED		1
13724#define TG3_JMB_LOOPBACK_FAILED		2
13725#define TG3_TSO_LOOPBACK_FAILED		4
13726#define TG3_LOOPBACK_FAILED \
13727	(TG3_STD_LOOPBACK_FAILED | \
13728	 TG3_JMB_LOOPBACK_FAILED | \
13729	 TG3_TSO_LOOPBACK_FAILED)
13730
13731static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13732{
13733	int err = -EIO;
13734	u32 eee_cap;
13735	u32 jmb_pkt_sz = 9000;
13736
13737	if (tp->dma_limit)
13738		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13739
13740	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13741	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13742
13743	if (!netif_running(tp->dev)) {
13744		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13745		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13746		if (do_extlpbk)
13747			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13748		goto done;
13749	}
13750
13751	err = tg3_reset_hw(tp, true);
13752	if (err) {
13753		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13754		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13755		if (do_extlpbk)
13756			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13757		goto done;
13758	}
13759
13760	if (tg3_flag(tp, ENABLE_RSS)) {
13761		int i;
13762
13763		/* Reroute all rx packets to the 1st queue */
13764		for (i = MAC_RSS_INDIR_TBL_0;
13765		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13766			tw32(i, 0x0);
13767	}
13768
13769	/* HW errata - mac loopback fails in some cases on 5780.
13770	 * Normal traffic and PHY loopback are not affected by the
13771	 * errata.  Also, the MAC loopback test is deprecated for
13772	 * all newer ASIC revisions.
13773	 */
13774	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13775	    !tg3_flag(tp, CPMU_PRESENT)) {
13776		tg3_mac_loopback(tp, true);
13777
13778		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13779			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13780
13781		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13782		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13783			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13784
13785		tg3_mac_loopback(tp, false);
13786	}
13787
13788	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13789	    !tg3_flag(tp, USE_PHYLIB)) {
13790		int i;
13791
13792		tg3_phy_lpbk_set(tp, 0, false);
13793
13794		/* Wait for link */
13795		for (i = 0; i < 100; i++) {
13796			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13797				break;
13798			mdelay(1);
13799		}
13800
13801		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13802			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13803		if (tg3_flag(tp, TSO_CAPABLE) &&
13804		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13805			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13806		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13807		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13808			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13809
13810		if (do_extlpbk) {
13811			tg3_phy_lpbk_set(tp, 0, true);
13812
13813			/* All link indications report up, but the hardware
13814			 * isn't really ready for about 20 msec.  Double it
13815			 * to be sure.
13816			 */
13817			mdelay(40);
13818
13819			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13820				data[TG3_EXT_LOOPB_TEST] |=
13821							TG3_STD_LOOPBACK_FAILED;
13822			if (tg3_flag(tp, TSO_CAPABLE) &&
13823			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13824				data[TG3_EXT_LOOPB_TEST] |=
13825							TG3_TSO_LOOPBACK_FAILED;
13826			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13827			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13828				data[TG3_EXT_LOOPB_TEST] |=
13829							TG3_JMB_LOOPBACK_FAILED;
13830		}
13831
13832		/* Re-enable gphy autopowerdown. */
13833		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13834			tg3_phy_toggle_apd(tp, true);
13835	}
13836
13837	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13838	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13839
13840done:
13841	tp->phy_flags |= eee_cap;
13842
13843	return err;
13844}
13845
13846static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13847			  u64 *data)
13848{
13849	struct tg3 *tp = netdev_priv(dev);
13850	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13851
13852	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13853		if (tg3_power_up(tp)) {
13854			etest->flags |= ETH_TEST_FL_FAILED;
13855			memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13856			return;
13857		}
13858		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13859	}
13860
13861	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13862
13863	if (tg3_test_nvram(tp) != 0) {
13864		etest->flags |= ETH_TEST_FL_FAILED;
13865		data[TG3_NVRAM_TEST] = 1;
13866	}
13867	if (!doextlpbk && tg3_test_link(tp)) {
13868		etest->flags |= ETH_TEST_FL_FAILED;
13869		data[TG3_LINK_TEST] = 1;
13870	}
13871	if (etest->flags & ETH_TEST_FL_OFFLINE) {
13872		int err, err2 = 0, irq_sync = 0;
13873
13874		if (netif_running(dev)) {
13875			tg3_phy_stop(tp);
13876			tg3_netif_stop(tp);
13877			irq_sync = 1;
13878		}
13879
13880		tg3_full_lock(tp, irq_sync);
13881		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13882		err = tg3_nvram_lock(tp);
13883		tg3_halt_cpu(tp, RX_CPU_BASE);
13884		if (!tg3_flag(tp, 5705_PLUS))
13885			tg3_halt_cpu(tp, TX_CPU_BASE);
13886		if (!err)
13887			tg3_nvram_unlock(tp);
13888
13889		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13890			tg3_phy_reset(tp);
13891
13892		if (tg3_test_registers(tp) != 0) {
13893			etest->flags |= ETH_TEST_FL_FAILED;
13894			data[TG3_REGISTER_TEST] = 1;
13895		}
13896
13897		if (tg3_test_memory(tp) != 0) {
13898			etest->flags |= ETH_TEST_FL_FAILED;
13899			data[TG3_MEMORY_TEST] = 1;
13900		}
13901
13902		if (doextlpbk)
13903			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13904
13905		if (tg3_test_loopback(tp, data, doextlpbk))
13906			etest->flags |= ETH_TEST_FL_FAILED;
13907
13908		tg3_full_unlock(tp);
13909
13910		if (tg3_test_interrupt(tp) != 0) {
13911			etest->flags |= ETH_TEST_FL_FAILED;
13912			data[TG3_INTERRUPT_TEST] = 1;
13913		}
13914
13915		tg3_full_lock(tp, 0);
13916
13917		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13918		if (netif_running(dev)) {
13919			tg3_flag_set(tp, INIT_COMPLETE);
13920			err2 = tg3_restart_hw(tp, true);
13921			if (!err2)
13922				tg3_netif_start(tp);
13923		}
13924
13925		tg3_full_unlock(tp);
13926
13927		if (irq_sync && !err2)
13928			tg3_phy_start(tp);
13929	}
13930	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13931		tg3_power_down_prepare(tp);
13932
13933}
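/* Rough shape of an offline run as sequenced above: quiesce the PHY and
 * NAPI, halt the chip and its RX CPU (plus the TX CPU on pre-5705 parts)
 * under the full lock, run the register and memory tests against the
 * quiesced hardware, run the loopback tests against a freshly reset
 * chip, drop the lock for the interrupt test, and finally restart the
 * hardware if the interface was up.  From userspace:
 *
 *	ethtool -t eth0 offline		# includes these offline tests
 *	ethtool -t eth0 online		# NVRAM and link checks only
 */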
13934
13935static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13936{
13937	struct tg3 *tp = netdev_priv(dev);
13938	struct hwtstamp_config stmpconf;
13939
13940	if (!tg3_flag(tp, PTP_CAPABLE))
13941		return -EOPNOTSUPP;
13942
13943	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13944		return -EFAULT;
13945
13946	if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13947	    stmpconf.tx_type != HWTSTAMP_TX_OFF)
13948		return -ERANGE;
13949
13950	switch (stmpconf.rx_filter) {
13951	case HWTSTAMP_FILTER_NONE:
13952		tp->rxptpctl = 0;
13953		break;
13954	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13955		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13956			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13957		break;
13958	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13959		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13960			       TG3_RX_PTP_CTL_SYNC_EVNT;
13961		break;
13962	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13963		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13964			       TG3_RX_PTP_CTL_DELAY_REQ;
13965		break;
13966	case HWTSTAMP_FILTER_PTP_V2_EVENT:
13967		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13968			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13969		break;
13970	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13971		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13972			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13973		break;
13974	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13975		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13976			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13977		break;
13978	case HWTSTAMP_FILTER_PTP_V2_SYNC:
13979		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13980			       TG3_RX_PTP_CTL_SYNC_EVNT;
13981		break;
13982	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13983		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13984			       TG3_RX_PTP_CTL_SYNC_EVNT;
13985		break;
13986	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13987		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13988			       TG3_RX_PTP_CTL_SYNC_EVNT;
13989		break;
13990	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13991		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13992			       TG3_RX_PTP_CTL_DELAY_REQ;
13993		break;
13994	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13995		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13996			       TG3_RX_PTP_CTL_DELAY_REQ;
13997		break;
13998	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13999		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
14000			       TG3_RX_PTP_CTL_DELAY_REQ;
14001		break;
14002	default:
14003		return -ERANGE;
14004	}
14005
14006	if (netif_running(dev) && tp->rxptpctl)
14007		tw32(TG3_RX_PTP_CTL,
14008		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
14009
14010	if (stmpconf.tx_type == HWTSTAMP_TX_ON)
14011		tg3_flag_set(tp, TX_TSTAMP_EN);
14012	else
14013		tg3_flag_clear(tp, TX_TSTAMP_EN);
14014
14015	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
14016		-EFAULT : 0;
14017}
14018
14019static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
14020{
14021	struct tg3 *tp = netdev_priv(dev);
14022	struct hwtstamp_config stmpconf;
14023
14024	if (!tg3_flag(tp, PTP_CAPABLE))
14025		return -EOPNOTSUPP;
14026
14027	stmpconf.flags = 0;
14028	stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
14029			    HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
14030
14031	switch (tp->rxptpctl) {
14032	case 0:
14033		stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
14034		break;
14035	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
14036		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
14037		break;
14038	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14039		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
14040		break;
14041	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14042		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
14043		break;
14044	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
14045		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
14046		break;
14047	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
14048		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
14049		break;
14050	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
14051		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
14052		break;
14053	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14054		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
14055		break;
14056	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14057		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
14058		break;
14059	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14060		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
14061		break;
14062	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14063		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
14064		break;
14065	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14066		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
14067		break;
14068	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14069		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
14070		break;
14071	default:
14072		WARN_ON_ONCE(1);
14073		return -ERANGE;
14074	}
14075
14076	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
14077		-EFAULT : 0;
14078}
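/* Both handlers implement the standard SIOCSHWTSTAMP/SIOCGHWTSTAMP ABI:
 * the requested struct hwtstamp_config is mapped onto the
 * TG3_RX_PTP_CTL_* filter bits and copied back, so userspace can see
 * which mode was actually programmed.  An illustrative request (error
 * handling omitted; needs <linux/net_tstamp.h>, <linux/sockios.h> and
 * CAP_NET_ADMIN, with fd any open AF_INET datagram socket):
 *
 *	struct hwtstamp_config cfg = { 0 };
 *	struct ifreq ifr = { 0 };
 *
 *	cfg.tx_type = HWTSTAMP_TX_ON;
 *	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 */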
14079
14080static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
14081{
14082	struct mii_ioctl_data *data = if_mii(ifr);
14083	struct tg3 *tp = netdev_priv(dev);
14084	int err;
14085
14086	if (tg3_flag(tp, USE_PHYLIB)) {
14087		struct phy_device *phydev;
14088		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
14089			return -EAGAIN;
14090		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
14091		return phy_mii_ioctl(phydev, ifr, cmd);
14092	}
14093
14094	switch (cmd) {
14095	case SIOCGMIIPHY:
14096		data->phy_id = tp->phy_addr;
14097
14098		fallthrough;
14099	case SIOCGMIIREG: {
14100		u32 mii_regval;
14101
14102		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14103			break;			/* We have no PHY */
14104
14105		if (!netif_running(dev))
14106			return -EAGAIN;
14107
14108		spin_lock_bh(&tp->lock);
14109		err = __tg3_readphy(tp, data->phy_id & 0x1f,
14110				    data->reg_num & 0x1f, &mii_regval);
14111		spin_unlock_bh(&tp->lock);
14112
14113		data->val_out = mii_regval;
14114
14115		return err;
14116	}
14117
14118	case SIOCSMIIREG:
14119		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14120			break;			/* We have no PHY */
14121
14122		if (!netif_running(dev))
14123			return -EAGAIN;
14124
14125		spin_lock_bh(&tp->lock);
14126		err = __tg3_writephy(tp, data->phy_id & 0x1f,
14127				     data->reg_num & 0x1f, data->val_in);
14128		spin_unlock_bh(&tp->lock);
14129
14130		return err;
14131
14132	case SIOCSHWTSTAMP:
14133		return tg3_hwtstamp_set(dev, ifr);
14134
14135	case SIOCGHWTSTAMP:
14136		return tg3_hwtstamp_get(dev, ifr);
14137
14138	default:
14139		/* do nothing */
14140		break;
14141	}
14142	return -EOPNOTSUPP;
14143}
14144
14145static int tg3_get_coalesce(struct net_device *dev,
14146			    struct ethtool_coalesce *ec,
14147			    struct kernel_ethtool_coalesce *kernel_coal,
14148			    struct netlink_ext_ack *extack)
14149{
14150	struct tg3 *tp = netdev_priv(dev);
14151
14152	memcpy(ec, &tp->coal, sizeof(*ec));
14153	return 0;
14154}
14155
14156static int tg3_set_coalesce(struct net_device *dev,
14157			    struct ethtool_coalesce *ec,
14158			    struct kernel_ethtool_coalesce *kernel_coal,
14159			    struct netlink_ext_ack *extack)
14160{
14161	struct tg3 *tp = netdev_priv(dev);
14162	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14163	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14164
14165	if (!tg3_flag(tp, 5705_PLUS)) {
14166		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14167		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14168		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14169		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14170	}
14171
14172	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14173	    (!ec->rx_coalesce_usecs) ||
14174	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14175	    (!ec->tx_coalesce_usecs) ||
14176	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14177	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14178	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14179	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14180	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14181	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14182	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14183	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14184		return -EINVAL;
14185
14186	/* Only copy relevant parameters, ignore all others. */
14187	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14188	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14189	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14190	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14191	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14192	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14193	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14194	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14195	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14196
14197	if (netif_running(dev)) {
14198		tg3_full_lock(tp, 0);
14199		__tg3_set_coalesce(tp, &tp->coal);
14200		tg3_full_unlock(tp);
14201	}
14202	return 0;
14203}
14204
14205static int tg3_set_eee(struct net_device *dev, struct ethtool_keee *edata)
14206{
14207	struct tg3 *tp = netdev_priv(dev);
14208
14209	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14210		netdev_warn(tp->dev, "Board does not support EEE!\n");
14211		return -EOPNOTSUPP;
14212	}
14213
14214	if (!linkmode_equal(edata->advertised, tp->eee.advertised)) {
14215		netdev_warn(tp->dev,
14216			    "Direct manipulation of EEE advertisement is not supported\n");
14217		return -EINVAL;
14218	}
14219
14220	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14221		netdev_warn(tp->dev,
14222			    "Maximum supported Tx LPI timer is %#x usec\n",
14223			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14224		return -EINVAL;
14225	}
14226
14227	tp->eee.eee_enabled = edata->eee_enabled;
14228	tp->eee.tx_lpi_enabled = edata->tx_lpi_enabled;
14229	tp->eee.tx_lpi_timer = edata->tx_lpi_timer;
14230
14231	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14232	tg3_warn_mgmt_link_flap(tp);
14233
14234	if (netif_running(tp->dev)) {
14235		tg3_full_lock(tp, 0);
14236		tg3_setup_eee(tp);
14237		tg3_phy_reset(tp);
14238		tg3_full_unlock(tp);
14239	}
14240
14241	return 0;
14242}
14243
14244static int tg3_get_eee(struct net_device *dev, struct ethtool_keee *edata)
14245{
14246	struct tg3 *tp = netdev_priv(dev);
14247
14248	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14249		netdev_warn(tp->dev,
14250			    "Board does not support EEE!\n");
14251		return -EOPNOTSUPP;
14252	}
14253
14254	*edata = tp->eee;
14255	return 0;
14256}
14257
14258static const struct ethtool_ops tg3_ethtool_ops = {
14259	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
14260				     ETHTOOL_COALESCE_MAX_FRAMES |
14261				     ETHTOOL_COALESCE_USECS_IRQ |
14262				     ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
14263				     ETHTOOL_COALESCE_STATS_BLOCK_USECS,
14264	.get_drvinfo		= tg3_get_drvinfo,
14265	.get_regs_len		= tg3_get_regs_len,
14266	.get_regs		= tg3_get_regs,
14267	.get_wol		= tg3_get_wol,
14268	.set_wol		= tg3_set_wol,
14269	.get_msglevel		= tg3_get_msglevel,
14270	.set_msglevel		= tg3_set_msglevel,
14271	.nway_reset		= tg3_nway_reset,
14272	.get_link		= ethtool_op_get_link,
14273	.get_eeprom_len		= tg3_get_eeprom_len,
14274	.get_eeprom		= tg3_get_eeprom,
14275	.set_eeprom		= tg3_set_eeprom,
14276	.get_ringparam		= tg3_get_ringparam,
14277	.set_ringparam		= tg3_set_ringparam,
14278	.get_pauseparam		= tg3_get_pauseparam,
14279	.set_pauseparam		= tg3_set_pauseparam,
14280	.self_test		= tg3_self_test,
14281	.get_strings		= tg3_get_strings,
14282	.set_phys_id		= tg3_set_phys_id,
14283	.get_ethtool_stats	= tg3_get_ethtool_stats,
14284	.get_coalesce		= tg3_get_coalesce,
14285	.set_coalesce		= tg3_set_coalesce,
14286	.get_sset_count		= tg3_get_sset_count,
14287	.get_rxnfc		= tg3_get_rxnfc,
14288	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
14289	.get_rxfh		= tg3_get_rxfh,
14290	.set_rxfh		= tg3_set_rxfh,
14291	.get_channels		= tg3_get_channels,
14292	.set_channels		= tg3_set_channels,
14293	.get_ts_info		= tg3_get_ts_info,
14294	.get_eee		= tg3_get_eee,
14295	.set_eee		= tg3_set_eee,
14296	.get_link_ksettings	= tg3_get_link_ksettings,
14297	.set_link_ksettings	= tg3_set_link_ksettings,
14298};
14299
14300static void tg3_get_stats64(struct net_device *dev,
14301			    struct rtnl_link_stats64 *stats)
14302{
14303	struct tg3 *tp = netdev_priv(dev);
14304
14305	spin_lock_bh(&tp->lock);
14306	if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14307		*stats = tp->net_stats_prev;
14308		spin_unlock_bh(&tp->lock);
14309		return;
14310	}
14311
14312	tg3_get_nstats(tp, stats);
14313	spin_unlock_bh(&tp->lock);
14314}
14315
14316static void tg3_set_rx_mode(struct net_device *dev)
14317{
14318	struct tg3 *tp = netdev_priv(dev);
14319
14320	if (!netif_running(dev))
14321		return;
14322
14323	tg3_full_lock(tp, 0);
14324	__tg3_set_rx_mode(dev);
14325	tg3_full_unlock(tp);
14326}
14327
14328static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14329			       int new_mtu)
14330{
14331	WRITE_ONCE(dev->mtu, new_mtu);
14332
14333	if (new_mtu > ETH_DATA_LEN) {
14334		if (tg3_flag(tp, 5780_CLASS)) {
14335			netdev_update_features(dev);
14336			tg3_flag_clear(tp, TSO_CAPABLE);
14337		} else {
14338			tg3_flag_set(tp, JUMBO_RING_ENABLE);
14339		}
14340	} else {
14341		if (tg3_flag(tp, 5780_CLASS)) {
14342			tg3_flag_set(tp, TSO_CAPABLE);
14343			netdev_update_features(dev);
14344		}
14345		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14346	}
14347}
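/* On 5780-class chips TSO and jumbo frames are mutually exclusive, so
 * crossing ETH_DATA_LEN in either direction toggles TSO_CAPABLE and lets
 * netdev_update_features() (via tg3_fix_features()) re-evaluate the
 * advertised feature set; all other chips simply turn the jumbo ring on
 * or off.  Note the ordering: the flag is cleared after the features
 * update when growing the MTU, but set before it when shrinking back.
 */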
14348
14349static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14350{
14351	struct tg3 *tp = netdev_priv(dev);
14352	int err;
14353	bool reset_phy = false;
14354
14355	if (!netif_running(dev)) {
14356		/* The new MTU will simply be picked up later,
14357		 * when the device is brought up.
14358		 */
14359		tg3_set_mtu(dev, tp, new_mtu);
14360		return 0;
14361	}
14362
14363	tg3_phy_stop(tp);
14364
14365	tg3_netif_stop(tp);
14366
14367	tg3_set_mtu(dev, tp, new_mtu);
14368
14369	tg3_full_lock(tp, 1);
14370
14371	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14372
14373	/* Reset PHY, otherwise the read DMA engine will be in a mode that
14374	 * breaks all requests to 256 bytes.
14375	 */
14376	if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14377	    tg3_asic_rev(tp) == ASIC_REV_5717 ||
14378	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
14379	    tg3_asic_rev(tp) == ASIC_REV_5720)
14380		reset_phy = true;
14381
14382	err = tg3_restart_hw(tp, reset_phy);
14383
14384	if (!err)
14385		tg3_netif_start(tp);
14386
14387	tg3_full_unlock(tp);
14388
14389	if (!err)
14390		tg3_phy_start(tp);
14391
14392	return err;
14393}
14394
14395static const struct net_device_ops tg3_netdev_ops = {
14396	.ndo_open		= tg3_open,
14397	.ndo_stop		= tg3_close,
14398	.ndo_start_xmit		= tg3_start_xmit,
14399	.ndo_get_stats64	= tg3_get_stats64,
14400	.ndo_validate_addr	= eth_validate_addr,
14401	.ndo_set_rx_mode	= tg3_set_rx_mode,
14402	.ndo_set_mac_address	= tg3_set_mac_addr,
14403	.ndo_eth_ioctl		= tg3_ioctl,
14404	.ndo_tx_timeout		= tg3_tx_timeout,
14405	.ndo_change_mtu		= tg3_change_mtu,
14406	.ndo_fix_features	= tg3_fix_features,
14407	.ndo_set_features	= tg3_set_features,
14408#ifdef CONFIG_NET_POLL_CONTROLLER
14409	.ndo_poll_controller	= tg3_poll_controller,
14410#endif
14411};
14412
14413static void tg3_get_eeprom_size(struct tg3 *tp)
14414{
14415	u32 cursize, val, magic;
14416
14417	tp->nvram_size = EEPROM_CHIP_SIZE;
14418
14419	if (tg3_nvram_read(tp, 0, &magic) != 0)
14420		return;
14421
14422	if ((magic != TG3_EEPROM_MAGIC) &&
14423	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14424	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14425		return;
14426
14427	/*
14428	 * Size the chip by reading offsets at increasing powers of two.
14429	 * When we encounter our validation signature, we know the addressing
14430	 * has wrapped around, and thus have our chip size.
14431	 */
14432	cursize = 0x10;
14433
14434	while (cursize < tp->nvram_size) {
14435		if (tg3_nvram_read(tp, cursize, &val) != 0)
14436			return;
14437
14438		if (val == magic)
14439			break;
14440
14441		cursize <<= 1;
14442	}
14443
14444	tp->nvram_size = cursize;
14445}
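/* Example of the wraparound probe above, assuming a 4 KB part: reads at
 * 0x10, 0x20, 0x40, ... return ordinary data until cursize reaches
 * 0x1000, where the address wraps back to offset 0 and the read returns
 * the magic signature again, so cursize is the device size.  If no wrap
 * is ever seen, the size stays at the EEPROM_CHIP_SIZE default.
 */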
14446
14447static void tg3_get_nvram_size(struct tg3 *tp)
14448{
14449	u32 val;
14450
14451	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14452		return;
14453
14454	/* Selfboot format */
14455	if (val != TG3_EEPROM_MAGIC) {
14456		tg3_get_eeprom_size(tp);
14457		return;
14458	}
14459
14460	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14461		if (val != 0) {
14462			/* This is confusing.  We want to operate on the
14463			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
14464			 * call will read from NVRAM and byteswap the data
14465			 * according to the byteswapping settings for all
14466			 * other register accesses.  This ensures the data we
14467			 * want will always reside in the lower 16-bits.
14468			 * However, the data in NVRAM is in LE format, which
14469			 * means the data from the NVRAM read will always be
14470			 * opposite the endianness of the CPU.  The 16-bit
14471			 * byteswap then brings the data to CPU endianness.
14472			 */
14473			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14474			return;
14475		}
14476	}
14477	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14478}
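/* Concrete example of the swab16() dance described above: if the 16-bit
 * field at 0xf2 holds the size-in-KB value 0x0200 (stored "00 02" in
 * little-endian NVRAM), the byteswapping register read leaves the
 * opposite-of-CPU ordering 0x0002 in the low half of val on a
 * little-endian machine, swab16() turns it back into 0x0200, and the
 * size works out to 0x0200 * 1024 bytes, i.e. TG3_NVRAM_SIZE_512KB, on
 * either endianness.
 */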
14479
14480static void tg3_get_nvram_info(struct tg3 *tp)
14481{
14482	u32 nvcfg1;
14483
14484	nvcfg1 = tr32(NVRAM_CFG1);
14485	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14486		tg3_flag_set(tp, FLASH);
14487	} else {
14488		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14489		tw32(NVRAM_CFG1, nvcfg1);
14490	}
14491
14492	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14493	    tg3_flag(tp, 5780_CLASS)) {
14494		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14495		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14496			tp->nvram_jedecnum = JEDEC_ATMEL;
14497			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14498			tg3_flag_set(tp, NVRAM_BUFFERED);
14499			break;
14500		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14501			tp->nvram_jedecnum = JEDEC_ATMEL;
14502			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14503			break;
14504		case FLASH_VENDOR_ATMEL_EEPROM:
14505			tp->nvram_jedecnum = JEDEC_ATMEL;
14506			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14507			tg3_flag_set(tp, NVRAM_BUFFERED);
14508			break;
14509		case FLASH_VENDOR_ST:
14510			tp->nvram_jedecnum = JEDEC_ST;
14511			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14512			tg3_flag_set(tp, NVRAM_BUFFERED);
14513			break;
14514		case FLASH_VENDOR_SAIFUN:
14515			tp->nvram_jedecnum = JEDEC_SAIFUN;
14516			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14517			break;
14518		case FLASH_VENDOR_SST_SMALL:
14519		case FLASH_VENDOR_SST_LARGE:
14520			tp->nvram_jedecnum = JEDEC_SST;
14521			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14522			break;
14523		}
14524	} else {
14525		tp->nvram_jedecnum = JEDEC_ATMEL;
14526		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14527		tg3_flag_set(tp, NVRAM_BUFFERED);
14528	}
14529}
14530
14531static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14532{
14533	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14534	case FLASH_5752PAGE_SIZE_256:
14535		tp->nvram_pagesize = 256;
14536		break;
14537	case FLASH_5752PAGE_SIZE_512:
14538		tp->nvram_pagesize = 512;
14539		break;
14540	case FLASH_5752PAGE_SIZE_1K:
14541		tp->nvram_pagesize = 1024;
14542		break;
14543	case FLASH_5752PAGE_SIZE_2K:
14544		tp->nvram_pagesize = 2048;
14545		break;
14546	case FLASH_5752PAGE_SIZE_4K:
14547		tp->nvram_pagesize = 4096;
14548		break;
14549	case FLASH_5752PAGE_SIZE_264:
14550		tp->nvram_pagesize = 264;
14551		break;
14552	case FLASH_5752PAGE_SIZE_528:
14553		tp->nvram_pagesize = 528;
14554		break;
14555	}
14556}
14557
14558static void tg3_get_5752_nvram_info(struct tg3 *tp)
14559{
14560	u32 nvcfg1;
14561
14562	nvcfg1 = tr32(NVRAM_CFG1);
14563
14564	/* NVRAM protection for TPM */
14565	if (nvcfg1 & (1 << 27))
14566		tg3_flag_set(tp, PROTECTED_NVRAM);
14567
14568	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14569	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14570	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14571		tp->nvram_jedecnum = JEDEC_ATMEL;
14572		tg3_flag_set(tp, NVRAM_BUFFERED);
14573		break;
14574	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14575		tp->nvram_jedecnum = JEDEC_ATMEL;
14576		tg3_flag_set(tp, NVRAM_BUFFERED);
14577		tg3_flag_set(tp, FLASH);
14578		break;
14579	case FLASH_5752VENDOR_ST_M45PE10:
14580	case FLASH_5752VENDOR_ST_M45PE20:
14581	case FLASH_5752VENDOR_ST_M45PE40:
14582		tp->nvram_jedecnum = JEDEC_ST;
14583		tg3_flag_set(tp, NVRAM_BUFFERED);
14584		tg3_flag_set(tp, FLASH);
14585		break;
14586	}
14587
14588	if (tg3_flag(tp, FLASH)) {
14589		tg3_nvram_get_pagesize(tp, nvcfg1);
14590	} else {
14591		/* For eeprom, set pagesize to maximum eeprom size */
14592		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14593
14594		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14595		tw32(NVRAM_CFG1, nvcfg1);
14596	}
14597}
14598
14599static void tg3_get_5755_nvram_info(struct tg3 *tp)
14600{
14601	u32 nvcfg1, protect = 0;
14602
14603	nvcfg1 = tr32(NVRAM_CFG1);
14604
14605	/* NVRAM protection for TPM */
14606	if (nvcfg1 & (1 << 27)) {
14607		tg3_flag_set(tp, PROTECTED_NVRAM);
14608		protect = 1;
14609	}
14610
14611	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14612	switch (nvcfg1) {
14613	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14614	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14615	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14616	case FLASH_5755VENDOR_ATMEL_FLASH_5:
14617		tp->nvram_jedecnum = JEDEC_ATMEL;
14618		tg3_flag_set(tp, NVRAM_BUFFERED);
14619		tg3_flag_set(tp, FLASH);
14620		tp->nvram_pagesize = 264;
14621		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14622		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14623			tp->nvram_size = (protect ? 0x3e200 :
14624					  TG3_NVRAM_SIZE_512KB);
14625		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14626			tp->nvram_size = (protect ? 0x1f200 :
14627					  TG3_NVRAM_SIZE_256KB);
14628		else
14629			tp->nvram_size = (protect ? 0x1f200 :
14630					  TG3_NVRAM_SIZE_128KB);
14631		break;
14632	case FLASH_5752VENDOR_ST_M45PE10:
14633	case FLASH_5752VENDOR_ST_M45PE20:
14634	case FLASH_5752VENDOR_ST_M45PE40:
14635		tp->nvram_jedecnum = JEDEC_ST;
14636		tg3_flag_set(tp, NVRAM_BUFFERED);
14637		tg3_flag_set(tp, FLASH);
14638		tp->nvram_pagesize = 256;
14639		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14640			tp->nvram_size = (protect ?
14641					  TG3_NVRAM_SIZE_64KB :
14642					  TG3_NVRAM_SIZE_128KB);
14643		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14644			tp->nvram_size = (protect ?
14645					  TG3_NVRAM_SIZE_64KB :
14646					  TG3_NVRAM_SIZE_256KB);
14647		else
14648			tp->nvram_size = (protect ?
14649					  TG3_NVRAM_SIZE_128KB :
14650					  TG3_NVRAM_SIZE_512KB);
14651		break;
14652	}
14653}
14654
14655static void tg3_get_5787_nvram_info(struct tg3 *tp)
14656{
14657	u32 nvcfg1;
14658
14659	nvcfg1 = tr32(NVRAM_CFG1);
14660
14661	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14662	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14663	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14664	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14665	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14666		tp->nvram_jedecnum = JEDEC_ATMEL;
14667		tg3_flag_set(tp, NVRAM_BUFFERED);
14668		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14669
14670		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14671		tw32(NVRAM_CFG1, nvcfg1);
14672		break;
14673	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14674	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14675	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14676	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14677		tp->nvram_jedecnum = JEDEC_ATMEL;
14678		tg3_flag_set(tp, NVRAM_BUFFERED);
14679		tg3_flag_set(tp, FLASH);
14680		tp->nvram_pagesize = 264;
14681		break;
14682	case FLASH_5752VENDOR_ST_M45PE10:
14683	case FLASH_5752VENDOR_ST_M45PE20:
14684	case FLASH_5752VENDOR_ST_M45PE40:
14685		tp->nvram_jedecnum = JEDEC_ST;
14686		tg3_flag_set(tp, NVRAM_BUFFERED);
14687		tg3_flag_set(tp, FLASH);
14688		tp->nvram_pagesize = 256;
14689		break;
14690	}
14691}
14692
static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}

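/* The 5906 always uses a buffered Atmel AT24C512-style EEPROM, so no
 * strap decoding is needed.
 */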
static void tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}

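/* Identify the EEPROM or flash part behind a 57780-class device and,
 * where the strap encodes it, the device size.  An unrecognized strap
 * means no usable NVRAM.
 */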
static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}

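/* Strap decoding for the 5717/5719 family.  Some straps cover several
 * part sizes; those leave nvram_size at zero so the size is probed
 * later via tg3_nvram_get_size().
 */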
static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}

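/* Strap decoding for the 5720 and 5762.  The 5762 adds Macronix parts
 * whose size is autosensed by the hardware, and remaps its other
 * 5762-only straps onto 5720 equivalents before the common decode.
 */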
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp, nv_status;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
			tg3_flag_set(tp, NO_NVRAM);
			return;
		}

		switch (nvmpinstrp) {
		case FLASH_5762_MX25L_100:
		case FLASH_5762_MX25L_200:
		case FLASH_5762_MX25L_400:
		case FLASH_5762_MX25L_800:
		case FLASH_5762_MX25L_160_320:
			tp->nvram_pagesize = 4096;
			tp->nvram_jedecnum = JEDEC_MACRONIX;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
			tg3_flag_set(tp, FLASH);
			nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
			tp->nvram_size =
				(1 << (nv_status >> AUTOSENSE_DEVID &
						AUTOSENSE_DEVID_MASK)
					<< AUTOSENSE_SIZE_IN_MB);
			return;

		case FLASH_5762_EEPROM_HD:
			nvmpinstrp = FLASH_5720_EEPROM_HD;
			break;
		case FLASH_5762_EEPROM_LD:
			nvmpinstrp = FLASH_5720_EEPROM_LD;
			break;
		case FLASH_5720VENDOR_M_ST_M45PE20:
			/* This pinstrap supports multiple sizes, so force it
			 * to read the actual size from location 0xf0.
			 */
			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
			break;
		}
	}

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 val;

		if (tg3_nvram_read(tp, 0, &val))
			return;

		if (val != TG3_EEPROM_MAGIC &&
		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
			tg3_flag_set(tp, NO_NVRAM);
	}
}

/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void tg3_nvram_init(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
			 tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			 tg3_asic_rev(tp) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
			 tg3_asic_rev(tp) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}

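/* Map PCI subsystem IDs to PHY IDs for boards whose NVRAM carries no
 * usable PHY information.
 */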
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};

static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}
	return NULL;
}

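/* Pull the hardware configuration that bootcode left in NIC SRAM:
 * PHY ID, LED mode, WOL/ASF/APE enables and assorted PHY quirks.
 * Safe defaults are used when the SRAM signature is missing.
 */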
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
		u32 nic_phy_id, ver, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read from some older 5700/5701 bootcode.
			 */
			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);

			if (tg3_flag(tp, 5717_PLUS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
						LED_CTRL_BLINK_RATE_MASK;

			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by
		 * bootcode if bit 18 is set
		 */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
			    !tg3_flag(tp, 57765_PLUS) &&
			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
				tg3_flag_set(tp, ASPM_WORKAROUND);
			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);

		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
	}
done:
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}

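/* Read one 32-bit word from the chip's OTP region through the APE,
 * serialized against firmware by the NVRAM lock.  Returns 0 on
 * success or -EBUSY if the command never completes.
 */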
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;

	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
	udelay(10);

	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}

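/* Issue a single OTP controller command and poll for completion. */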
static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}

/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}

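/* Seed the link configuration with everything the PHY flags allow us
 * to advertise, defaulting to autonegotiation.
 */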
static void tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
			adv |= ADVERTISED_1000baseT_Half;
		adv |= ADVERTISED_1000baseT_Full;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_UNKNOWN;
	tp->link_config.duplex = DUPLEX_UNKNOWN;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_UNKNOWN;
	tp->link_config.active_duplex = DUPLEX_UNKNOWN;

	tp->old_link = -1;
}

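/* Establish the PHY identity: read it from the MII registers when
 * firmware is not using the PHY, otherwise fall back to the value
 * from tg3_get_eeprom_hw_cfg() or the subsystem ID table.  Also sets
 * up EEE and the initial link configuration.
 */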
static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* Flow control autonegotiation is the default behavior. */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	if (!tg3_flag(tp, ENABLE_ASF) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to the PHY_ID found in the EEPROM area and, failing
		 * that, the hard-coded subsystem device table.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (p) {
				tp->phy_id = p->phy_id;
			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* So far we have seen the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to a BCM4785, and there are
				 * probably more.  For now, just assume the
				 * PHY is supported when it is connected to
				 * an SSB core.
				 */
				return -ENODEV;
			}

			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

		linkmode_zero(tp->eee.supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 tp->eee.supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 tp->eee.supported);
		linkmode_copy(tp->eee.advertised, tp->eee.supported);

		tp->eee.eee_enabled = 1;
		tp->eee.tx_lpi_enabled = 1;
		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
	}

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}

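/* Extract the board part number (and, on Dell boards, a bootcode
 * version prefix) from the PCI VPD, falling back to names keyed off
 * the PCI device ID when no VPD is available.
 */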
static void tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int len, vpdlen;
	int i;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
					 PCI_VPD_RO_KEYWORD_MFR_ID, &len);
	if (i < 0)
		goto partno;

	if (len != 4 || memcmp(vpd_data + i, "1028", 4))
		goto partno;

	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
					 PCI_VPD_RO_KEYWORD_VENDOR0, &len);
	if (i < 0)
		goto partno;

	memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
	snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i);

partno:
	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
					 PCI_VPD_RO_KEYWORD_PARTNO, &len);
	if (i < 0)
		goto out_not_found;

	if (len > TG3_BPN_SIZE)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}

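/* A firmware image is valid when its first word carries the
 * 0x0c000000 magic in the upper bits and the following word is zero.
 */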
static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    val != 0)
		return 0;

	return 1;
}

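/* Append the bootcode version to tp->fw_ver, handling both the newer
 * string-based format and the older major/minor encoding.
 */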
static void tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}

static void tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}

static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;

	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}

static void tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}

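/* Flag APE firmware that advertises the NC-SI feature. */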
static void tg3_probe_ncsi(struct tg3 *tp)
{
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
		tg3_flag_set(tp, APE_HAS_NCSI);
}

static void tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_flag(tp, APE_HAS_NCSI))
		fwtype = "NCSI";
	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
		fwtype = "SMASH";
	else
		fwtype = "DASH";

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}

static void tg3_read_otp_ver(struct tg3 *tp)
{
	u32 val, val2;

	if (tg3_asic_rev(tp) != ASIC_REV_5762)
		return;

	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
	    TG3_OTP_MAGIC0_VALID(val)) {
		u64 val64 = (u64) val << 32 | val2;
		u32 ver = 0;
		int i, vlen;

		for (i = 0; i < 7; i++) {
			if ((val64 & 0xff) == 0)
				break;
			ver = val64 & 0xff;
			val64 >>= 8;
		}
		vlen = strlen(tp->fw_ver);
		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
	}
}

static void tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		tg3_read_otp_ver(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);

	if (tg3_flag(tp, ENABLE_ASF)) {
		if (tg3_flag(tp, ENABLE_APE)) {
			tg3_probe_ncsi(tp);
			if (!vpd_vers)
				tg3_read_dash_ver(tp);
		} else if (!vpd_vers) {
			tg3_read_mgmtfw_ver(tp);
		}
	}

	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}

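/* Largest RX return ring size usable by this chip. */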
static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}

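/* Host bridges known to reorder posted writes; see the
 * MBOX_WRITE_REORDER handling in tg3_get_invariants().
 */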
static const struct pci_device_id tg3_write_reorder_chipsets[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};

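/* On dual-port devices, find the PCI function that is the other port. */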
static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode; set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other.
	 */
	pci_dev_put(peer);

	return peer;
}

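/* Work out the chip revision ID, reading it from the alternate product
 * ID registers on newer devices, then derive the family flags
 * (5705_PLUS, 5750_PLUS, 57765_CLASS, ...) from it.
 */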
static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
	    tg3_asic_rev(tp) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}

static bool tg3_10_100_only_device(struct tg3 *tp,
				   const struct pci_device_id *ent)
{
	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;

	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		return true;

	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
				return true;
		} else {
			return true;
		}
	}

	return false;
}

16276static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16277{
16278	u32 misc_ctrl_reg;
16279	u32 pci_state_reg, grc_misc_cfg;
16280	u32 val;
16281	u16 pci_cmd;
16282	int err;
16283
16284	/* Force memory write invalidate off.  If we leave it on,
16285	 * then on 5700_BX chips we have to enable a workaround.
16286	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16287	 * to match the cacheline size.  The Broadcom driver have this
16288	 * workaround but turns MWI off all the times so never uses
16289	 * it.  This seems to suggest that the workaround is insufficient.
16290	 */
16291	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16292	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16293	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16294
16295	/* Important! -- Make sure register accesses are byteswapped
16296	 * correctly.  Also, for those chips that require it, make
16297	 * sure that indirect register accesses are enabled before
16298	 * the first operation.
16299	 */
16300	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16301			      &misc_ctrl_reg);
16302	tp->misc_host_ctrl |= (misc_ctrl_reg &
16303			       MISC_HOST_CTRL_CHIPREV);
16304	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16305			       tp->misc_host_ctrl);
16306
16307	tg3_detect_asic_rev(tp, misc_ctrl_reg);
16308
16309	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16310	 * we need to disable memory and use config. cycles
16311	 * only to access all registers. The 5702/03 chips
16312	 * can mistakenly decode the special cycles from the
16313	 * ICH chipsets as memory write cycles, causing corruption
16314	 * of register and memory space. Only certain ICH bridges
16315	 * will drive special cycles with non-zero data during the
16316	 * address phase which can fall within the 5703's address
16317	 * range. This is not an ICH bug as the PCI spec allows
16318	 * non-zero address during special cycles. However, only
16319	 * these ICH bridges are known to drive non-zero addresses
16320	 * during special cycles.
16321	 *
16322	 * Since special cycles do not cross PCI bridges, we only
16323	 * enable this workaround if the 5703 is on the secondary
16324	 * bus of these ICH bridges.
16325	 */
16326	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16327	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16328		static struct tg3_dev_id {
16329			u32	vendor;
16330			u32	device;
16331			u32	rev;
16332		} ich_chipsets[] = {
16333			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16334			  PCI_ANY_ID },
16335			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16336			  PCI_ANY_ID },
16337			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16338			  0xa },
16339			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16340			  PCI_ANY_ID },
16341			{ },
16342		};
16343		struct tg3_dev_id *pci_id = &ich_chipsets[0];
16344		struct pci_dev *bridge = NULL;
16345
16346		while (pci_id->vendor != 0) {
16347			bridge = pci_get_device(pci_id->vendor, pci_id->device,
16348						bridge);
16349			if (!bridge) {
16350				pci_id++;
16351				continue;
16352			}
16353			if (pci_id->rev != PCI_ANY_ID) {
16354				if (bridge->revision > pci_id->rev)
16355					continue;
16356			}
16357			if (bridge->subordinate &&
16358			    (bridge->subordinate->number ==
16359			     tp->pdev->bus->number)) {
16360				tg3_flag_set(tp, ICH_WORKAROUND);
16361				pci_dev_put(bridge);
16362				break;
16363			}
16364		}
16365	}
16366
16367	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16368		static struct tg3_dev_id {
16369			u32	vendor;
16370			u32	device;
16371		} bridge_chipsets[] = {
16372			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16373			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16374			{ },
16375		};
16376		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16377		struct pci_dev *bridge = NULL;
16378
16379		while (pci_id->vendor != 0) {
16380			bridge = pci_get_device(pci_id->vendor,
16381						pci_id->device,
16382						bridge);
16383			if (!bridge) {
16384				pci_id++;
16385				continue;
16386			}
16387			if (bridge->subordinate &&
16388			    (bridge->subordinate->number <=
16389			     tp->pdev->bus->number) &&
16390			    (bridge->subordinate->busn_res.end >=
16391			     tp->pdev->bus->number)) {
16392				tg3_flag_set(tp, 5701_DMA_BUG);
16393				pci_dev_put(bridge);
16394				break;
16395			}
16396		}
16397	}
16398
16399	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
16400	 * DMA addresses > 40-bit. This bridge may have other additional
16401	 * 57xx devices behind it in some 4-port NIC designs for example.
16402	 * Any tg3 device found behind the bridge will also need the 40-bit
16403	 * DMA workaround.
16404	 */
16405	if (tg3_flag(tp, 5780_CLASS)) {
16406		tg3_flag_set(tp, 40BIT_DMA_BUG);
16407		tp->msi_cap = tp->pdev->msi_cap;
16408	} else {
16409		struct pci_dev *bridge = NULL;
16410
16411		do {
16412			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16413						PCI_DEVICE_ID_SERVERWORKS_EPB,
16414						bridge);
16415			if (bridge && bridge->subordinate &&
16416			    (bridge->subordinate->number <=
16417			     tp->pdev->bus->number) &&
16418			    (bridge->subordinate->busn_res.end >=
16419			     tp->pdev->bus->number)) {
16420				tg3_flag_set(tp, 40BIT_DMA_BUG);
16421				pci_dev_put(bridge);
16422				break;
16423			}
16424		} while (bridge);
16425	}
16426
16427	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16428	    tg3_asic_rev(tp) == ASIC_REV_5714)
16429		tp->pdev_peer = tg3_find_peer(tp);
16430
16431	/* Determine TSO capabilities */
16432	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16433		; /* Do nothing. HW bug. */
16434	else if (tg3_flag(tp, 57765_PLUS))
16435		tg3_flag_set(tp, HW_TSO_3);
16436	else if (tg3_flag(tp, 5755_PLUS) ||
16437		 tg3_asic_rev(tp) == ASIC_REV_5906)
16438		tg3_flag_set(tp, HW_TSO_2);
16439	else if (tg3_flag(tp, 5750_PLUS)) {
16440		tg3_flag_set(tp, HW_TSO_1);
16441		tg3_flag_set(tp, TSO_BUG);
16442		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16443		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16444			tg3_flag_clear(tp, TSO_BUG);
16445	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16446		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
16447		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16448		tg3_flag_set(tp, FW_TSO);
16449		tg3_flag_set(tp, TSO_BUG);
16450		if (tg3_asic_rev(tp) == ASIC_REV_5705)
16451			tp->fw_needed = FIRMWARE_TG3TSO5;
16452		else
16453			tp->fw_needed = FIRMWARE_TG3TSO;
16454	}
16455
16456	/* Selectively allow TSO based on operating conditions */
16457	if (tg3_flag(tp, HW_TSO_1) ||
16458	    tg3_flag(tp, HW_TSO_2) ||
16459	    tg3_flag(tp, HW_TSO_3) ||
16460	    tg3_flag(tp, FW_TSO)) {
16461		/* For firmware TSO, assume ASF is disabled.
16462		 * We'll disable TSO later if we discover ASF
16463		 * is enabled in tg3_get_eeprom_hw_cfg().
16464		 */
16465		tg3_flag_set(tp, TSO_CAPABLE);
16466	} else {
16467		tg3_flag_clear(tp, TSO_CAPABLE);
16468		tg3_flag_clear(tp, TSO_BUG);
16469		tp->fw_needed = NULL;
16470	}
16471
16472	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16473		tp->fw_needed = FIRMWARE_TG3;
16474
16475	if (tg3_asic_rev(tp) == ASIC_REV_57766)
16476		tp->fw_needed = FIRMWARE_TG357766;
16477
16478	tp->irq_max = 1;
16479
16480	if (tg3_flag(tp, 5750_PLUS)) {
16481		tg3_flag_set(tp, SUPPORT_MSI);
16482		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16483		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16484		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16485		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16486		     tp->pdev_peer == tp->pdev))
16487			tg3_flag_clear(tp, SUPPORT_MSI);
16488
16489		if (tg3_flag(tp, 5755_PLUS) ||
16490		    tg3_asic_rev(tp) == ASIC_REV_5906) {
16491			tg3_flag_set(tp, 1SHOT_MSI);
16492		}
16493
16494		if (tg3_flag(tp, 57765_PLUS)) {
16495			tg3_flag_set(tp, SUPPORT_MSIX);
16496			tp->irq_max = TG3_IRQ_MAX_VECS;
16497		}
16498	}
16499
16500	tp->txq_max = 1;
16501	tp->rxq_max = 1;
16502	if (tp->irq_max > 1) {
16503		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16504		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16505
16506		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16507		    tg3_asic_rev(tp) == ASIC_REV_5720)
16508			tp->txq_max = tp->irq_max - 1;
16509	}
16510
16511	if (tg3_flag(tp, 5755_PLUS) ||
16512	    tg3_asic_rev(tp) == ASIC_REV_5906)
16513		tg3_flag_set(tp, SHORT_DMA_BUG);
16514
16515	if (tg3_asic_rev(tp) == ASIC_REV_5719)
16516		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16517
16518	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16519	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16520	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
16521	    tg3_asic_rev(tp) == ASIC_REV_5762)
16522		tg3_flag_set(tp, LRG_PROD_RING_CAP);
16523
16524	if (tg3_flag(tp, 57765_PLUS) &&
16525	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16526		tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16527
16528	if (!tg3_flag(tp, 5705_PLUS) ||
16529	    tg3_flag(tp, 5780_CLASS) ||
16530	    tg3_flag(tp, USE_JUMBO_BDFLAG))
16531		tg3_flag_set(tp, JUMBO_CAPABLE);
16532
16533	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16534			      &pci_state_reg);
16535
16536	if (pci_is_pcie(tp->pdev)) {
16537		u16 lnkctl;
16538
16539		tg3_flag_set(tp, PCI_EXPRESS);
16540
16541		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16542		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16543			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16544				tg3_flag_clear(tp, HW_TSO_2);
16545				tg3_flag_clear(tp, TSO_CAPABLE);
16546			}
16547			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16548			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
16549			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16550			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16551				tg3_flag_set(tp, CLKREQ_BUG);
16552		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16553			tg3_flag_set(tp, L1PLLPD_EN);
16554		}
16555	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16556		/* BCM5785 devices are effectively PCIe devices, and should
16557		 * follow PCIe codepaths, but do not have a PCIe capabilities
16558		 * section.
16559		 */
16560		tg3_flag_set(tp, PCI_EXPRESS);
16561	} else if (!tg3_flag(tp, 5705_PLUS) ||
16562		   tg3_flag(tp, 5780_CLASS)) {
16563		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16564		if (!tp->pcix_cap) {
16565			dev_err(&tp->pdev->dev,
16566				"Cannot find PCI-X capability, aborting\n");
16567			return -EIO;
16568		}
16569
16570		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16571			tg3_flag_set(tp, PCIX_MODE);
16572	}
16573
16574	/* If we have an AMD 762 or VIA K8T800 chipset, write
16575	 * reordering to the mailbox registers done by the host
16576	 * controller can cause major troubles.  We read back from
16577	 * every mailbox register write to force the writes to be
16578	 * posted to the chip in order.
16579	 */
16580	if (pci_dev_present(tg3_write_reorder_chipsets) &&
16581	    !tg3_flag(tp, PCI_EXPRESS))
16582		tg3_flag_set(tp, MBOX_WRITE_REORDER);
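
	/* A minimal sketch of the read-back flush this flag selects,
	 * assuming tg3_write_flush_reg32() follows the usual posted-write
	 * flush idiom:
	 *
	 *	writel(val, tp->regs + off);
	 *	readl(tp->regs + off);	(the read forces the write out)
	 */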
16583
16584	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16585			     &tp->pci_cacheline_sz);
16586	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16587			     &tp->pci_lat_timer);
16588	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16589	    tp->pci_lat_timer < 64) {
16590		tp->pci_lat_timer = 64;
16591		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16592				      tp->pci_lat_timer);
16593	}
16594
16595	/* Important! -- It is critical that the PCI-X hw workaround
16596	 * situation is decided before the first MMIO register access.
16597	 */
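	/* (The fast-path register accessors assigned further below key
	 * off PCIX_TARGET_HWBUG, so deciding it late would let early
	 * tr32()/tw32() calls go through the wrong access method.)
	 */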
16598	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16599		/* 5700 BX chips need to have their TX producer index
16600		 * mailboxes written twice to work around a bug.
16601		 */
16602		tg3_flag_set(tp, TXD_MBOX_HWBUG);
16603
16604		/* If we are in PCI-X mode, enable the register write workaround.
16605		 *
16606		 * The workaround is to use indirect register accesses
16607		 * for all chip writes except those to mailbox registers.
16608		 */
16609		if (tg3_flag(tp, PCIX_MODE)) {
16610			u32 pm_reg;
16611
16612			tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16613
16614			/* The chip can have its power management PCI config
16615			 * space registers clobbered due to this bug.
16616			 * So explicitly force the chip into D0 here.
16617			 */
16618			pci_read_config_dword(tp->pdev,
16619					      tp->pdev->pm_cap + PCI_PM_CTRL,
16620					      &pm_reg);
16621			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16622			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16623			pci_write_config_dword(tp->pdev,
16624					       tp->pdev->pm_cap + PCI_PM_CTRL,
16625					       pm_reg);
16626
16627			/* Also, force SERR#/PERR# in PCI command. */
16628			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16629			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16630			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16631		}
16632	}
16633
16634	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16635		tg3_flag_set(tp, PCI_HIGH_SPEED);
16636	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16637		tg3_flag_set(tp, PCI_32BIT);
16638
16639	/* Chip-specific fixup from Broadcom driver */
16640	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16641	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16642		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16643		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16644	}
16645
16646	/* Default fast path register access methods */
16647	tp->read32 = tg3_read32;
16648	tp->write32 = tg3_write32;
16649	tp->read32_mbox = tg3_read32;
16650	tp->write32_mbox = tg3_write32;
16651	tp->write32_tx_mbox = tg3_write32;
16652	tp->write32_rx_mbox = tg3_write32;
16653
16654	/* Various workaround register access methods */
16655	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16656		tp->write32 = tg3_write_indirect_reg32;
16657	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16658		 (tg3_flag(tp, PCI_EXPRESS) &&
16659		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16660		/*
16661		 * Back-to-back register writes can cause problems on these
16662		 * chips; the workaround is to read back all reg writes
16663		 * except those to mailbox regs.
16664		 *
16665		 * See tg3_write_indirect_reg32().
16666		 */
16667		tp->write32 = tg3_write_flush_reg32;
16668	}
16669
16670	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16671		tp->write32_tx_mbox = tg3_write32_tx_mbox;
16672		if (tg3_flag(tp, MBOX_WRITE_REORDER))
16673			tp->write32_rx_mbox = tg3_write_flush_reg32;
16674	}
16675
16676	if (tg3_flag(tp, ICH_WORKAROUND)) {
16677		tp->read32 = tg3_read_indirect_reg32;
16678		tp->write32 = tg3_write_indirect_reg32;
16679		tp->read32_mbox = tg3_read_indirect_mbox;
16680		tp->write32_mbox = tg3_write_indirect_mbox;
16681		tp->write32_tx_mbox = tg3_write_indirect_mbox;
16682		tp->write32_rx_mbox = tg3_write_indirect_mbox;
16683
16684		iounmap(tp->regs);
16685		tp->regs = NULL;
16686
16687		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16688		pci_cmd &= ~PCI_COMMAND_MEMORY;
16689		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16690	}
16691	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16692		tp->read32_mbox = tg3_read32_mbox_5906;
16693		tp->write32_mbox = tg3_write32_mbox_5906;
16694		tp->write32_tx_mbox = tg3_write32_mbox_5906;
16695		tp->write32_rx_mbox = tg3_write32_mbox_5906;
16696	}
16697
16698	if (tp->write32 == tg3_write_indirect_reg32 ||
16699	    (tg3_flag(tp, PCIX_MODE) &&
16700	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16701	      tg3_asic_rev(tp) == ASIC_REV_5701)))
16702		tg3_flag_set(tp, SRAM_USE_CONFIG);
16703
16704	/* The memory arbiter has to be enabled in order for SRAM accesses
16705	 * to succeed.  Normally on powerup the tg3 chip firmware will make
16706	 * sure it is enabled, but other entities such as system netboot
16707	 * code might disable it.
16708	 */
16709	val = tr32(MEMARB_MODE);
16710	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16711
16712	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16713	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16714	    tg3_flag(tp, 5780_CLASS)) {
16715		if (tg3_flag(tp, PCIX_MODE)) {
16716			pci_read_config_dword(tp->pdev,
16717					      tp->pcix_cap + PCI_X_STATUS,
16718					      &val);
16719			tp->pci_fn = val & 0x7;
16720		}
16721	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16722		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
16723		   tg3_asic_rev(tp) == ASIC_REV_5720) {
16724		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16725		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16726			val = tr32(TG3_CPMU_STATUS);
16727
16728		if (tg3_asic_rev(tp) == ASIC_REV_5717)
16729			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16730		else
16731			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16732				     TG3_CPMU_STATUS_FSHFT_5719;
16733	}
16734
16735	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16736		tp->write32_tx_mbox = tg3_write_flush_reg32;
16737		tp->write32_rx_mbox = tg3_write_flush_reg32;
16738	}
16739
16740	/* Get eeprom hw config before calling tg3_set_power_state().
16741	 * In particular, the TG3_FLAG_IS_NIC flag must be
16742	 * determined before calling tg3_set_power_state() so that
16743	 * we know whether or not to switch out of Vaux power.
16744	 * When the flag is set, it means that GPIO1 is used for eeprom
16745	 * write protect and also implies that it is a LOM where GPIOs
16746	 * are not used to switch power.
16747	 */
16748	tg3_get_eeprom_hw_cfg(tp);
16749
16750	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16751		tg3_flag_clear(tp, TSO_CAPABLE);
16752		tg3_flag_clear(tp, TSO_BUG);
16753		tp->fw_needed = NULL;
16754	}
16755
16756	if (tg3_flag(tp, ENABLE_APE)) {
16757		/* Allow reads and writes to the
16758		 * APE register and memory space.
16759		 */
16760		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16761				 PCISTATE_ALLOW_APE_SHMEM_WR |
16762				 PCISTATE_ALLOW_APE_PSPACE_WR;
16763		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16764				       pci_state_reg);
16765
16766		tg3_ape_lock_init(tp);
16767		tp->ape_hb_interval =
16768			msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16769	}
16770
16771	/* Set up tp->grc_local_ctrl before calling
16772	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16773	 * will bring 5700's external PHY out of reset.
16774	 * It is also used as eeprom write protect on LOMs.
16775	 */
16776	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16777	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16778	    tg3_flag(tp, EEPROM_WRITE_PROT))
16779		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16780				       GRC_LCLCTRL_GPIO_OUTPUT1);
16781	/* Unused GPIO3 must be driven as output on 5752 because there
16782	 * are no pull-up resistors on unused GPIO pins.
16783	 */
16784	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16785		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16786
16787	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16788	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
16789	    tg3_flag(tp, 57765_CLASS))
16790		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16791
16792	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16793	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16794		/* Turn off the debug UART. */
16795		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16796		if (tg3_flag(tp, IS_NIC))
16797			/* Keep VMain power. */
16798			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16799					      GRC_LCLCTRL_GPIO_OUTPUT0;
16800	}
16801
16802	if (tg3_asic_rev(tp) == ASIC_REV_5762)
16803		tp->grc_local_ctrl |=
16804			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16805
16806	/* Switch out of Vaux if it is a NIC */
16807	tg3_pwrsrc_switch_to_vmain(tp);
16808
16809	/* Derive initial jumbo mode from MTU assigned in
16810	 * ether_setup() via the alloc_etherdev() call
16811	 */
16812	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16813		tg3_flag_set(tp, JUMBO_RING_ENABLE);
16814
16815	/* Determine WakeOnLan speed to use. */
16816	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16817	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16818	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16819	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16820		tg3_flag_clear(tp, WOL_SPEED_100MB);
16821	} else {
16822		tg3_flag_set(tp, WOL_SPEED_100MB);
16823	}
16824
16825	if (tg3_asic_rev(tp) == ASIC_REV_5906)
16826		tp->phy_flags |= TG3_PHYFLG_IS_FET;
16827
16828	/* A few boards don't want the Ethernet@WireSpeed phy feature */
16829	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16830	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16831	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16832	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16833	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16834	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16835		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16836
16837	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16838	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
16839		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16840	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16841		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16842
16843	if (tg3_flag(tp, 5705_PLUS) &&
16844	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16845	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
16846	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
16847	    !tg3_flag(tp, 57765_PLUS)) {
16848		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16849		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
16850		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
16851		    tg3_asic_rev(tp) == ASIC_REV_5761) {
16852			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16853			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16854				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16855			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16856				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16857		} else
16858			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16859	}
16860
16861	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16862	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16863		tp->phy_otp = tg3_read_otp_phycfg(tp);
16864		if (tp->phy_otp == 0)
16865			tp->phy_otp = TG3_OTP_DEFAULT;
16866	}
16867
16868	if (tg3_flag(tp, CPMU_PRESENT))
16869		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16870	else
16871		tp->mi_mode = MAC_MI_MODE_BASE;
16872
16873	tp->coalesce_mode = 0;
16874	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16875	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
16876		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16877
16878	/* Set these bits to enable statistics workaround. */
16879	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16880	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
16881	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16882	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16883		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16884		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16885	}
16886
16887	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16888	    tg3_asic_rev(tp) == ASIC_REV_57780)
16889		tg3_flag_set(tp, USE_PHYLIB);
16890
16891	err = tg3_mdio_init(tp);
16892	if (err)
16893		return err;
16894
16895	/* Initialize data/descriptor byte/word swapping. */
16896	val = tr32(GRC_MODE);
16897	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16898	    tg3_asic_rev(tp) == ASIC_REV_5762)
16899		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16900			GRC_MODE_WORD_SWAP_B2HRX_DATA |
16901			GRC_MODE_B2HRX_ENABLE |
16902			GRC_MODE_HTX2B_ENABLE |
16903			GRC_MODE_HOST_STACKUP);
16904	else
16905		val &= GRC_MODE_HOST_STACKUP;
16906
16907	tw32(GRC_MODE, val | tp->grc_mode);
16908
16909	tg3_switch_clocks(tp);
16910
16911	/* Clear this out for sanity. */
16912	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16913
16914	/* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16915	tw32(TG3PCI_REG_BASE_ADDR, 0);
16916
16917	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16918			      &pci_state_reg);
16919	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16920	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16921		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16922		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16923		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16924		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16925			void __iomem *sram_base;
16926
16927			/* Write dummy words into the SRAM status block area and
16928			 * read them back.  If the 0xffffffff write to +4 smears
16929			 * into the word at +0, force-enable the PCIX workaround.
16930			 */
16931			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16932
16933			writel(0x00000000, sram_base);
16934			writel(0x00000000, sram_base + 4);
16935			writel(0xffffffff, sram_base + 4);
16936			if (readl(sram_base) != 0x00000000)
16937				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16938		}
16939	}
16940
16941	udelay(50);
16942	tg3_nvram_init(tp);
16943
16944	/* If the device has an NVRAM, no need to load patch firmware */
16945	if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16946	    !tg3_flag(tp, NO_NVRAM))
16947		tp->fw_needed = NULL;
16948
16949	grc_misc_cfg = tr32(GRC_MISC_CFG);
16950	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16951
16952	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16953	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16954	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16955		tg3_flag_set(tp, IS_5788);
16956
16957	if (!tg3_flag(tp, IS_5788) &&
16958	    tg3_asic_rev(tp) != ASIC_REV_5700)
16959		tg3_flag_set(tp, TAGGED_STATUS);
16960	if (tg3_flag(tp, TAGGED_STATUS)) {
16961		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16962				      HOSTCC_MODE_CLRTICK_TXBD);
16963
16964		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16965		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16966				       tp->misc_host_ctrl);
16967	}
16968
16969	/* Preserve the APE MAC_MODE bits */
16970	if (tg3_flag(tp, ENABLE_APE))
16971		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16972	else
16973		tp->mac_mode = 0;
16974
16975	if (tg3_10_100_only_device(tp, ent))
16976		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16977
16978	err = tg3_phy_probe(tp);
16979	if (err) {
16980		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16981		/* ... but do not return immediately ... */
16982		tg3_mdio_fini(tp);
16983	}
16984
16985	tg3_read_vpd(tp);
16986	tg3_read_fw_ver(tp);
16987
16988	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16989		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16990	} else {
16991		if (tg3_asic_rev(tp) == ASIC_REV_5700)
16992			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16993		else
16994			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16995	}
16996
16997	/* 5700 {AX,BX} chips have a broken status block link
16998	 * change bit implementation, so we must use the
16999	 * status register in those cases.
17000	 */
17001	if (tg3_asic_rev(tp) == ASIC_REV_5700)
17002		tg3_flag_set(tp, USE_LINKCHG_REG);
17003	else
17004		tg3_flag_clear(tp, USE_LINKCHG_REG);
17005
17006	/* The led_ctrl is set during tg3_phy_probe; here we might
17007	 * have to force the link status polling mechanism based
17008	 * upon subsystem IDs.
17009	 */
17010	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
17011	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
17012	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
17013		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
17014		tg3_flag_set(tp, USE_LINKCHG_REG);
17015	}
17016
17017	/* For all SERDES we poll the MAC status register. */
17018	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
17019		tg3_flag_set(tp, POLL_SERDES);
17020	else
17021		tg3_flag_clear(tp, POLL_SERDES);
17022
17023	if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
17024		tg3_flag_set(tp, POLL_CPMU_LINK);
17025
17026	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
17027	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
17028	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
17029	    tg3_flag(tp, PCIX_MODE)) {
17030		tp->rx_offset = NET_SKB_PAD;
17031#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
17032		tp->rx_copy_thresh = ~(u16)0;
17033#endif
17034	}
17035
17036	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
17037	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
17038	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
17039
17040	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
17041
17042	/* Increment the rx prod index on the rx std ring by at most
17043	 * 8 for these chips to work around hw errata.
17044	 */
17045	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
17046	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
17047	    tg3_asic_rev(tp) == ASIC_REV_5755)
17048		tp->rx_std_max_post = 8;
17049
17050	if (tg3_flag(tp, ASPM_WORKAROUND))
17051		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
17052				     PCIE_PWR_MGMT_L1_THRESH_MSK;
17053
17054	return err;
17055}
17056
17057static int tg3_get_device_address(struct tg3 *tp, u8 *addr)
17058{
17059	u32 hi, lo, mac_offset;
17060	int addr_ok = 0;
17061	int err;
17062
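	/* Sources are tried in order: platform firmware via
	 * eth_platform_get_mac_address(), the SSB core, the bootcode's
	 * SRAM mailbox, NVRAM, and finally the MAC_ADDR_0 registers.
	 */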
17063	if (!eth_platform_get_mac_address(&tp->pdev->dev, addr))
17064		return 0;
17065
17066	if (tg3_flag(tp, IS_SSB_CORE)) {
17067		err = ssb_gige_get_macaddr(tp->pdev, addr);
17068		if (!err && is_valid_ether_addr(addr))
17069			return 0;
17070	}
17071
17072	mac_offset = 0x7c;
17073	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
17074	    tg3_flag(tp, 5780_CLASS)) {
17075		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
17076			mac_offset = 0xcc;
17077		if (tg3_nvram_lock(tp))
17078			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
17079		else
17080			tg3_nvram_unlock(tp);
17081	} else if (tg3_flag(tp, 5717_PLUS)) {
17082		if (tp->pci_fn & 1)
17083			mac_offset = 0xcc;
17084		if (tp->pci_fn > 1)
17085			mac_offset += 0x18c;
17086	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
17087		mac_offset = 0x10;
17088
17089	/* First try to get it from MAC address mailbox. */
17090	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
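	/* 0x484b is ASCII "HK", assumed here to be the bootcode's
	 * "address valid" signature in the mailbox high word.
	 */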
17091	if ((hi >> 16) == 0x484b) {
17092		addr[0] = (hi >>  8) & 0xff;
17093		addr[1] = (hi >>  0) & 0xff;
17094
17095		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
17096		addr[2] = (lo >> 24) & 0xff;
17097		addr[3] = (lo >> 16) & 0xff;
17098		addr[4] = (lo >>  8) & 0xff;
17099		addr[5] = (lo >>  0) & 0xff;
17100
17101		/* Some old bootcode may report a 0 MAC address in SRAM */
17102		addr_ok = is_valid_ether_addr(addr);
17103	}
17104	if (!addr_ok) {
17105		__be32 be_hi, be_lo;
17106
17107		/* Next, try NVRAM. */
17108		if (!tg3_flag(tp, NO_NVRAM) &&
17109		    !tg3_nvram_read_be32(tp, mac_offset + 0, &be_hi) &&
17110		    !tg3_nvram_read_be32(tp, mac_offset + 4, &be_lo)) {
17111			memcpy(&addr[0], ((char *)&be_hi) + 2, 2);
17112			memcpy(&addr[2], (char *)&be_lo, sizeof(be_lo));
17113		}
17114		/* Finally just fetch it out of the MAC control regs. */
17115		else {
17116			hi = tr32(MAC_ADDR_0_HIGH);
17117			lo = tr32(MAC_ADDR_0_LOW);
17118
17119			addr[5] = lo & 0xff;
17120			addr[4] = (lo >> 8) & 0xff;
17121			addr[3] = (lo >> 16) & 0xff;
17122			addr[2] = (lo >> 24) & 0xff;
17123			addr[1] = hi & 0xff;
17124			addr[0] = (hi >> 8) & 0xff;
17125		}
17126	}
17127
17128	if (!is_valid_ether_addr(addr))
17129		return -EINVAL;
17130	return 0;
17131}
17132
17133#define BOUNDARY_SINGLE_CACHELINE	1
17134#define BOUNDARY_MULTI_CACHELINE	2
17135
17136static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17137{
17138	int cacheline_size;
17139	u8 byte;
17140	int goal;
17141
17142	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17143	if (byte == 0)
17144		cacheline_size = 1024;
17145	else
17146		cacheline_size = (int) byte * 4;
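
	/* PCI_CACHE_LINE_SIZE counts 32-bit words, so e.g. a raw value
	 * of 0x10 (16 dwords) works out to 16 * 4 = 64 bytes.
	 */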
17147
17148	/* On 5703 and later chips, the boundary bits have no
17149	 * effect.
17150	 */
17151	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17152	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
17153	    !tg3_flag(tp, PCI_EXPRESS))
17154		goto out;
17155
17156#if defined(CONFIG_PPC64) || defined(CONFIG_PARISC)
17157	goal = BOUNDARY_MULTI_CACHELINE;
17158#else
17159#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17160	goal = BOUNDARY_SINGLE_CACHELINE;
17161#else
17162	goal = 0;
17163#endif
17164#endif
17165
17166	if (tg3_flag(tp, 57765_PLUS)) {
17167		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17168		goto out;
17169	}
17170
17171	if (!goal)
17172		goto out;
17173
17174	/* PCI controllers on most RISC systems tend to disconnect
17175	 * when a device tries to burst across a cache-line boundary.
17176	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17177	 *
17178	 * Unfortunately, for PCI-E there are only limited
17179	 * write-side controls for this, and thus for reads
17180	 * we will still get the disconnects.  We'll also waste
17181	 * these PCI cycles for both read and write for chips
17182	 * other than 5700 and 5701 which do not implement the
17183	 * boundary bits.
17184	 */
17185	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17186		switch (cacheline_size) {
17187		case 16:
17188		case 32:
17189		case 64:
17190		case 128:
17191			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17192				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17193					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17194			} else {
17195				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17196					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17197			}
17198			break;
17199
17200		case 256:
17201			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17202				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17203			break;
17204
17205		default:
17206			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17207				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17208			break;
17209		}
17210	} else if (tg3_flag(tp, PCI_EXPRESS)) {
17211		switch (cacheline_size) {
17212		case 16:
17213		case 32:
17214		case 64:
17215			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17216				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17217				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17218				break;
17219			}
17220			fallthrough;
17221		case 128:
17222		default:
17223			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17224			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17225			break;
17226		}
17227	} else {
17228		switch (cacheline_size) {
17229		case 16:
17230			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17231				val |= (DMA_RWCTRL_READ_BNDRY_16 |
17232					DMA_RWCTRL_WRITE_BNDRY_16);
17233				break;
17234			}
17235			fallthrough;
17236		case 32:
17237			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17238				val |= (DMA_RWCTRL_READ_BNDRY_32 |
17239					DMA_RWCTRL_WRITE_BNDRY_32);
17240				break;
17241			}
17242			fallthrough;
17243		case 64:
17244			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17245				val |= (DMA_RWCTRL_READ_BNDRY_64 |
17246					DMA_RWCTRL_WRITE_BNDRY_64);
17247				break;
17248			}
17249			fallthrough;
17250		case 128:
17251			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17252				val |= (DMA_RWCTRL_READ_BNDRY_128 |
17253					DMA_RWCTRL_WRITE_BNDRY_128);
17254				break;
17255			}
17256			fallthrough;
17257		case 256:
17258			val |= (DMA_RWCTRL_READ_BNDRY_256 |
17259				DMA_RWCTRL_WRITE_BNDRY_256);
17260			break;
17261		case 512:
17262			val |= (DMA_RWCTRL_READ_BNDRY_512 |
17263				DMA_RWCTRL_WRITE_BNDRY_512);
17264			break;
17265		case 1024:
17266		default:
17267			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17268				DMA_RWCTRL_WRITE_BNDRY_1024);
17269			break;
17270		}
17271	}
17272
17273out:
17274	return val;
17275}
17276
17277static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17278			   int size, bool to_device)
17279{
17280	struct tg3_internal_buffer_desc test_desc;
17281	u32 sram_dma_descs;
17282	int i, ret;
17283
17284	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17285
17286	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17287	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17288	tw32(RDMAC_STATUS, 0);
17289	tw32(WDMAC_STATUS, 0);
17290
17291	tw32(BUFMGR_MODE, 0);
17292	tw32(FTQ_RESET, 0);
17293
17294	test_desc.addr_hi = ((u64) buf_dma) >> 32;
17295	test_desc.addr_lo = buf_dma & 0xffffffff;
17296	test_desc.nic_mbuf = 0x00002100;
17297	test_desc.len = size;
17298
17299	/*
17300	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17301	 * the *second* time the tg3 driver was getting loaded after an
17302	 * initial scan.
17303	 *
17304	 * Broadcom tells me:
17305	 *   ...the DMA engine is connected to the GRC block and a DMA
17306	 *   reset may affect the GRC block in some unpredictable way...
17307	 *   The behavior of resets to individual blocks has not been tested.
17308	 *
17309	 * Broadcom noted the GRC reset will also reset all sub-components.
17310	 */
17311	if (to_device) {
17312		test_desc.cqid_sqid = (13 << 8) | 2;
17313
17314		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17315		udelay(40);
17316	} else {
17317		test_desc.cqid_sqid = (16 << 8) | 7;
17318
17319		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17320		udelay(40);
17321	}
17322	test_desc.flags = 0x00000005;
17323
17324	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17325		u32 val;
17326
17327		val = *(((u32 *)&test_desc) + i);
17328		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17329				       sram_dma_descs + (i * sizeof(u32)));
17330		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17331	}
17332	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
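
	/* The loop above pokes the descriptor into NIC SRAM one 32-bit
	 * word at a time through the PCI config-space memory window:
	 * MEM_WIN_BASE_ADDR selects the SRAM address, MEM_WIN_DATA
	 * carries the payload, and the window is then closed.
	 */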
17333
17334	if (to_device)
17335		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17336	else
17337		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17338
17339	ret = -ENODEV;
17340	for (i = 0; i < 40; i++) {
17341		u32 val;
17342
17343		if (to_device)
17344			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17345		else
17346			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17347		if ((val & 0xffff) == sram_dma_descs) {
17348			ret = 0;
17349			break;
17350		}
17351
17352		udelay(100);
17353	}
17354
17355	return ret;
17356}
17357
17358#define TEST_BUFFER_SIZE	0x2000
17359
17360static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17361	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17362	{ },
17363};
17364
17365static int tg3_test_dma(struct tg3 *tp)
17366{
17367	dma_addr_t buf_dma;
17368	u32 *buf, saved_dma_rwctrl;
17369	int ret = 0;
17370
17371	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17372				 &buf_dma, GFP_KERNEL);
17373	if (!buf) {
17374		ret = -ENOMEM;
17375		goto out_nofree;
17376	}
17377
17378	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17379			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17380
17381	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17382
17383	if (tg3_flag(tp, 57765_PLUS))
17384		goto out;
17385
17386	if (tg3_flag(tp, PCI_EXPRESS)) {
17387		/* DMA read watermark not used on PCIE */
17388		tp->dma_rwctrl |= 0x00180000;
17389	} else if (!tg3_flag(tp, PCIX_MODE)) {
17390		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17391		    tg3_asic_rev(tp) == ASIC_REV_5750)
17392			tp->dma_rwctrl |= 0x003f0000;
17393		else
17394			tp->dma_rwctrl |= 0x003f000f;
17395	} else {
17396		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17397		    tg3_asic_rev(tp) == ASIC_REV_5704) {
17398			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17399			u32 read_water = 0x7;
17400
17401			/* If the 5704 is behind the EPB bridge, we can
17402			 * do the less restrictive ONE_DMA workaround for
17403			 * better performance.
17404			 */
17405			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17406			    tg3_asic_rev(tp) == ASIC_REV_5704)
17407				tp->dma_rwctrl |= 0x8000;
17408			else if (ccval == 0x6 || ccval == 0x7)
17409				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17410
17411			if (tg3_asic_rev(tp) == ASIC_REV_5703)
17412				read_water = 4;
17413			/* Set bit 23 to enable PCIX hw bug fix */
17414			tp->dma_rwctrl |=
17415				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17416				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17417				(1 << 23);
17418		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17419			/* 5780 always in PCIX mode */
17420			tp->dma_rwctrl |= 0x00144000;
17421		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17422			/* 5714 always in PCIX mode */
17423			tp->dma_rwctrl |= 0x00148000;
17424		} else {
17425			tp->dma_rwctrl |= 0x001b000f;
17426		}
17427	}
17428	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17429		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17430
17431	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17432	    tg3_asic_rev(tp) == ASIC_REV_5704)
17433		tp->dma_rwctrl &= 0xfffffff0;
17434
17435	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17436	    tg3_asic_rev(tp) == ASIC_REV_5701) {
17437		/* Remove this if it causes problems for some boards. */
17438		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17439
17440		/* On 5700/5701 chips, we need to set this bit.
17441		 * Otherwise the chip will issue cacheline transactions
17442		 * to streamable DMA memory without all of the byte
17443		 * enables asserted.  This is an error on several
17444		 * RISC PCI controllers, in particular sparc64.
17445		 *
17446		 * On 5703/5704 chips, this bit has been reassigned
17447		 * a different meaning.  In particular, it is used
17448		 * on those chips to enable a PCI-X workaround.
17449		 */
17450		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17451	}
17452
17453	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17454
17455
17456	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17457	    tg3_asic_rev(tp) != ASIC_REV_5701)
17458		goto out;
17459
17460	/* It is best to perform the DMA test with the maximum write burst size
17461	 * to expose the 5700/5701 write DMA bug.
17462	 */
17463	saved_dma_rwctrl = tp->dma_rwctrl;
17464	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17465	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17466
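	/* Write/read/verify passes: on the first corrupted word the
	 * write boundary is clamped to 16 bytes and the pass is rerun;
	 * corruption with the 16-byte boundary already in effect is a
	 * hard failure.
	 */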
17467	while (1) {
17468		u32 *p = buf, i;
17469
17470		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17471			p[i] = i;
17472
17473		/* Send the buffer to the chip. */
17474		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17475		if (ret) {
17476			dev_err(&tp->pdev->dev,
17477				"%s: Buffer write failed. err = %d\n",
17478				__func__, ret);
17479			break;
17480		}
17481
17482		/* Now read it back. */
17483		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17484		if (ret) {
17485			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17486				"err = %d\n", __func__, ret);
17487			break;
17488		}
17489
17490		/* Verify it. */
17491		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17492			if (p[i] == i)
17493				continue;
17494
17495			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17496			    DMA_RWCTRL_WRITE_BNDRY_16) {
17497				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17498				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17499				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17500				break;
17501			} else {
17502				dev_err(&tp->pdev->dev,
17503					"%s: Buffer corrupted on read back! "
17504					"(%d != %d)\n", __func__, p[i], i);
17505				ret = -ENODEV;
17506				goto out;
17507			}
17508		}
17509
17510		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17511			/* Success. */
17512			ret = 0;
17513			break;
17514		}
17515	}
17516	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17517	    DMA_RWCTRL_WRITE_BNDRY_16) {
17518		/* DMA test passed without adjusting the DMA boundary;
17519		 * now look for chipsets that are known to expose the
17520		 * DMA bug without failing the test.
17521		 */
17522		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17523			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17524			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17525		} else {
17526			/* Safe to use the calculated DMA boundary. */
17527			tp->dma_rwctrl = saved_dma_rwctrl;
17528		}
17529
17530		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17531	}
17532
17533out:
17534	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17535out_nofree:
17536	return ret;
17537}
17538
17539static void tg3_init_bufmgr_config(struct tg3 *tp)
17540{
17541	if (tg3_flag(tp, 57765_PLUS)) {
17542		tp->bufmgr_config.mbuf_read_dma_low_water =
17543			DEFAULT_MB_RDMA_LOW_WATER_5705;
17544		tp->bufmgr_config.mbuf_mac_rx_low_water =
17545			DEFAULT_MB_MACRX_LOW_WATER_57765;
17546		tp->bufmgr_config.mbuf_high_water =
17547			DEFAULT_MB_HIGH_WATER_57765;
17548
17549		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17550			DEFAULT_MB_RDMA_LOW_WATER_5705;
17551		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17552			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17553		tp->bufmgr_config.mbuf_high_water_jumbo =
17554			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17555	} else if (tg3_flag(tp, 5705_PLUS)) {
17556		tp->bufmgr_config.mbuf_read_dma_low_water =
17557			DEFAULT_MB_RDMA_LOW_WATER_5705;
17558		tp->bufmgr_config.mbuf_mac_rx_low_water =
17559			DEFAULT_MB_MACRX_LOW_WATER_5705;
17560		tp->bufmgr_config.mbuf_high_water =
17561			DEFAULT_MB_HIGH_WATER_5705;
17562		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17563			tp->bufmgr_config.mbuf_mac_rx_low_water =
17564				DEFAULT_MB_MACRX_LOW_WATER_5906;
17565			tp->bufmgr_config.mbuf_high_water =
17566				DEFAULT_MB_HIGH_WATER_5906;
17567		}
17568
17569		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17570			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17571		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17572			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17573		tp->bufmgr_config.mbuf_high_water_jumbo =
17574			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17575	} else {
17576		tp->bufmgr_config.mbuf_read_dma_low_water =
17577			DEFAULT_MB_RDMA_LOW_WATER;
17578		tp->bufmgr_config.mbuf_mac_rx_low_water =
17579			DEFAULT_MB_MACRX_LOW_WATER;
17580		tp->bufmgr_config.mbuf_high_water =
17581			DEFAULT_MB_HIGH_WATER;
17582
17583		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17584			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17585		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17586			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17587		tp->bufmgr_config.mbuf_high_water_jumbo =
17588			DEFAULT_MB_HIGH_WATER_JUMBO;
17589	}
17590
17591	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17592	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17593}
17594
17595static char *tg3_phy_string(struct tg3 *tp)
17596{
17597	switch (tp->phy_id & TG3_PHY_ID_MASK) {
17598	case TG3_PHY_ID_BCM5400:	return "5400";
17599	case TG3_PHY_ID_BCM5401:	return "5401";
17600	case TG3_PHY_ID_BCM5411:	return "5411";
17601	case TG3_PHY_ID_BCM5701:	return "5701";
17602	case TG3_PHY_ID_BCM5703:	return "5703";
17603	case TG3_PHY_ID_BCM5704:	return "5704";
17604	case TG3_PHY_ID_BCM5705:	return "5705";
17605	case TG3_PHY_ID_BCM5750:	return "5750";
17606	case TG3_PHY_ID_BCM5752:	return "5752";
17607	case TG3_PHY_ID_BCM5714:	return "5714";
17608	case TG3_PHY_ID_BCM5780:	return "5780";
17609	case TG3_PHY_ID_BCM5755:	return "5755";
17610	case TG3_PHY_ID_BCM5787:	return "5787";
17611	case TG3_PHY_ID_BCM5784:	return "5784";
17612	case TG3_PHY_ID_BCM5756:	return "5722/5756";
17613	case TG3_PHY_ID_BCM5906:	return "5906";
17614	case TG3_PHY_ID_BCM5761:	return "5761";
17615	case TG3_PHY_ID_BCM5718C:	return "5718C";
17616	case TG3_PHY_ID_BCM5718S:	return "5718S";
17617	case TG3_PHY_ID_BCM57765:	return "57765";
17618	case TG3_PHY_ID_BCM5719C:	return "5719C";
17619	case TG3_PHY_ID_BCM5720C:	return "5720C";
17620	case TG3_PHY_ID_BCM5762:	return "5762C";
17621	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
17622	case 0:			return "serdes";
17623	default:		return "unknown";
17624	}
17625}
17626
17627static char *tg3_bus_string(struct tg3 *tp, char *str)
17628{
17629	if (tg3_flag(tp, PCI_EXPRESS)) {
17630		strcpy(str, "PCI Express");
17631		return str;
17632	} else if (tg3_flag(tp, PCIX_MODE)) {
17633		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17634
17635		strcpy(str, "PCIX:");
17636
17637		if ((clock_ctrl == 7) ||
17638		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17639		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17640			strcat(str, "133MHz");
17641		else if (clock_ctrl == 0)
17642			strcat(str, "33MHz");
17643		else if (clock_ctrl == 2)
17644			strcat(str, "50MHz");
17645		else if (clock_ctrl == 4)
17646			strcat(str, "66MHz");
17647		else if (clock_ctrl == 6)
17648			strcat(str, "100MHz");
17649	} else {
17650		strcpy(str, "PCI:");
17651		if (tg3_flag(tp, PCI_HIGH_SPEED))
17652			strcat(str, "66MHz");
17653		else
17654			strcat(str, "33MHz");
17655	}
17656	if (tg3_flag(tp, PCI_32BIT))
17657		strcat(str, ":32-bit");
17658	else
17659		strcat(str, ":64-bit");
17660	return str;
17661}
17662
17663static void tg3_init_coal(struct tg3 *tp)
17664{
17665	struct ethtool_coalesce *ec = &tp->coal;
17666
17667	memset(ec, 0, sizeof(*ec));
17668	ec->cmd = ETHTOOL_GCOALESCE;
17669	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17670	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17671	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17672	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17673	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17674	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17675	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17676	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17677	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17678
17679	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17680				 HOSTCC_MODE_CLRTICK_TXBD)) {
17681		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17682		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17683		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17684		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17685	}
17686
17687	if (tg3_flag(tp, 5705_PLUS)) {
17688		ec->rx_coalesce_usecs_irq = 0;
17689		ec->tx_coalesce_usecs_irq = 0;
17690		ec->stats_block_coalesce_usecs = 0;
17691	}
17692}
17693
17694static int tg3_init_one(struct pci_dev *pdev,
17695				  const struct pci_device_id *ent)
17696{
17697	struct net_device *dev;
17698	struct tg3 *tp;
17699	int i, err;
17700	u32 sndmbx, rcvmbx, intmbx;
17701	char str[40];
17702	u64 dma_mask, persist_dma_mask;
17703	netdev_features_t features = 0;
17704	u8 addr[ETH_ALEN] __aligned(2);
17705
17706	err = pci_enable_device(pdev);
17707	if (err) {
17708		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17709		return err;
17710	}
17711
17712	err = pci_request_regions(pdev, DRV_MODULE_NAME);
17713	if (err) {
17714		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17715		goto err_out_disable_pdev;
17716	}
17717
17718	pci_set_master(pdev);
17719
17720	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17721	if (!dev) {
17722		err = -ENOMEM;
17723		goto err_out_free_res;
17724	}
17725
17726	SET_NETDEV_DEV(dev, &pdev->dev);
17727
17728	tp = netdev_priv(dev);
17729	tp->pdev = pdev;
17730	tp->dev = dev;
17731	tp->rx_mode = TG3_DEF_RX_MODE;
17732	tp->tx_mode = TG3_DEF_TX_MODE;
17733	tp->irq_sync = 1;
17734	tp->pcierr_recovery = false;
17735
17736	if (tg3_debug > 0)
17737		tp->msg_enable = tg3_debug;
17738	else
17739		tp->msg_enable = TG3_DEF_MSG_ENABLE;
17740
17741	if (pdev_is_ssb_gige_core(pdev)) {
17742		tg3_flag_set(tp, IS_SSB_CORE);
17743		if (ssb_gige_must_flush_posted_writes(pdev))
17744			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17745		if (ssb_gige_one_dma_at_once(pdev))
17746			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17747		if (ssb_gige_have_roboswitch(pdev)) {
17748			tg3_flag_set(tp, USE_PHYLIB);
17749			tg3_flag_set(tp, ROBOSWITCH);
17750		}
17751		if (ssb_gige_is_rgmii(pdev))
17752			tg3_flag_set(tp, RGMII_MODE);
17753	}
17754
17755	/* The word/byte swap controls here govern register access byte
17756	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17757	 * setting below.
17758	 */
17759	tp->misc_host_ctrl =
17760		MISC_HOST_CTRL_MASK_PCI_INT |
17761		MISC_HOST_CTRL_WORD_SWAP |
17762		MISC_HOST_CTRL_INDIR_ACCESS |
17763		MISC_HOST_CTRL_PCISTATE_RW;
17764
17765	/* The NONFRM (non-frame) byte/word swap controls take effect
17766	 * on descriptor entries, anything which isn't packet data.
17767	 *
17768	 * The StrongARM chips on the board (one for tx, one for rx)
17769	 * are running in big-endian mode.
17770	 */
17771	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17772			GRC_MODE_WSWAP_NONFRM_DATA);
17773#ifdef __BIG_ENDIAN
17774	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17775#endif
17776	spin_lock_init(&tp->lock);
17777	spin_lock_init(&tp->indirect_lock);
17778	INIT_WORK(&tp->reset_task, tg3_reset_task);
17779
17780	tp->regs = pci_ioremap_bar(pdev, BAR_0);
17781	if (!tp->regs) {
17782		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17783		err = -ENOMEM;
17784		goto err_out_free_dev;
17785	}
17786
17787	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17788	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17789	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17790	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17791	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17792	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17793	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17794	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17795	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17796	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17797	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17798	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17799	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17800	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17801	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17802		tg3_flag_set(tp, ENABLE_APE);
17803		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17804		if (!tp->aperegs) {
17805			dev_err(&pdev->dev,
17806				"Cannot map APE registers, aborting\n");
17807			err = -ENOMEM;
17808			goto err_out_iounmap;
17809		}
17810	}
17811
17812	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17813	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17814
17815	dev->ethtool_ops = &tg3_ethtool_ops;
17816	dev->watchdog_timeo = TG3_TX_TIMEOUT;
17817	dev->netdev_ops = &tg3_netdev_ops;
17818	dev->irq = pdev->irq;
17819
17820	err = tg3_get_invariants(tp, ent);
17821	if (err) {
17822		dev_err(&pdev->dev,
17823			"Problem fetching invariants of chip, aborting\n");
17824		goto err_out_apeunmap;
17825	}
17826
17827	/* The EPB bridge inside 5714, 5715, and 5780 and any
17828	 * device behind the EPB cannot support DMA addresses > 40-bit.
17829	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17830	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17831	 * do DMA address check in __tg3_start_xmit().
17832	 */
17833	if (tg3_flag(tp, IS_5788))
17834		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17835	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17836		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17837#ifdef CONFIG_HIGHMEM
17838		dma_mask = DMA_BIT_MASK(64);
17839#endif
17840	} else
17841		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17842
17843	if (tg3_asic_rev(tp) == ASIC_REV_57766)
17844		persist_dma_mask = DMA_BIT_MASK(31);
17845
17846	/* Configure DMA attributes. */
17847	if (dma_mask > DMA_BIT_MASK(32)) {
17848		err = dma_set_mask(&pdev->dev, dma_mask);
17849		if (!err) {
17850			features |= NETIF_F_HIGHDMA;
17851			err = dma_set_coherent_mask(&pdev->dev,
17852						    persist_dma_mask);
17853			if (err < 0) {
17854				dev_err(&pdev->dev, "Unable to obtain 64 bit "
17855					"DMA for consistent allocations\n");
17856				goto err_out_apeunmap;
17857			}
17858		}
17859	}
17860	if (err || dma_mask == DMA_BIT_MASK(32)) {
17861		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
17862		if (err) {
17863			dev_err(&pdev->dev,
17864				"No usable DMA configuration, aborting\n");
17865			goto err_out_apeunmap;
17866		}
17867	}
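
	/* Net effect: the streaming mask may be wider (64-bit with
	 * CONFIG_HIGHMEM on 40BIT_DMA_BUG parts) than the coherent mask,
	 * which stays at persist_dma_mask so the descriptor rings remain
	 * reachable by the chip.
	 */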
17868
17869	tg3_init_bufmgr_config(tp);
17870
17871	/* 5700 B0 chips do not support checksumming correctly due
17872	 * to hardware bugs.
17873	 */
17874	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17875		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17876
17877		if (tg3_flag(tp, 5755_PLUS))
17878			features |= NETIF_F_IPV6_CSUM;
17879	}
17880
17881	/* TSO is on by default on chips that support hardware TSO.
17882	 * Firmware TSO on older chips gives lower performance, so it
17883	 * is off by default, but can be enabled using ethtool.
17884	 */
17885	if ((tg3_flag(tp, HW_TSO_1) ||
17886	     tg3_flag(tp, HW_TSO_2) ||
17887	     tg3_flag(tp, HW_TSO_3)) &&
17888	    (features & NETIF_F_IP_CSUM))
17889		features |= NETIF_F_TSO;
17890	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17891		if (features & NETIF_F_IPV6_CSUM)
17892			features |= NETIF_F_TSO6;
17893		if (tg3_flag(tp, HW_TSO_3) ||
17894		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
17895		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17896		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17897		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
17898		    tg3_asic_rev(tp) == ASIC_REV_57780)
17899			features |= NETIF_F_TSO_ECN;
17900	}
17901
17902	dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17903			 NETIF_F_HW_VLAN_CTAG_RX;
17904	dev->vlan_features |= features;
17905
17906	/*
17907	 * Add loopback capability only for a subset of devices that support
17908	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17909	 * loopback for the remaining devices.
17910	 */
17911	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17912	    !tg3_flag(tp, CPMU_PRESENT))
17913		/* Add the loopback capability */
17914		features |= NETIF_F_LOOPBACK;
17915
17916	dev->hw_features |= features;
17917	dev->priv_flags |= IFF_UNICAST_FLT;
17918
17919	/* MTU range: 60 - 9000 or 1500, depending on hardware */
17920	dev->min_mtu = TG3_MIN_MTU;
17921	dev->max_mtu = TG3_MAX_MTU(tp);
17922
17923	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17924	    !tg3_flag(tp, TSO_CAPABLE) &&
17925	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17926		tg3_flag_set(tp, MAX_RXPEND_64);
17927		tp->rx_pending = 63;
17928	}
17929
17930	err = tg3_get_device_address(tp, addr);
17931	if (err) {
17932		dev_err(&pdev->dev,
17933			"Could not obtain valid ethernet address, aborting\n");
17934		goto err_out_apeunmap;
17935	}
17936	eth_hw_addr_set(dev, addr);
17937
17938	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17939	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17940	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17941	for (i = 0; i < tp->irq_max; i++) {
17942		struct tg3_napi *tnapi = &tp->napi[i];
17943
17944		tnapi->tp = tp;
17945		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17946
17947		tnapi->int_mbox = intmbx;
17948		intmbx += 0x8;
17949
17950		tnapi->consmbox = rcvmbx;
17951		tnapi->prodmbox = sndmbx;
17952
17953		if (i)
17954			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17955		else
17956			tnapi->coal_now = HOSTCC_MODE_NOW;
17957
17958		if (!tg3_flag(tp, SUPPORT_MSIX))
17959			break;
17960
17961		/*
17962		 * If we support MSIX, we'll be using RSS.  If we're using
17963		 * RSS, the first vector only handles link interrupts and the
17964		 * remaining vectors handle rx and tx interrupts.  Reuse the
17965		 * mailbox values for the next iteration.  The values we set up
17966		 * above are still useful for single-vector mode.
17967		 */
17968		if (!i)
17969			continue;
17970
17971		rcvmbx += 0x8;
17972
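		/* The -0x4/+0xc dance below appears to walk both 32-bit
		 * halves of the 64-bit send mailboxes, giving successive
		 * vectors producer mailboxes at base+4, base+0, base+0xc,
		 * base+8, ... (inferred from the arithmetic, not from a
		 * documented register layout).
		 */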
17973		if (sndmbx & 0x4)
17974			sndmbx -= 0x4;
17975		else
17976			sndmbx += 0xc;
17977	}
17978
17979	/*
17980	 * Reset the chip in case a UNDI or EFI driver did not shut it
17981	 * down.  The DMA self test will enable WDMAC and we'd otherwise
17982	 * see (spurious) pending DMA on the PCI bus at that point.
17983	 */
17984	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17985	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17986		tg3_full_lock(tp, 0);
17987		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17988		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17989		tg3_full_unlock(tp);
17990	}
17991
17992	err = tg3_test_dma(tp);
17993	if (err) {
17994		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17995		goto err_out_apeunmap;
17996	}
17997
17998	tg3_init_coal(tp);
17999
18000	pci_set_drvdata(pdev, dev);
18001
18002	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
18003	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
18004	    tg3_asic_rev(tp) == ASIC_REV_5762)
18005		tg3_flag_set(tp, PTP_CAPABLE);
18006
18007	tg3_timer_init(tp);
18008
18009	tg3_carrier_off(tp);
18010
18011	err = register_netdev(dev);
18012	if (err) {
18013		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
18014		goto err_out_apeunmap;
18015	}
18016
18017	if (tg3_flag(tp, PTP_CAPABLE)) {
18018		tg3_ptp_init(tp);
18019		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
18020						   &tp->pdev->dev);
18021		if (IS_ERR(tp->ptp_clock))
18022			tp->ptp_clock = NULL;
18023	}
18024
18025	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
18026		    tp->board_part_number,
18027		    tg3_chip_rev_id(tp),
18028		    tg3_bus_string(tp, str),
18029		    dev->dev_addr);
18030
18031	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
18032		char *ethtype;
18033
18034		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
18035			ethtype = "10/100Base-TX";
18036		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
18037			ethtype = "1000Base-SX";
18038		else
18039			ethtype = "10/100/1000Base-T";
18040
18041		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
18042			    "(WireSpeed[%d], EEE[%d])\n",
18043			    tg3_phy_string(tp), ethtype,
18044			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
18045			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
18046	}
18047
18048	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
18049		    (dev->features & NETIF_F_RXCSUM) != 0,
18050		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
18051		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
18052		    tg3_flag(tp, ENABLE_ASF) != 0,
18053		    tg3_flag(tp, TSO_CAPABLE) != 0);
18054	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
18055		    tp->dma_rwctrl,
18056		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
18057		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
18058
18059	pci_save_state(pdev);
18060
18061	return 0;
18062
18063err_out_apeunmap:
18064	if (tp->aperegs) {
18065		iounmap(tp->aperegs);
18066		tp->aperegs = NULL;
18067	}
18068
18069err_out_iounmap:
18070	if (tp->regs) {
18071		iounmap(tp->regs);
18072		tp->regs = NULL;
18073	}
18074
18075err_out_free_dev:
18076	free_netdev(dev);
18077
18078err_out_free_res:
18079	pci_release_regions(pdev);
18080
18081err_out_disable_pdev:
18082	if (pci_is_enabled(pdev))
18083		pci_disable_device(pdev);
18084	return err;
18085}
18086
18087static void tg3_remove_one(struct pci_dev *pdev)
18088{
18089	struct net_device *dev = pci_get_drvdata(pdev);
18090
18091	if (dev) {
18092		struct tg3 *tp = netdev_priv(dev);
18093
18094		tg3_ptp_fini(tp);
18095
18096		release_firmware(tp->fw);
18097
18098		tg3_reset_task_cancel(tp);
18099
18100		if (tg3_flag(tp, USE_PHYLIB)) {
18101			tg3_phy_fini(tp);
18102			tg3_mdio_fini(tp);
18103		}
18104
18105		unregister_netdev(dev);
18106		if (tp->aperegs) {
18107			iounmap(tp->aperegs);
18108			tp->aperegs = NULL;
18109		}
18110		if (tp->regs) {
18111			iounmap(tp->regs);
18112			tp->regs = NULL;
18113		}
18114		free_netdev(dev);
18115		pci_release_regions(pdev);
18116		pci_disable_device(pdev);
18117	}
18118}
18119
18120#ifdef CONFIG_PM_SLEEP
18121static int tg3_suspend(struct device *device)
18122{
18123	struct net_device *dev = dev_get_drvdata(device);
18124	struct tg3 *tp = netdev_priv(dev);
18125
18126	rtnl_lock();
18127
18128	if (!netif_running(dev))
18129		goto unlock;
18130
18131	tg3_reset_task_cancel(tp);
18132	tg3_phy_stop(tp);
18133	tg3_netif_stop(tp);
18134
18135	tg3_timer_stop(tp);
18136
18137	tg3_full_lock(tp, 1);
18138	tg3_disable_ints(tp);
18139	tg3_full_unlock(tp);
18140
18141	netif_device_detach(dev);
18142
18143	tg3_full_lock(tp, 0);
18144	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18145	tg3_flag_clear(tp, INIT_COMPLETE);
18146	tg3_full_unlock(tp);
18147
18148	tg3_power_down_prepare(tp);
18149
18150unlock:
18151	rtnl_unlock();
18152	return 0;
18153}
18154
18155static int tg3_resume(struct device *device)
18156{
18157	struct net_device *dev = dev_get_drvdata(device);
18158	struct tg3 *tp = netdev_priv(dev);
18159	int err = 0;
18160
18161	rtnl_lock();
18162
18163	if (!netif_running(dev))
18164		goto unlock;
18165
18166	netif_device_attach(dev);
18167
18168	tg3_full_lock(tp, 0);
18169
18170	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18171
18172	tg3_flag_set(tp, INIT_COMPLETE);
18173	err = tg3_restart_hw(tp,
18174			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18175	if (err)
18176		goto out;
18177
18178	tg3_timer_start(tp);
18179
18180	tg3_netif_start(tp);
18181
18182out:
18183	tg3_full_unlock(tp);
18184
18185	if (!err)
18186		tg3_phy_start(tp);
18187
18188unlock:
18189	rtnl_unlock();
18190	return err;
18191}
18192#endif /* CONFIG_PM_SLEEP */
18193
18194static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18195
18196/* Systems where ACPI _PTS (Prepare To Sleep) S5 will result in a fatal
18197 * PCIe AER event on the tg3 device if the tg3 device is not, or cannot
18198 * be, powered down.
18199 */
static const struct dmi_system_id tg3_restart_aer_quirk_table[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R440"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R540"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R640"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R650"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R740"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R750"),
		},
	},
	{}
};
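
/* A hypothetical sketch (not part of the original driver) of how a further
 * affected platform would be added to the table above; "PowerEdge R840" is
 * an invented example, not a known-affected model:
 *
 *	{
 *		.matches = {
 *			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
 *			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R840"),
 *		},
 *	},
 *
 * dmi_first_match() below returns the first entry whose .matches strings
 * all appear in the platform's DMI/SMBIOS data, or NULL if none do; the
 * empty terminating entry ends the scan.
 */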

static void tg3_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);

	tg3_reset_task_cancel(tp);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		dev_close(dev);

	if (system_state == SYSTEM_POWER_OFF)
		tg3_power_down(tp);
	else if (system_state == SYSTEM_RESTART &&
		 dmi_first_match(tg3_restart_aer_quirk_table) &&
		 pdev->current_state != PCI_D3cold &&
		 pdev->current_state != PCI_UNKNOWN) {
		/* Disable PCIe AER on the tg3 to avoid a fatal
		 * error during this system restart.
		 */
		pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL,
					   PCI_EXP_DEVCTL_CERE |
					   PCI_EXP_DEVCTL_NFERE |
					   PCI_EXP_DEVCTL_FERE |
					   PCI_EXP_DEVCTL_URRE);
	}

	rtnl_unlock();

	pci_disable_device(pdev);
}
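
/* The quirk path above only downgrades error signalling: clearing
 * PCI_EXP_DEVCTL_CERE/NFERE/FERE/URRE in the PCIe Device Control register
 * stops the device from reporting correctable, non-fatal, fatal and
 * unsupported-request errors for the remainder of the restart, so the
 * spurious AER event cannot be escalated into a fatal platform error.
 */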

/**
 * tg3_io_error_detected - called when a PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	/* Make sure the reset task doesn't run concurrently */
	tg3_reset_task_cancel(tp);

	rtnl_lock();

	/* Could be a second call, or we may not have a netdev yet */
	if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
		goto done;

	/* A permanent failure needs no recovery; only flag recovery
	 * as in progress for a frozen channel.
	 */
	if (state == pci_channel_io_frozen)
		tp->pcierr_recovery = true;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure) {
		if (netdev) {
			tg3_napi_enable(tp);
			dev_close(netdev);
		}
		err = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_disable_device(pdev);
	}

	rtnl_unlock();

	return err;
}
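
/* The @state argument distinguishes three channel conditions: normal
 * (I/O still possible), frozen (reads fail and the slot must be reset)
 * and perm_failure (the device is lost).  Returning
 * PCI_ERS_RESULT_NEED_RESET asks the PCI core to reset the slot and then
 * invoke tg3_io_slot_reset(); PCI_ERS_RESULT_DISCONNECT tells it the
 * device cannot be recovered.
 */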

/**
 * tg3_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netdev || !netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
		tg3_napi_enable(tp);
		dev_close(netdev);
	}
	rtnl_unlock();

	return rc;
}
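
/* pci_restore_state() rewrites the config space captured by the
 * pci_save_state() call at the end of tg3_init_one(); re-saving it right
 * away keeps a valid snapshot around in case the slot has to be reset
 * again before the driver next saves state itself.
 */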

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netdev || !netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	tg3_phy_start(tp);

done:
	tp->pcierr_recovery = false;
	rtnl_unlock();
}

static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};
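
/* Together these implement the standard AER recovery sequence: the PCI
 * core calls .error_detected when an error is reported, resets the slot
 * if asked to, calls .slot_reset once config space is usable again, and
 * finally calls .resume, which restarts traffic and clears the
 * pcierr_recovery flag whether or not the restart succeeded.
 */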

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= &tg3_pm_ops,
	.shutdown	= tg3_shutdown,
};

module_pci_driver(tg3_driver);
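
/* module_pci_driver() expands to the boilerplate module_init()/module_exit()
 * pair that registers tg3_driver with pci_register_driver() on module load
 * and removes it with pci_unregister_driver() on unload.
 */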