    1/*
    2 * tg3.c: Broadcom Tigon3 ethernet driver.
    3 *
    4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
    5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
    6 * Copyright (C) 2004 Sun Microsystems Inc.
    7 * Copyright (C) 2005-2012 Broadcom Corporation.
    8 *
    9 * Firmware is:
   10 *	Derived from proprietary unpublished source code,
   11 *	Copyright (C) 2000-2003 Broadcom Corporation.
   12 *
   13 *	Permission is hereby granted for the distribution of this firmware
   14 *	data in hexadecimal or equivalent format, provided this copyright
   15 *	notice is accompanying it.
   16 */
   17
   18
   19#include <linux/module.h>
   20#include <linux/moduleparam.h>
   21#include <linux/stringify.h>
   22#include <linux/kernel.h>
   23#include <linux/types.h>
   24#include <linux/compiler.h>
   25#include <linux/slab.h>
   26#include <linux/delay.h>
   27#include <linux/in.h>
   28#include <linux/init.h>
   29#include <linux/interrupt.h>
   30#include <linux/ioport.h>
   31#include <linux/pci.h>
   32#include <linux/netdevice.h>
   33#include <linux/etherdevice.h>
   34#include <linux/skbuff.h>
   35#include <linux/ethtool.h>
   36#include <linux/mdio.h>
   37#include <linux/mii.h>
   38#include <linux/phy.h>
   39#include <linux/brcmphy.h>
   40#include <linux/if_vlan.h>
   41#include <linux/ip.h>
   42#include <linux/tcp.h>
   43#include <linux/workqueue.h>
   44#include <linux/prefetch.h>
   45#include <linux/dma-mapping.h>
   46#include <linux/firmware.h>
   47
   48#include <net/checksum.h>
   49#include <net/ip.h>
   50
   51#include <linux/io.h>
   52#include <asm/byteorder.h>
   53#include <linux/uaccess.h>
   54
   55#ifdef CONFIG_SPARC
   56#include <asm/idprom.h>
   57#include <asm/prom.h>
   58#endif
   59
   60#define BAR_0	0
   61#define BAR_2	2
   62
   63#include "tg3.h"
   64
   65/* Functions & macros to verify TG3_FLAGS types */
   66
   67static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
   68{
   69	return test_bit(flag, bits);
   70}
   71
   72static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
   73{
   74	set_bit(flag, bits);
   75}
   76
   77static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
   78{
   79	clear_bit(flag, bits);
   80}
   81
   82#define tg3_flag(tp, flag)				\
   83	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
   84#define tg3_flag_set(tp, flag)				\
   85	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
   86#define tg3_flag_clear(tp, flag)			\
   87	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
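
/* Illustrative sketch (not driver code): the wrappers above give atomic,
 * type-checked access to the tg3_flags bitmap.  A typical call site,
 * assuming 'tp' points at a live struct tg3, looks like:
 *
 *	if (tg3_flag(tp, ENABLE_APE))
 *		tg3_flag_set(tp, MBOX_WRITE_REORDER);
 *	else
 *		tg3_flag_clear(tp, MBOX_WRITE_REORDER);
 */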
   88
   89#define DRV_MODULE_NAME		"tg3"
   90#define TG3_MAJ_NUM			3
   91#define TG3_MIN_NUM			123
   92#define DRV_MODULE_VERSION	\
   93	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
   94#define DRV_MODULE_RELDATE	"March 21, 2012"
   95
   96#define RESET_KIND_SHUTDOWN	0
   97#define RESET_KIND_INIT		1
   98#define RESET_KIND_SUSPEND	2
   99
  100#define TG3_DEF_RX_MODE		0
  101#define TG3_DEF_TX_MODE		0
  102#define TG3_DEF_MSG_ENABLE	  \
  103	(NETIF_MSG_DRV		| \
  104	 NETIF_MSG_PROBE	| \
  105	 NETIF_MSG_LINK		| \
  106	 NETIF_MSG_TIMER	| \
  107	 NETIF_MSG_IFDOWN	| \
  108	 NETIF_MSG_IFUP		| \
  109	 NETIF_MSG_RX_ERR	| \
  110	 NETIF_MSG_TX_ERR)
  111
  112#define TG3_GRC_LCLCTL_PWRSW_DELAY	100
  113
  114/* length of time before we decide the hardware is borked,
  115 * and dev->tx_timeout() should be called to fix the problem
  116 */
  117
  118#define TG3_TX_TIMEOUT			(5 * HZ)
  119
  120/* hardware minimum and maximum for a single frame's data payload */
  121#define TG3_MIN_MTU			60
  122#define TG3_MAX_MTU(tp)	\
  123	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
  124
  125/* These numbers seem to be hard coded in the NIC firmware somehow.
  126 * You can't change the ring sizes, but you can change where you place
  127 * them in the NIC onboard memory.
  128 */
  129#define TG3_RX_STD_RING_SIZE(tp) \
  130	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
  131	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
  132#define TG3_DEF_RX_RING_PENDING		200
  133#define TG3_RX_JMB_RING_SIZE(tp) \
  134	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
  135	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
  136#define TG3_DEF_RX_JUMBO_RING_PENDING	100
  137
   138/* Do not place this n-ring-entries value into the tp struct itself;
   139 * we really want to expose these constants to GCC so that modulo et
   140 * al. operations are done with shifts and masks instead of with
   141 * hw multiply/modulo instructions.  Another solution would be to
   142 * replace things like '% foo' with '& (foo - 1)'.
   143 */
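
/* For example (illustrative): because TG3_TX_RING_SIZE below is a power
 * of two, the compiler can lower a ring-index wrap to a single AND:
 *
 *	next = (idx + 1) % TG3_TX_RING_SIZE;	    becomes
 *	next = (idx + 1) & (TG3_TX_RING_SIZE - 1);
 *
 * which is exactly what the NEXT_TX() macro below spells out by hand.
 */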
  144
  145#define TG3_TX_RING_SIZE		512
  146#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
  147
  148#define TG3_RX_STD_RING_BYTES(tp) \
  149	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
  150#define TG3_RX_JMB_RING_BYTES(tp) \
  151	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
  152#define TG3_RX_RCB_RING_BYTES(tp) \
  153	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
  154#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
  155				 TG3_TX_RING_SIZE)
  156#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
  157
  158#define TG3_DMA_BYTE_ENAB		64
  159
  160#define TG3_RX_STD_DMA_SZ		1536
  161#define TG3_RX_JMB_DMA_SZ		9046
  162
  163#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)
  164
  165#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
  166#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
  167
  168#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
  169	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
  170
  171#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
  172	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
  173
  174/* Due to a hardware bug, the 5701 can only DMA to memory addresses
  175 * that are at least dword aligned when used in PCIX mode.  The driver
  176 * works around this bug by double copying the packet.  This workaround
  177 * is built into the normal double copy length check for efficiency.
  178 *
  179 * However, the double copy is only necessary on those architectures
  180 * where unaligned memory accesses are inefficient.  For those architectures
  181 * where unaligned memory accesses incur little penalty, we can reintegrate
  182 * the 5701 in the normal rx path.  Doing so saves a device structure
  183 * dereference by hardcoding the double copy threshold in place.
  184 */
  185#define TG3_RX_COPY_THRESHOLD		256
  186#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
  187	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
  188#else
  189	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
  190#endif
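
/* Illustrative hot-path use of the macros above: on receive, short
 * frames are copied into a fresh skb rather than remapping the DMA
 * buffer, roughly:
 *
 *	if (len > TG3_RX_COPY_THRESH(tp))
 *		...unmap and pass the original buffer up the stack...
 *	else
 *		...memcpy() into a small skb and recycle the buffer...
 */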
  191
  192#if (NET_IP_ALIGN != 0)
  193#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
  194#else
  195#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
  196#endif
  197
  198/* minimum number of free TX descriptors required to wake up TX process */
  199#define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
  200#define TG3_TX_BD_DMA_MAX_2K		2048
  201#define TG3_TX_BD_DMA_MAX_4K		4096
  202
  203#define TG3_RAW_IP_ALIGN 2
  204
  205#define TG3_FW_UPDATE_TIMEOUT_SEC	5
  206#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)
  207
  208#define FIRMWARE_TG3		"tigon/tg3.bin"
  209#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
  210#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
  211
  212static char version[] __devinitdata =
  213	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
  214
  215MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
  216MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
  217MODULE_LICENSE("GPL");
  218MODULE_VERSION(DRV_MODULE_VERSION);
  219MODULE_FIRMWARE(FIRMWARE_TG3);
  220MODULE_FIRMWARE(FIRMWARE_TG3TSO);
  221MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
  222
  223static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
  224module_param(tg3_debug, int, 0);
  225MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
  226
  227static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
  228	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
  229	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
  230	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
  231	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
  232	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
  233	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
  234	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
  235	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
  236	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
  237	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
  238	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
  239	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
  240	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
  241	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
  242	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
  243	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
  244	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
  245	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
  246	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
  247	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
  248	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
  249	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
  250	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
  251	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
  252	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
  253	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
  254	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
  255	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
  256	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
  257	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
  258	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
  259	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
  260	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
  261	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
  262	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
  263	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
  264	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
  265	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
  266	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
  267	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
  268	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
  269	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
  270	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
  271	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
  272	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
  273	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
  274	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
  275	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
  276	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
  277	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
  278	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
  279	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
  280	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
  281	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
  282	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
  283	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
  284	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
  285	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
  286	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
  287	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
  288	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
  289	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
  290	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
  291	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
  292	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
  293	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
  294	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
  295	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
  296	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
  297	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
  298	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
  299	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
  300	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
  301	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
  302	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
  303	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
  304	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
  305	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
  306	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
  307	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
  308	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
  309	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
  310	{}
  311};
  312
  313MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
  314
  315static const struct {
  316	const char string[ETH_GSTRING_LEN];
  317} ethtool_stats_keys[] = {
  318	{ "rx_octets" },
  319	{ "rx_fragments" },
  320	{ "rx_ucast_packets" },
  321	{ "rx_mcast_packets" },
  322	{ "rx_bcast_packets" },
  323	{ "rx_fcs_errors" },
  324	{ "rx_align_errors" },
  325	{ "rx_xon_pause_rcvd" },
  326	{ "rx_xoff_pause_rcvd" },
  327	{ "rx_mac_ctrl_rcvd" },
  328	{ "rx_xoff_entered" },
  329	{ "rx_frame_too_long_errors" },
  330	{ "rx_jabbers" },
  331	{ "rx_undersize_packets" },
  332	{ "rx_in_length_errors" },
  333	{ "rx_out_length_errors" },
  334	{ "rx_64_or_less_octet_packets" },
  335	{ "rx_65_to_127_octet_packets" },
  336	{ "rx_128_to_255_octet_packets" },
  337	{ "rx_256_to_511_octet_packets" },
  338	{ "rx_512_to_1023_octet_packets" },
  339	{ "rx_1024_to_1522_octet_packets" },
  340	{ "rx_1523_to_2047_octet_packets" },
  341	{ "rx_2048_to_4095_octet_packets" },
  342	{ "rx_4096_to_8191_octet_packets" },
  343	{ "rx_8192_to_9022_octet_packets" },
  344
  345	{ "tx_octets" },
  346	{ "tx_collisions" },
  347
  348	{ "tx_xon_sent" },
  349	{ "tx_xoff_sent" },
  350	{ "tx_flow_control" },
  351	{ "tx_mac_errors" },
  352	{ "tx_single_collisions" },
  353	{ "tx_mult_collisions" },
  354	{ "tx_deferred" },
  355	{ "tx_excessive_collisions" },
  356	{ "tx_late_collisions" },
  357	{ "tx_collide_2times" },
  358	{ "tx_collide_3times" },
  359	{ "tx_collide_4times" },
  360	{ "tx_collide_5times" },
  361	{ "tx_collide_6times" },
  362	{ "tx_collide_7times" },
  363	{ "tx_collide_8times" },
  364	{ "tx_collide_9times" },
  365	{ "tx_collide_10times" },
  366	{ "tx_collide_11times" },
  367	{ "tx_collide_12times" },
  368	{ "tx_collide_13times" },
  369	{ "tx_collide_14times" },
  370	{ "tx_collide_15times" },
  371	{ "tx_ucast_packets" },
  372	{ "tx_mcast_packets" },
  373	{ "tx_bcast_packets" },
  374	{ "tx_carrier_sense_errors" },
  375	{ "tx_discards" },
  376	{ "tx_errors" },
  377
  378	{ "dma_writeq_full" },
  379	{ "dma_write_prioq_full" },
  380	{ "rxbds_empty" },
  381	{ "rx_discards" },
  382	{ "rx_errors" },
  383	{ "rx_threshold_hit" },
  384
  385	{ "dma_readq_full" },
  386	{ "dma_read_prioq_full" },
  387	{ "tx_comp_queue_full" },
  388
  389	{ "ring_set_send_prod_index" },
  390	{ "ring_status_update" },
  391	{ "nic_irqs" },
  392	{ "nic_avoided_irqs" },
  393	{ "nic_tx_threshold_hit" },
  394
  395	{ "mbuf_lwm_thresh_hit" },
  396};
  397
  398#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
  399
  400
  401static const struct {
  402	const char string[ETH_GSTRING_LEN];
  403} ethtool_test_keys[] = {
  404	{ "nvram test        (online) " },
  405	{ "link test         (online) " },
  406	{ "register test     (offline)" },
  407	{ "memory test       (offline)" },
  408	{ "mac loopback test (offline)" },
  409	{ "phy loopback test (offline)" },
  410	{ "ext loopback test (offline)" },
  411	{ "interrupt test    (offline)" },
  412};
  413
  414#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
  415
  416
  417static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
  418{
  419	writel(val, tp->regs + off);
  420}
  421
  422static u32 tg3_read32(struct tg3 *tp, u32 off)
  423{
  424	return readl(tp->regs + off);
  425}
  426
  427static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
  428{
  429	writel(val, tp->aperegs + off);
  430}
  431
  432static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
  433{
  434	return readl(tp->aperegs + off);
  435}
  436
  437static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
  438{
  439	unsigned long flags;
  440
  441	spin_lock_irqsave(&tp->indirect_lock, flags);
  442	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
  443	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
  444	spin_unlock_irqrestore(&tp->indirect_lock, flags);
  445}
  446
  447static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
  448{
  449	writel(val, tp->regs + off);
  450	readl(tp->regs + off);
  451}
  452
  453static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
  454{
  455	unsigned long flags;
  456	u32 val;
  457
  458	spin_lock_irqsave(&tp->indirect_lock, flags);
  459	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
  460	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
  461	spin_unlock_irqrestore(&tp->indirect_lock, flags);
  462	return val;
  463}
  464
  465static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
  466{
  467	unsigned long flags;
  468
  469	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
  470		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
  471				       TG3_64BIT_REG_LOW, val);
  472		return;
  473	}
  474	if (off == TG3_RX_STD_PROD_IDX_REG) {
  475		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
  476				       TG3_64BIT_REG_LOW, val);
  477		return;
  478	}
  479
  480	spin_lock_irqsave(&tp->indirect_lock, flags);
  481	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
  482	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
  483	spin_unlock_irqrestore(&tp->indirect_lock, flags);
  484
  485	/* In indirect mode when disabling interrupts, we also need
  486	 * to clear the interrupt bit in the GRC local ctrl register.
  487	 */
  488	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
  489	    (val == 0x1)) {
  490		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
  491				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
  492	}
  493}
  494
  495static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
  496{
  497	unsigned long flags;
  498	u32 val;
  499
  500	spin_lock_irqsave(&tp->indirect_lock, flags);
  501	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
  502	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
  503	spin_unlock_irqrestore(&tp->indirect_lock, flags);
  504	return val;
  505}
  506
  507/* usec_wait specifies the wait time in usec when writing to certain registers
  508 * where it is unsafe to read back the register without some delay.
   509 * GRC_LOCAL_CTRL is one example, when the GPIOs are toggled to switch power.
   510 * TG3PCI_CLOCK_CTRL is another, when the clock frequencies are changed.
  511 */
  512static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
  513{
  514	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
  515		/* Non-posted methods */
  516		tp->write32(tp, off, val);
  517	else {
  518		/* Posted method */
  519		tg3_write32(tp, off, val);
  520		if (usec_wait)
  521			udelay(usec_wait);
  522		tp->read32(tp, off);
  523	}
  524	/* Wait again after the read for the posted method to guarantee that
  525	 * the wait time is met.
  526	 */
  527	if (usec_wait)
  528		udelay(usec_wait);
  529}
  530
  531static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
  532{
  533	tp->write32_mbox(tp, off, val);
  534	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
  535		tp->read32_mbox(tp, off);
  536}
  537
  538static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
  539{
  540	void __iomem *mbox = tp->regs + off;
  541	writel(val, mbox);
  542	if (tg3_flag(tp, TXD_MBOX_HWBUG))
  543		writel(val, mbox);
  544	if (tg3_flag(tp, MBOX_WRITE_REORDER))
  545		readl(mbox);
  546}
  547
  548static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
  549{
  550	return readl(tp->regs + off + GRCMBOX_BASE);
  551}
  552
  553static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
  554{
  555	writel(val, tp->regs + off + GRCMBOX_BASE);
  556}
  557
  558#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
  559#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
  560#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
  561#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
  562#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)
  563
  564#define tw32(reg, val)			tp->write32(tp, reg, val)
  565#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
  566#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
  567#define tr32(reg)			tp->read32(tp, reg)
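
/* Sketch (illustrative) of how the flush variants are used; e.g. a GPIO
 * power-switch write combines the posted write with the mandatory delay
 * defined near the top of this file:
 *
 *	tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | grc_local_ctrl,
 *		    TG3_GRC_LCLCTL_PWRSW_DELAY);
 *
 * guaranteeing at least 100 usec elapse before the GPIOs are touched
 * again.
 */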
  568
  569static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
  570{
  571	unsigned long flags;
  572
  573	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
  574	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
  575		return;
  576
  577	spin_lock_irqsave(&tp->indirect_lock, flags);
  578	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
  579		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
  580		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
  581
  582		/* Always leave this as zero. */
  583		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
  584	} else {
  585		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
  586		tw32_f(TG3PCI_MEM_WIN_DATA, val);
  587
  588		/* Always leave this as zero. */
  589		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
  590	}
  591	spin_unlock_irqrestore(&tp->indirect_lock, flags);
  592}
  593
  594static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
  595{
  596	unsigned long flags;
  597
  598	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
  599	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
  600		*val = 0;
  601		return;
  602	}
  603
  604	spin_lock_irqsave(&tp->indirect_lock, flags);
  605	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
  606		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
  607		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
  608
  609		/* Always leave this as zero. */
  610		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
  611	} else {
  612		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
  613		*val = tr32(TG3PCI_MEM_WIN_DATA);
  614
  615		/* Always leave this as zero. */
  616		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
  617	}
  618	spin_unlock_irqrestore(&tp->indirect_lock, flags);
  619}
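
/* Illustrative use of the SRAM window helpers above: the firmware
 * mailboxes live in NIC SRAM and are accessed one 32-bit word at a
 * time, e.g.
 *
 *	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
 *	tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
 *
 * both of which appear later in this file.
 */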
  620
  621static void tg3_ape_lock_init(struct tg3 *tp)
  622{
  623	int i;
  624	u32 regbase, bit;
  625
  626	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
  627		regbase = TG3_APE_LOCK_GRANT;
  628	else
  629		regbase = TG3_APE_PER_LOCK_GRANT;
  630
   631	/* Make sure the driver isn't holding any stale locks. */
  632	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
  633		switch (i) {
  634		case TG3_APE_LOCK_PHY0:
  635		case TG3_APE_LOCK_PHY1:
  636		case TG3_APE_LOCK_PHY2:
  637		case TG3_APE_LOCK_PHY3:
  638			bit = APE_LOCK_GRANT_DRIVER;
  639			break;
  640		default:
  641			if (!tp->pci_fn)
  642				bit = APE_LOCK_GRANT_DRIVER;
  643			else
  644				bit = 1 << tp->pci_fn;
  645		}
  646		tg3_ape_write32(tp, regbase + 4 * i, bit);
  647	}
  648
  649}
  650
  651static int tg3_ape_lock(struct tg3 *tp, int locknum)
  652{
  653	int i, off;
  654	int ret = 0;
  655	u32 status, req, gnt, bit;
  656
  657	if (!tg3_flag(tp, ENABLE_APE))
  658		return 0;
  659
  660	switch (locknum) {
  661	case TG3_APE_LOCK_GPIO:
  662		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
   663			return 0;	/* else: fall through */
  664	case TG3_APE_LOCK_GRC:
  665	case TG3_APE_LOCK_MEM:
  666		if (!tp->pci_fn)
  667			bit = APE_LOCK_REQ_DRIVER;
  668		else
  669			bit = 1 << tp->pci_fn;
  670		break;
  671	default:
  672		return -EINVAL;
  673	}
  674
  675	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
  676		req = TG3_APE_LOCK_REQ;
  677		gnt = TG3_APE_LOCK_GRANT;
  678	} else {
  679		req = TG3_APE_PER_LOCK_REQ;
  680		gnt = TG3_APE_PER_LOCK_GRANT;
  681	}
  682
  683	off = 4 * locknum;
  684
  685	tg3_ape_write32(tp, req + off, bit);
  686
  687	/* Wait for up to 1 millisecond to acquire lock. */
  688	for (i = 0; i < 100; i++) {
  689		status = tg3_ape_read32(tp, gnt + off);
  690		if (status == bit)
  691			break;
  692		udelay(10);
  693	}
  694
  695	if (status != bit) {
  696		/* Revoke the lock request. */
  697		tg3_ape_write32(tp, gnt + off, bit);
  698		ret = -EBUSY;
  699	}
  700
  701	return ret;
  702}
  703
  704static void tg3_ape_unlock(struct tg3 *tp, int locknum)
  705{
  706	u32 gnt, bit;
  707
  708	if (!tg3_flag(tp, ENABLE_APE))
  709		return;
  710
  711	switch (locknum) {
  712	case TG3_APE_LOCK_GPIO:
  713		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
   714			return;		/* else: fall through */
  715	case TG3_APE_LOCK_GRC:
  716	case TG3_APE_LOCK_MEM:
  717		if (!tp->pci_fn)
  718			bit = APE_LOCK_GRANT_DRIVER;
  719		else
  720			bit = 1 << tp->pci_fn;
  721		break;
  722	default:
  723		return;
  724	}
  725
  726	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
  727		gnt = TG3_APE_LOCK_GRANT;
  728	else
  729		gnt = TG3_APE_PER_LOCK_GRANT;
  730
  731	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
  732}
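
/* Typical lock/unlock pairing around an APE-shared resource
 * (illustrative sketch; tg3_ape_send_event() below does the real thing,
 * bailing out if the lock is not granted within ~1 ms):
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return;
 *	...access the shared APE region...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 */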
  733
  734static void tg3_ape_send_event(struct tg3 *tp, u32 event)
  735{
  736	int i;
  737	u32 apedata;
  738
  739	/* NCSI does not support APE events */
  740	if (tg3_flag(tp, APE_HAS_NCSI))
  741		return;
  742
  743	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
  744	if (apedata != APE_SEG_SIG_MAGIC)
  745		return;
  746
  747	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
  748	if (!(apedata & APE_FW_STATUS_READY))
  749		return;
  750
  751	/* Wait for up to 1 millisecond for APE to service previous event. */
  752	for (i = 0; i < 10; i++) {
  753		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
  754			return;
  755
  756		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
  757
  758		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
  759			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
  760					event | APE_EVENT_STATUS_EVENT_PENDING);
  761
  762		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
  763
  764		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
  765			break;
  766
  767		udelay(100);
  768	}
  769
  770	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
  771		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
  772}
  773
  774static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
  775{
  776	u32 event;
  777	u32 apedata;
  778
  779	if (!tg3_flag(tp, ENABLE_APE))
  780		return;
  781
  782	switch (kind) {
  783	case RESET_KIND_INIT:
  784		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
  785				APE_HOST_SEG_SIG_MAGIC);
  786		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
  787				APE_HOST_SEG_LEN_MAGIC);
  788		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
  789		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
  790		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
  791			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
  792		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
  793				APE_HOST_BEHAV_NO_PHYLOCK);
  794		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
  795				    TG3_APE_HOST_DRVR_STATE_START);
  796
  797		event = APE_EVENT_STATUS_STATE_START;
  798		break;
  799	case RESET_KIND_SHUTDOWN:
  800		/* With the interface we are currently using,
  801		 * APE does not track driver state.  Wiping
  802		 * out the HOST SEGMENT SIGNATURE forces
  803		 * the APE to assume OS absent status.
  804		 */
  805		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
  806
  807		if (device_may_wakeup(&tp->pdev->dev) &&
  808		    tg3_flag(tp, WOL_ENABLE)) {
  809			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
  810					    TG3_APE_HOST_WOL_SPEED_AUTO);
  811			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
  812		} else
  813			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
  814
  815		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
  816
  817		event = APE_EVENT_STATUS_STATE_UNLOAD;
  818		break;
  819	case RESET_KIND_SUSPEND:
  820		event = APE_EVENT_STATUS_STATE_SUSPEND;
  821		break;
  822	default:
  823		return;
  824	}
  825
  826	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
  827
  828	tg3_ape_send_event(tp, event);
  829}
  830
  831static void tg3_disable_ints(struct tg3 *tp)
  832{
  833	int i;
  834
  835	tw32(TG3PCI_MISC_HOST_CTRL,
  836	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
  837	for (i = 0; i < tp->irq_max; i++)
  838		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
  839}
  840
  841static void tg3_enable_ints(struct tg3 *tp)
  842{
  843	int i;
  844
  845	tp->irq_sync = 0;
  846	wmb();
  847
  848	tw32(TG3PCI_MISC_HOST_CTRL,
  849	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
  850
  851	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
  852	for (i = 0; i < tp->irq_cnt; i++) {
  853		struct tg3_napi *tnapi = &tp->napi[i];
  854
  855		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
  856		if (tg3_flag(tp, 1SHOT_MSI))
  857			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
  858
  859		tp->coal_now |= tnapi->coal_now;
  860	}
  861
  862	/* Force an initial interrupt */
  863	if (!tg3_flag(tp, TAGGED_STATUS) &&
  864	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
  865		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
  866	else
  867		tw32(HOSTCC_MODE, tp->coal_now);
  868
  869	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
  870}
  871
  872static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
  873{
  874	struct tg3 *tp = tnapi->tp;
  875	struct tg3_hw_status *sblk = tnapi->hw_status;
  876	unsigned int work_exists = 0;
  877
  878	/* check for phy events */
  879	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
  880		if (sblk->status & SD_STATUS_LINK_CHG)
  881			work_exists = 1;
  882	}
  883
  884	/* check for TX work to do */
  885	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
  886		work_exists = 1;
  887
  888	/* check for RX work to do */
  889	if (tnapi->rx_rcb_prod_idx &&
  890	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
  891		work_exists = 1;
  892
  893	return work_exists;
  894}
  895
   896/* tg3_int_reenable
   897 *  Similar to tg3_enable_ints, but it accurately determines whether there
   898 *  is new work pending and can return without flushing the PIO write
   899 *  that reenables interrupts.
   900 */
  901static void tg3_int_reenable(struct tg3_napi *tnapi)
  902{
  903	struct tg3 *tp = tnapi->tp;
  904
  905	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
  906	mmiowb();
  907
  908	/* When doing tagged status, this work check is unnecessary.
  909	 * The last_tag we write above tells the chip which piece of
  910	 * work we've completed.
  911	 */
  912	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
  913		tw32(HOSTCC_MODE, tp->coalesce_mode |
  914		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
  915}
  916
  917static void tg3_switch_clocks(struct tg3 *tp)
  918{
  919	u32 clock_ctrl;
  920	u32 orig_clock_ctrl;
  921
  922	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
  923		return;
  924
  925	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
  926
  927	orig_clock_ctrl = clock_ctrl;
  928	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
  929		       CLOCK_CTRL_CLKRUN_OENABLE |
  930		       0x1f);
  931	tp->pci_clock_ctrl = clock_ctrl;
  932
  933	if (tg3_flag(tp, 5705_PLUS)) {
  934		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
  935			tw32_wait_f(TG3PCI_CLOCK_CTRL,
  936				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
  937		}
  938	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
  939		tw32_wait_f(TG3PCI_CLOCK_CTRL,
  940			    clock_ctrl |
  941			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
  942			    40);
  943		tw32_wait_f(TG3PCI_CLOCK_CTRL,
  944			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
  945			    40);
  946	}
  947	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
  948}
  949
  950#define PHY_BUSY_LOOPS	5000
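
/* Worked budget (illustrative): PHY_BUSY_LOOPS polls of udelay(10) in
 * the accessors below give the MI_COM state machine up to ~50 ms to
 * clear MI_COM_BUSY before an access fails with -EBUSY.
 */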
  951
  952static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
  953{
  954	u32 frame_val;
  955	unsigned int loops;
  956	int ret;
  957
  958	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
  959		tw32_f(MAC_MI_MODE,
  960		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
  961		udelay(80);
  962	}
  963
  964	*val = 0x0;
  965
  966	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
  967		      MI_COM_PHY_ADDR_MASK);
  968	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
  969		      MI_COM_REG_ADDR_MASK);
  970	frame_val |= (MI_COM_CMD_READ | MI_COM_START);
  971
  972	tw32_f(MAC_MI_COM, frame_val);
  973
  974	loops = PHY_BUSY_LOOPS;
  975	while (loops != 0) {
  976		udelay(10);
  977		frame_val = tr32(MAC_MI_COM);
  978
  979		if ((frame_val & MI_COM_BUSY) == 0) {
  980			udelay(5);
  981			frame_val = tr32(MAC_MI_COM);
  982			break;
  983		}
  984		loops -= 1;
  985	}
  986
  987	ret = -EBUSY;
  988	if (loops != 0) {
  989		*val = frame_val & MI_COM_DATA_MASK;
  990		ret = 0;
  991	}
  992
  993	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
  994		tw32_f(MAC_MI_MODE, tp->mi_mode);
  995		udelay(80);
  996	}
  997
  998	return ret;
  999}
 1000
 1001static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
 1002{
 1003	u32 frame_val;
 1004	unsigned int loops;
 1005	int ret;
 1006
 1007	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
 1008	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
 1009		return 0;
 1010
 1011	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
 1012		tw32_f(MAC_MI_MODE,
 1013		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
 1014		udelay(80);
 1015	}
 1016
 1017	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
 1018		      MI_COM_PHY_ADDR_MASK);
 1019	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
 1020		      MI_COM_REG_ADDR_MASK);
 1021	frame_val |= (val & MI_COM_DATA_MASK);
 1022	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
 1023
 1024	tw32_f(MAC_MI_COM, frame_val);
 1025
 1026	loops = PHY_BUSY_LOOPS;
 1027	while (loops != 0) {
 1028		udelay(10);
 1029		frame_val = tr32(MAC_MI_COM);
 1030		if ((frame_val & MI_COM_BUSY) == 0) {
 1031			udelay(5);
 1032			frame_val = tr32(MAC_MI_COM);
 1033			break;
 1034		}
 1035		loops -= 1;
 1036	}
 1037
 1038	ret = -EBUSY;
 1039	if (loops != 0)
 1040		ret = 0;
 1041
 1042	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
 1043		tw32_f(MAC_MI_MODE, tp->mi_mode);
 1044		udelay(80);
 1045	}
 1046
 1047	return ret;
 1048}
 1049
 1050static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
 1051{
 1052	int err;
 1053
 1054	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
 1055	if (err)
 1056		goto done;
 1057
 1058	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
 1059	if (err)
 1060		goto done;
 1061
 1062	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
 1063			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
 1064	if (err)
 1065		goto done;
 1066
 1067	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
 1068
 1069done:
 1070	return err;
 1071}
 1072
 1073static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
 1074{
 1075	int err;
 1076
 1077	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
 1078	if (err)
 1079		goto done;
 1080
 1081	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
 1082	if (err)
 1083		goto done;
 1084
 1085	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
 1086			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
 1087	if (err)
 1088		goto done;
 1089
 1090	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
 1091
 1092done:
 1093	return err;
 1094}
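
/* The two helpers above tunnel IEEE 802.3 Clause 45 accesses through
 * the Clause 22 MMD registers: latch the device address (devad), latch
 * the register address, flip to no-post-increment data mode, then move
 * the payload.  A hedged usage sketch (constants from <linux/mdio.h>):
 *
 *	tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0);
 */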
 1095
 1096static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
 1097{
 1098	int err;
 1099
 1100	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
 1101	if (!err)
 1102		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
 1103
 1104	return err;
 1105}
 1106
 1107static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
 1108{
 1109	int err;
 1110
 1111	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
 1112	if (!err)
 1113		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
 1114
 1115	return err;
 1116}
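
/* The DSP registers form an address/data pair: latch the target in
 * MII_TG3_DSP_ADDRESS, then move data through MII_TG3_DSP_RW_PORT.
 * A read-modify-write is therefore simply (sketch; 'bit' stands for
 * whatever DSP bit the caller cares about):
 *
 *	if (!tg3_phydsp_read(tp, reg, &val))
 *		tg3_phydsp_write(tp, reg, val | bit);
 */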
 1117
 1118static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
 1119{
 1120	int err;
 1121
 1122	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
 1123			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
 1124			   MII_TG3_AUXCTL_SHDWSEL_MISC);
 1125	if (!err)
 1126		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
 1127
 1128	return err;
 1129}
 1130
 1131static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
 1132{
 1133	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
 1134		set |= MII_TG3_AUXCTL_MISC_WREN;
 1135
 1136	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
 1137}
 1138
 1139#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
 1140	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
 1141			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
 1142			     MII_TG3_AUXCTL_ACTL_TX_6DB)
 1143
 1144#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
 1145	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
  1146			     MII_TG3_AUXCTL_ACTL_TX_6DB)
 1147
 1148static int tg3_bmcr_reset(struct tg3 *tp)
 1149{
 1150	u32 phy_control;
 1151	int limit, err;
 1152
 1153	/* OK, reset it, and poll the BMCR_RESET bit until it
 1154	 * clears or we time out.
 1155	 */
 1156	phy_control = BMCR_RESET;
 1157	err = tg3_writephy(tp, MII_BMCR, phy_control);
 1158	if (err != 0)
 1159		return -EBUSY;
 1160
 1161	limit = 5000;
 1162	while (limit--) {
 1163		err = tg3_readphy(tp, MII_BMCR, &phy_control);
 1164		if (err != 0)
 1165			return -EBUSY;
 1166
 1167		if ((phy_control & BMCR_RESET) == 0) {
 1168			udelay(40);
 1169			break;
 1170		}
 1171		udelay(10);
 1172	}
 1173	if (limit < 0)
 1174		return -EBUSY;
 1175
 1176	return 0;
 1177}
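
/* Worst-case budget (illustrative): 5000 polls of udelay(10) bound the
 * BMCR_RESET wait above at ~50 ms, plus a 40 usec settle once the bit
 * clears.
 */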
 1178
 1179static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
 1180{
 1181	struct tg3 *tp = bp->priv;
 1182	u32 val;
 1183
 1184	spin_lock_bh(&tp->lock);
 1185
 1186	if (tg3_readphy(tp, reg, &val))
 1187		val = -EIO;
 1188
 1189	spin_unlock_bh(&tp->lock);
 1190
 1191	return val;
 1192}
 1193
 1194static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
 1195{
 1196	struct tg3 *tp = bp->priv;
 1197	u32 ret = 0;
 1198
 1199	spin_lock_bh(&tp->lock);
 1200
 1201	if (tg3_writephy(tp, reg, val))
 1202		ret = -EIO;
 1203
 1204	spin_unlock_bh(&tp->lock);
 1205
 1206	return ret;
 1207}
 1208
 1209static int tg3_mdio_reset(struct mii_bus *bp)
 1210{
 1211	return 0;
 1212}
 1213
 1214static void tg3_mdio_config_5785(struct tg3 *tp)
 1215{
 1216	u32 val;
 1217	struct phy_device *phydev;
 1218
 1219	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 1220	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
 1221	case PHY_ID_BCM50610:
 1222	case PHY_ID_BCM50610M:
 1223		val = MAC_PHYCFG2_50610_LED_MODES;
 1224		break;
 1225	case PHY_ID_BCMAC131:
 1226		val = MAC_PHYCFG2_AC131_LED_MODES;
 1227		break;
 1228	case PHY_ID_RTL8211C:
 1229		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
 1230		break;
 1231	case PHY_ID_RTL8201E:
 1232		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
 1233		break;
 1234	default:
 1235		return;
 1236	}
 1237
 1238	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
 1239		tw32(MAC_PHYCFG2, val);
 1240
 1241		val = tr32(MAC_PHYCFG1);
 1242		val &= ~(MAC_PHYCFG1_RGMII_INT |
 1243			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
 1244		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
 1245		tw32(MAC_PHYCFG1, val);
 1246
 1247		return;
 1248	}
 1249
 1250	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
 1251		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
 1252		       MAC_PHYCFG2_FMODE_MASK_MASK |
 1253		       MAC_PHYCFG2_GMODE_MASK_MASK |
 1254		       MAC_PHYCFG2_ACT_MASK_MASK   |
 1255		       MAC_PHYCFG2_QUAL_MASK_MASK |
 1256		       MAC_PHYCFG2_INBAND_ENABLE;
 1257
 1258	tw32(MAC_PHYCFG2, val);
 1259
 1260	val = tr32(MAC_PHYCFG1);
 1261	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
 1262		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
 1263	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
 1264		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
 1265			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
 1266		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
 1267			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
 1268	}
 1269	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
 1270	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
 1271	tw32(MAC_PHYCFG1, val);
 1272
 1273	val = tr32(MAC_EXT_RGMII_MODE);
 1274	val &= ~(MAC_RGMII_MODE_RX_INT_B |
 1275		 MAC_RGMII_MODE_RX_QUALITY |
 1276		 MAC_RGMII_MODE_RX_ACTIVITY |
 1277		 MAC_RGMII_MODE_RX_ENG_DET |
 1278		 MAC_RGMII_MODE_TX_ENABLE |
 1279		 MAC_RGMII_MODE_TX_LOWPWR |
 1280		 MAC_RGMII_MODE_TX_RESET);
 1281	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
 1282		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
 1283			val |= MAC_RGMII_MODE_RX_INT_B |
 1284			       MAC_RGMII_MODE_RX_QUALITY |
 1285			       MAC_RGMII_MODE_RX_ACTIVITY |
 1286			       MAC_RGMII_MODE_RX_ENG_DET;
 1287		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
 1288			val |= MAC_RGMII_MODE_TX_ENABLE |
 1289			       MAC_RGMII_MODE_TX_LOWPWR |
 1290			       MAC_RGMII_MODE_TX_RESET;
 1291	}
 1292	tw32(MAC_EXT_RGMII_MODE, val);
 1293}
 1294
 1295static void tg3_mdio_start(struct tg3 *tp)
 1296{
 1297	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
 1298	tw32_f(MAC_MI_MODE, tp->mi_mode);
 1299	udelay(80);
 1300
 1301	if (tg3_flag(tp, MDIOBUS_INITED) &&
 1302	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
 1303		tg3_mdio_config_5785(tp);
 1304}
 1305
 1306static int tg3_mdio_init(struct tg3 *tp)
 1307{
 1308	int i;
 1309	u32 reg;
 1310	struct phy_device *phydev;
 1311
 1312	if (tg3_flag(tp, 5717_PLUS)) {
 1313		u32 is_serdes;
 1314
 1315		tp->phy_addr = tp->pci_fn + 1;
 1316
 1317		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
 1318			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
 1319		else
 1320			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
 1321				    TG3_CPMU_PHY_STRAP_IS_SERDES;
 1322		if (is_serdes)
 1323			tp->phy_addr += 7;
 1324	} else
 1325		tp->phy_addr = TG3_PHY_MII_ADDR;
 1326
 1327	tg3_mdio_start(tp);
 1328
 1329	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
 1330		return 0;
 1331
 1332	tp->mdio_bus = mdiobus_alloc();
 1333	if (tp->mdio_bus == NULL)
 1334		return -ENOMEM;
 1335
 1336	tp->mdio_bus->name     = "tg3 mdio bus";
 1337	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
 1338		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
 1339	tp->mdio_bus->priv     = tp;
 1340	tp->mdio_bus->parent   = &tp->pdev->dev;
 1341	tp->mdio_bus->read     = &tg3_mdio_read;
 1342	tp->mdio_bus->write    = &tg3_mdio_write;
 1343	tp->mdio_bus->reset    = &tg3_mdio_reset;
 1344	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
 1345	tp->mdio_bus->irq      = &tp->mdio_irq[0];
 1346
 1347	for (i = 0; i < PHY_MAX_ADDR; i++)
 1348		tp->mdio_bus->irq[i] = PHY_POLL;
 1349
 1350	/* The bus registration will look for all the PHYs on the mdio bus.
 1351	 * Unfortunately, it does not ensure the PHY is powered up before
 1352	 * accessing the PHY ID registers.  A chip reset is the
  1353	 * quickest way to bring the device back to an operational state.
 1354	 */
 1355	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
 1356		tg3_bmcr_reset(tp);
 1357
 1358	i = mdiobus_register(tp->mdio_bus);
 1359	if (i) {
 1360		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
 1361		mdiobus_free(tp->mdio_bus);
 1362		return i;
 1363	}
 1364
 1365	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 1366
 1367	if (!phydev || !phydev->drv) {
 1368		dev_warn(&tp->pdev->dev, "No PHY devices\n");
 1369		mdiobus_unregister(tp->mdio_bus);
 1370		mdiobus_free(tp->mdio_bus);
 1371		return -ENODEV;
 1372	}
 1373
 1374	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
 1375	case PHY_ID_BCM57780:
 1376		phydev->interface = PHY_INTERFACE_MODE_GMII;
 1377		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
 1378		break;
 1379	case PHY_ID_BCM50610:
 1380	case PHY_ID_BCM50610M:
 1381		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
 1382				     PHY_BRCM_RX_REFCLK_UNUSED |
 1383				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
 1384				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
 1385		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
 1386			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
 1387		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
 1388			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
 1389		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
 1390			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
 1391		/* fallthru */
 1392	case PHY_ID_RTL8211C:
 1393		phydev->interface = PHY_INTERFACE_MODE_RGMII;
 1394		break;
 1395	case PHY_ID_RTL8201E:
 1396	case PHY_ID_BCMAC131:
 1397		phydev->interface = PHY_INTERFACE_MODE_MII;
 1398		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
 1399		tp->phy_flags |= TG3_PHYFLG_IS_FET;
 1400		break;
 1401	}
 1402
 1403	tg3_flag_set(tp, MDIOBUS_INITED);
 1404
 1405	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
 1406		tg3_mdio_config_5785(tp);
 1407
 1408	return 0;
 1409}
 1410
 1411static void tg3_mdio_fini(struct tg3 *tp)
 1412{
 1413	if (tg3_flag(tp, MDIOBUS_INITED)) {
 1414		tg3_flag_clear(tp, MDIOBUS_INITED);
 1415		mdiobus_unregister(tp->mdio_bus);
 1416		mdiobus_free(tp->mdio_bus);
 1417	}
 1418}
 1419
 1420/* tp->lock is held. */
 1421static inline void tg3_generate_fw_event(struct tg3 *tp)
 1422{
 1423	u32 val;
 1424
 1425	val = tr32(GRC_RX_CPU_EVENT);
 1426	val |= GRC_RX_CPU_DRIVER_EVENT;
 1427	tw32_f(GRC_RX_CPU_EVENT, val);
 1428
 1429	tp->last_event_jiffies = jiffies;
 1430}
 1431
 1432#define TG3_FW_EVENT_TIMEOUT_USEC 2500
 1433
 1434/* tp->lock is held. */
 1435static void tg3_wait_for_event_ack(struct tg3 *tp)
 1436{
 1437	int i;
 1438	unsigned int delay_cnt;
 1439	long time_remain;
 1440
 1441	/* If enough time has passed, no wait is necessary. */
 1442	time_remain = (long)(tp->last_event_jiffies + 1 +
 1443		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
 1444		      (long)jiffies;
 1445	if (time_remain < 0)
 1446		return;
 1447
 1448	/* Check if we can shorten the wait time. */
 1449	delay_cnt = jiffies_to_usecs(time_remain);
 1450	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
 1451		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
 1452	delay_cnt = (delay_cnt >> 3) + 1;
 1453
 1454	for (i = 0; i < delay_cnt; i++) {
 1455		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
 1456			break;
 1457		udelay(8);
 1458	}
 1459}
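
/* Worked example (illustrative): with the full TG3_FW_EVENT_TIMEOUT_USEC
 * of 2500 usec still outstanding, delay_cnt becomes (2500 >> 3) + 1 =
 * 313 polls of 8 usec each, i.e. ~2.5 ms worst case before giving up
 * on the previous event's ACK.
 */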
 1460
 1461/* tp->lock is held. */
 1462static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
 1463{
 1464	u32 reg, val;
 1465
 1466	val = 0;
 1467	if (!tg3_readphy(tp, MII_BMCR, &reg))
 1468		val = reg << 16;
 1469	if (!tg3_readphy(tp, MII_BMSR, &reg))
 1470		val |= (reg & 0xffff);
 1471	*data++ = val;
 1472
 1473	val = 0;
 1474	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
 1475		val = reg << 16;
 1476	if (!tg3_readphy(tp, MII_LPA, &reg))
 1477		val |= (reg & 0xffff);
 1478	*data++ = val;
 1479
 1480	val = 0;
 1481	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
 1482		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
 1483			val = reg << 16;
 1484		if (!tg3_readphy(tp, MII_STAT1000, &reg))
 1485			val |= (reg & 0xffff);
 1486	}
 1487	*data++ = val;
 1488
 1489	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
 1490		val = reg << 16;
 1491	else
 1492		val = 0;
 1493	*data++ = val;
 1494}
 1495
 1496/* tp->lock is held. */
 1497static void tg3_ump_link_report(struct tg3 *tp)
 1498{
 1499	u32 data[4];
 1500
 1501	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
 1502		return;
 1503
 1504	tg3_phy_gather_ump_data(tp, data);
 1505
 1506	tg3_wait_for_event_ack(tp);
 1507
 1508	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
 1509	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
 1510	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
 1511	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
 1512	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
 1513	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
 1514
 1515	tg3_generate_fw_event(tp);
 1516}
 1517
 1518/* tp->lock is held. */
 1519static void tg3_stop_fw(struct tg3 *tp)
 1520{
 1521	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
 1522		/* Wait for RX cpu to ACK the previous event. */
 1523		tg3_wait_for_event_ack(tp);
 1524
 1525		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
 1526
 1527		tg3_generate_fw_event(tp);
 1528
 1529		/* Wait for RX cpu to ACK this event. */
 1530		tg3_wait_for_event_ack(tp);
 1531	}
 1532}
 1533
 1534/* tp->lock is held. */
 1535static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
 1536{
 1537	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
 1538		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
 1539
 1540	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
 1541		switch (kind) {
 1542		case RESET_KIND_INIT:
 1543			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
 1544				      DRV_STATE_START);
 1545			break;
 1546
 1547		case RESET_KIND_SHUTDOWN:
 1548			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
 1549				      DRV_STATE_UNLOAD);
 1550			break;
 1551
 1552		case RESET_KIND_SUSPEND:
 1553			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
 1554				      DRV_STATE_SUSPEND);
 1555			break;
 1556
 1557		default:
 1558			break;
 1559		}
 1560	}
 1561
 1562	if (kind == RESET_KIND_INIT ||
 1563	    kind == RESET_KIND_SUSPEND)
 1564		tg3_ape_driver_state_change(tp, kind);
 1565}
 1566
 1567/* tp->lock is held. */
 1568static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
 1569{
 1570	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
 1571		switch (kind) {
 1572		case RESET_KIND_INIT:
 1573			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
 1574				      DRV_STATE_START_DONE);
 1575			break;
 1576
 1577		case RESET_KIND_SHUTDOWN:
 1578			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
 1579				      DRV_STATE_UNLOAD_DONE);
 1580			break;
 1581
 1582		default:
 1583			break;
 1584		}
 1585	}
 1586
 1587	if (kind == RESET_KIND_SHUTDOWN)
 1588		tg3_ape_driver_state_change(tp, kind);
 1589}
 1590
 1591/* tp->lock is held. */
 1592static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
 1593{
 1594	if (tg3_flag(tp, ENABLE_ASF)) {
 1595		switch (kind) {
 1596		case RESET_KIND_INIT:
 1597			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
 1598				      DRV_STATE_START);
 1599			break;
 1600
 1601		case RESET_KIND_SHUTDOWN:
 1602			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
 1603				      DRV_STATE_UNLOAD);
 1604			break;
 1605
 1606		case RESET_KIND_SUSPEND:
 1607			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
 1608				      DRV_STATE_SUSPEND);
 1609			break;
 1610
 1611		default:
 1612			break;
 1613		}
 1614	}
 1615}
 1616
 1617static int tg3_poll_fw(struct tg3 *tp)
 1618{
 1619	int i;
 1620	u32 val;
 1621
 1622	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
 1623		/* Wait up to 20ms for init done. */
 1624		for (i = 0; i < 200; i++) {
 1625			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
 1626				return 0;
 1627			udelay(100);
 1628		}
 1629		return -ENODEV;
 1630	}
 1631
 1632	/* Wait for firmware initialization to complete. */
 1633	for (i = 0; i < 100000; i++) {
 1634		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
 1635		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
 1636			break;
 1637		udelay(10);
 1638	}
 1639
 1640	/* Chip might not be fitted with firmware.  Some Sun onboard
 1641	 * parts are configured like that.  So don't signal the timeout
 1642	 * of the above loop as an error, but do report the lack of
 1643	 * running firmware once.
 1644	 */
 1645	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
 1646		tg3_flag_set(tp, NO_FWARE_REPORTED);
 1647
 1648		netdev_info(tp->dev, "No firmware running\n");
 1649	}
 1650
 1651	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
 1652		/* The 57765 A0 needs a little more
 1653		 * time to do some important work.
 1654		 */
 1655		mdelay(10);
 1656	}
 1657
 1658	return 0;
 1659}
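
/* Illustrative timing for the waits above: the 5906 path bounds VCPU
 * init at 200 * 100 usec = 20 ms, while the generic loop polls the
 * firmware mailbox 100000 times at 10 usec, i.e. up to ~1 second.
 */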
 1660
 1661static void tg3_link_report(struct tg3 *tp)
 1662{
 1663	if (!netif_carrier_ok(tp->dev)) {
 1664		netif_info(tp, link, tp->dev, "Link is down\n");
 1665		tg3_ump_link_report(tp);
 1666	} else if (netif_msg_link(tp)) {
 1667		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
 1668			    (tp->link_config.active_speed == SPEED_1000 ?
 1669			     1000 :
 1670			     (tp->link_config.active_speed == SPEED_100 ?
 1671			      100 : 10)),
 1672			    (tp->link_config.active_duplex == DUPLEX_FULL ?
 1673			     "full" : "half"));
 1674
 1675		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
 1676			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
 1677			    "on" : "off",
 1678			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
 1679			    "on" : "off");
 1680
 1681		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
 1682			netdev_info(tp->dev, "EEE is %s\n",
 1683				    tp->setlpicnt ? "enabled" : "disabled");
 1684
 1685		tg3_ump_link_report(tp);
 1686	}
 1687}
 1688
 1689static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
 1690{
 1691	u16 miireg;
 1692
 1693	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
 1694		miireg = ADVERTISE_1000XPAUSE;
 1695	else if (flow_ctrl & FLOW_CTRL_TX)
 1696		miireg = ADVERTISE_1000XPSE_ASYM;
 1697	else if (flow_ctrl & FLOW_CTRL_RX)
 1698		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
 1699	else
 1700		miireg = 0;
 1701
 1702	return miireg;
 1703}
 1704
 1705static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
 1706{
 1707	u8 cap = 0;
 1708
 1709	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
 1710		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
 1711	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
 1712		if (lcladv & ADVERTISE_1000XPAUSE)
 1713			cap = FLOW_CTRL_RX;
 1714		if (rmtadv & ADVERTISE_1000XPAUSE)
 1715			cap = FLOW_CTRL_TX;
 1716	}
 1717
 1718	return cap;
 1719}
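
/* Illustrative truth table for the 1000BASE-X pause resolution above
 * (PAUSE/ASYM are the ADVERTISE_1000XPAUSE/ADVERTISE_1000XPSE_ASYM bits
 * in the local and link-partner advertisement words):
 *
 *	local PAUSE/ASYM   remote PAUSE/ASYM   resolved flow control
 *	      1    x             1    x        FLOW_CTRL_TX | FLOW_CTRL_RX
 *	      0    1             1    1        FLOW_CTRL_TX
 *	      1    1             0    1        FLOW_CTRL_RX
 *	  anything else                        none
 */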
 1720
 1721static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
 1722{
 1723	u8 autoneg;
 1724	u8 flowctrl = 0;
 1725	u32 old_rx_mode = tp->rx_mode;
 1726	u32 old_tx_mode = tp->tx_mode;
 1727
 1728	if (tg3_flag(tp, USE_PHYLIB))
 1729		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
 1730	else
 1731		autoneg = tp->link_config.autoneg;
 1732
 1733	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
 1734		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
 1735			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
 1736		else
 1737			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
 1738	} else
 1739		flowctrl = tp->link_config.flowctrl;
 1740
 1741	tp->link_config.active_flowctrl = flowctrl;
 1742
 1743	if (flowctrl & FLOW_CTRL_RX)
 1744		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
 1745	else
 1746		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
 1747
 1748	if (old_rx_mode != tp->rx_mode)
 1749		tw32_f(MAC_RX_MODE, tp->rx_mode);
 1750
 1751	if (flowctrl & FLOW_CTRL_TX)
 1752		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
 1753	else
 1754		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
 1755
 1756	if (old_tx_mode != tp->tx_mode)
 1757		tw32_f(MAC_TX_MODE, tp->tx_mode);
 1758}
 1759
 1760static void tg3_adjust_link(struct net_device *dev)
 1761{
 1762	u8 oldflowctrl, linkmesg = 0;
 1763	u32 mac_mode, lcl_adv, rmt_adv;
 1764	struct tg3 *tp = netdev_priv(dev);
 1765	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 1766
 1767	spin_lock_bh(&tp->lock);
 1768
 1769	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
 1770				    MAC_MODE_HALF_DUPLEX);
 1771
 1772	oldflowctrl = tp->link_config.active_flowctrl;
 1773
 1774	if (phydev->link) {
 1775		lcl_adv = 0;
 1776		rmt_adv = 0;
 1777
 1778		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
 1779			mac_mode |= MAC_MODE_PORT_MODE_MII;
 1780		else if (phydev->speed == SPEED_1000 ||
 1781			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
 1782			mac_mode |= MAC_MODE_PORT_MODE_GMII;
 1783		else
 1784			mac_mode |= MAC_MODE_PORT_MODE_MII;
 1785
 1786		if (phydev->duplex == DUPLEX_HALF)
 1787			mac_mode |= MAC_MODE_HALF_DUPLEX;
 1788		else {
 1789			lcl_adv = mii_advertise_flowctrl(
 1790				  tp->link_config.flowctrl);
 1791
 1792			if (phydev->pause)
 1793				rmt_adv = LPA_PAUSE_CAP;
 1794			if (phydev->asym_pause)
 1795				rmt_adv |= LPA_PAUSE_ASYM;
 1796		}
 1797
 1798		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
 1799	} else
 1800		mac_mode |= MAC_MODE_PORT_MODE_GMII;
 1801
 1802	if (mac_mode != tp->mac_mode) {
 1803		tp->mac_mode = mac_mode;
 1804		tw32_f(MAC_MODE, tp->mac_mode);
 1805		udelay(40);
 1806	}
 1807
 1808	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
 1809		if (phydev->speed == SPEED_10)
 1810			tw32(MAC_MI_STAT,
 1811			     MAC_MI_STAT_10MBPS_MODE |
 1812			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
 1813		else
 1814			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
 1815	}
 1816
 1817	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
 1818		tw32(MAC_TX_LENGTHS,
 1819		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
 1820		      (6 << TX_LENGTHS_IPG_SHIFT) |
 1821		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
 1822	else
 1823		tw32(MAC_TX_LENGTHS,
 1824		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
 1825		      (6 << TX_LENGTHS_IPG_SHIFT) |
 1826		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
 1827
 1828	if (phydev->link != tp->old_link ||
 1829	    phydev->speed != tp->link_config.active_speed ||
 1830	    phydev->duplex != tp->link_config.active_duplex ||
 1831	    oldflowctrl != tp->link_config.active_flowctrl)
 1832		linkmesg = 1;
 1833
 1834	tp->old_link = phydev->link;
 1835	tp->link_config.active_speed = phydev->speed;
 1836	tp->link_config.active_duplex = phydev->duplex;
 1837
 1838	spin_unlock_bh(&tp->lock);
 1839
 1840	if (linkmesg)
 1841		tg3_link_report(tp);
 1842}
 1843
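      /* Connect the MAC to its PHY through phylib and trim the PHY's
       * supported feature mask down to what the MAC can actually do.
       */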
 1844static int tg3_phy_init(struct tg3 *tp)
 1845{
 1846	struct phy_device *phydev;
 1847
 1848	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
 1849		return 0;
 1850
 1851	/* Bring the PHY back to a known state. */
 1852	tg3_bmcr_reset(tp);
 1853
 1854	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 1855
 1856	/* Attach the MAC to the PHY. */
 1857	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
 1858			     phydev->dev_flags, phydev->interface);
 1859	if (IS_ERR(phydev)) {
 1860		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
 1861		return PTR_ERR(phydev);
 1862	}
 1863
 1864	/* Mask with MAC supported features. */
 1865	switch (phydev->interface) {
 1866	case PHY_INTERFACE_MODE_GMII:
 1867	case PHY_INTERFACE_MODE_RGMII:
 1868		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
 1869			phydev->supported &= (PHY_GBIT_FEATURES |
 1870					      SUPPORTED_Pause |
 1871					      SUPPORTED_Asym_Pause);
 1872			break;
 1873		}
  1874		/* fall through */
 1875	case PHY_INTERFACE_MODE_MII:
 1876		phydev->supported &= (PHY_BASIC_FEATURES |
 1877				      SUPPORTED_Pause |
 1878				      SUPPORTED_Asym_Pause);
 1879		break;
 1880	default:
 1881		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
 1882		return -EINVAL;
 1883	}
 1884
 1885	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
 1886
 1887	phydev->advertising = phydev->supported;
 1888
 1889	return 0;
 1890}
 1891
 1892static void tg3_phy_start(struct tg3 *tp)
 1893{
 1894	struct phy_device *phydev;
 1895
 1896	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
 1897		return;
 1898
 1899	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 1900
 1901	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
 1902		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
 1903		phydev->speed = tp->link_config.speed;
 1904		phydev->duplex = tp->link_config.duplex;
 1905		phydev->autoneg = tp->link_config.autoneg;
 1906		phydev->advertising = tp->link_config.advertising;
 1907	}
 1908
 1909	phy_start(phydev);
 1910
 1911	phy_start_aneg(phydev);
 1912}
 1913
 1914static void tg3_phy_stop(struct tg3 *tp)
 1915{
 1916	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
 1917		return;
 1918
 1919	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
 1920}
 1921
 1922static void tg3_phy_fini(struct tg3 *tp)
 1923{
 1924	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
 1925		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
 1926		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
 1927	}
 1928}
 1929
 1930static int tg3_phy_set_extloopbk(struct tg3 *tp)
 1931{
 1932	int err;
 1933	u32 val;
 1934
 1935	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
 1936		return 0;
 1937
 1938	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
 1939		/* Cannot do read-modify-write on 5401 */
 1940		err = tg3_phy_auxctl_write(tp,
 1941					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
 1942					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
 1943					   0x4c20);
 1944		goto done;
 1945	}
 1946
 1947	err = tg3_phy_auxctl_read(tp,
 1948				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
 1949	if (err)
 1950		return err;
 1951
 1952	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
 1953	err = tg3_phy_auxctl_write(tp,
 1954				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
 1955
 1956done:
 1957	return err;
 1958}
 1959
 1960static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
 1961{
 1962	u32 phytest;
 1963
 1964	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
 1965		u32 phy;
 1966
 1967		tg3_writephy(tp, MII_TG3_FET_TEST,
 1968			     phytest | MII_TG3_FET_SHADOW_EN);
 1969		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
 1970			if (enable)
 1971				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
 1972			else
 1973				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
 1974			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
 1975		}
 1976		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
 1977	}
 1978}
 1979
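      /* Enable or disable the PHY's auto power-down (APD) feature via the
       * misc shadow registers.  FET-style PHYs use a different shadow
       * register set and are handled by tg3_phy_fet_toggle_apd() above.
       */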
 1980static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
 1981{
 1982	u32 reg;
 1983
 1984	if (!tg3_flag(tp, 5705_PLUS) ||
 1985	    (tg3_flag(tp, 5717_PLUS) &&
 1986	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
 1987		return;
 1988
 1989	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
 1990		tg3_phy_fet_toggle_apd(tp, enable);
 1991		return;
 1992	}
 1993
 1994	reg = MII_TG3_MISC_SHDW_WREN |
 1995	      MII_TG3_MISC_SHDW_SCR5_SEL |
 1996	      MII_TG3_MISC_SHDW_SCR5_LPED |
 1997	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
 1998	      MII_TG3_MISC_SHDW_SCR5_SDTL |
 1999	      MII_TG3_MISC_SHDW_SCR5_C125OE;
 2000	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
 2001		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
 2002
 2003	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
  2004
 2006	reg = MII_TG3_MISC_SHDW_WREN |
 2007	      MII_TG3_MISC_SHDW_APD_SEL |
 2008	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
 2009	if (enable)
 2010		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
 2011
 2012	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
 2013}
 2014
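      /* Enable or disable automatic MDI/MDI-X crossover.  Serdes devices
       * have no copper pairs to swap, so they are skipped.
       */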
 2015static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
 2016{
 2017	u32 phy;
 2018
 2019	if (!tg3_flag(tp, 5705_PLUS) ||
 2020	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
 2021		return;
 2022
 2023	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
 2024		u32 ephy;
 2025
 2026		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
 2027			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
 2028
 2029			tg3_writephy(tp, MII_TG3_FET_TEST,
 2030				     ephy | MII_TG3_FET_SHADOW_EN);
 2031			if (!tg3_readphy(tp, reg, &phy)) {
 2032				if (enable)
 2033					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
 2034				else
 2035					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
 2036				tg3_writephy(tp, reg, phy);
 2037			}
 2038			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
 2039		}
 2040	} else {
 2041		int ret;
 2042
 2043		ret = tg3_phy_auxctl_read(tp,
 2044					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
 2045		if (!ret) {
 2046			if (enable)
 2047				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
 2048			else
 2049				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
 2050			tg3_phy_auxctl_write(tp,
 2051					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
 2052		}
 2053	}
 2054}
 2055
 2056static void tg3_phy_set_wirespeed(struct tg3 *tp)
 2057{
 2058	int ret;
 2059	u32 val;
 2060
 2061	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
 2062		return;
 2063
 2064	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
 2065	if (!ret)
 2066		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
 2067				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
 2068}
 2069
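      /* Load the PHY DSP coefficients (AGC target, HPF, LPF, VDAC, 10BT
       * amplitude and resistor offsets) from the one-time-programmable
       * values cached in tp->phy_otp.
       */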
 2070static void tg3_phy_apply_otp(struct tg3 *tp)
 2071{
 2072	u32 otp, phy;
 2073
 2074	if (!tp->phy_otp)
 2075		return;
 2076
 2077	otp = tp->phy_otp;
 2078
 2079	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
 2080		return;
 2081
 2082	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
 2083	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
 2084	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
 2085
 2086	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
 2087	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
 2088	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
 2089
 2090	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
 2091	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
 2092	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
 2093
 2094	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
 2095	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
 2096
 2097	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
 2098	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
 2099
 2100	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
 2101	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
 2102	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
 2103
 2104	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
 2105}
 2106
 2107static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
 2108{
 2109	u32 val;
 2110
 2111	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
 2112		return;
 2113
 2114	tp->setlpicnt = 0;
 2115
 2116	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
 2117	    current_link_up == 1 &&
 2118	    tp->link_config.active_duplex == DUPLEX_FULL &&
 2119	    (tp->link_config.active_speed == SPEED_100 ||
 2120	     tp->link_config.active_speed == SPEED_1000)) {
 2121		u32 eeectl;
 2122
 2123		if (tp->link_config.active_speed == SPEED_1000)
 2124			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
 2125		else
 2126			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
 2127
 2128		tw32(TG3_CPMU_EEE_CTRL, eeectl);
 2129
 2130		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
 2131				  TG3_CL45_D7_EEERES_STAT, &val);
 2132
 2133		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
 2134		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
 2135			tp->setlpicnt = 2;
 2136	}
 2137
 2138	if (!tp->setlpicnt) {
 2139		if (current_link_up == 1 &&
 2140		   !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
 2141			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
 2142			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
 2143		}
 2144
 2145		val = tr32(TG3_CPMU_EEE_MODE);
 2146		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
 2147	}
 2148}
 2149
 2150static void tg3_phy_eee_enable(struct tg3 *tp)
 2151{
 2152	u32 val;
 2153
 2154	if (tp->link_config.active_speed == SPEED_1000 &&
 2155	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
 2156	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
 2157	     tg3_flag(tp, 57765_CLASS)) &&
 2158	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
 2159		val = MII_TG3_DSP_TAP26_ALNOKO |
 2160		      MII_TG3_DSP_TAP26_RMRXSTO;
 2161		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
 2162		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
 2163	}
 2164
 2165	val = tr32(TG3_CPMU_EEE_MODE);
 2166	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
 2167}
 2168
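      /* Poll MII_TG3_DSP_CONTROL until the macro-busy bit (0x1000)
       * clears, giving up after 100 reads.
       */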
 2169static int tg3_wait_macro_done(struct tg3 *tp)
 2170{
 2171	int limit = 100;
 2172
 2173	while (limit--) {
 2174		u32 tmp32;
 2175
 2176		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
 2177			if ((tmp32 & 0x1000) == 0)
 2178				break;
 2179		}
 2180	}
 2181	if (limit < 0)
 2182		return -EBUSY;
 2183
 2184	return 0;
 2185}
 2186
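      /* Write a fixed test pattern into each of the four DSP channels and
       * read it back.  Any readback mismatch or macro timeout requests a
       * fresh PHY reset through *resetp and returns -EBUSY.
       */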
 2187static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
 2188{
 2189	static const u32 test_pat[4][6] = {
 2190	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
 2191	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
 2192	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
 2193	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
 2194	};
 2195	int chan;
 2196
 2197	for (chan = 0; chan < 4; chan++) {
 2198		int i;
 2199
 2200		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
 2201			     (chan * 0x2000) | 0x0200);
 2202		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
 2203
 2204		for (i = 0; i < 6; i++)
 2205			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
 2206				     test_pat[chan][i]);
 2207
 2208		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
 2209		if (tg3_wait_macro_done(tp)) {
 2210			*resetp = 1;
 2211			return -EBUSY;
 2212		}
 2213
 2214		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
 2215			     (chan * 0x2000) | 0x0200);
 2216		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
 2217		if (tg3_wait_macro_done(tp)) {
 2218			*resetp = 1;
 2219			return -EBUSY;
 2220		}
 2221
 2222		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
 2223		if (tg3_wait_macro_done(tp)) {
 2224			*resetp = 1;
 2225			return -EBUSY;
 2226		}
 2227
 2228		for (i = 0; i < 6; i += 2) {
 2229			u32 low, high;
 2230
 2231			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
 2232			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
 2233			    tg3_wait_macro_done(tp)) {
 2234				*resetp = 1;
 2235				return -EBUSY;
 2236			}
 2237			low &= 0x7fff;
 2238			high &= 0x000f;
 2239			if (low != test_pat[chan][i] ||
 2240			    high != test_pat[chan][i+1]) {
 2241				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
 2242				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
 2243				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
 2244
 2245				return -EBUSY;
 2246			}
 2247		}
 2248	}
 2249
 2250	return 0;
 2251}
 2252
 2253static int tg3_phy_reset_chanpat(struct tg3 *tp)
 2254{
 2255	int chan;
 2256
 2257	for (chan = 0; chan < 4; chan++) {
 2258		int i;
 2259
 2260		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
 2261			     (chan * 0x2000) | 0x0200);
 2262		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
 2263		for (i = 0; i < 6; i++)
 2264			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
 2265		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
 2266		if (tg3_wait_macro_done(tp))
 2267			return -EBUSY;
 2268	}
 2269
 2270	return 0;
 2271}
 2272
 2273static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
 2274{
 2275	u32 reg32, phy9_orig;
 2276	int retries, do_phy_reset, err;
 2277
 2278	retries = 10;
 2279	do_phy_reset = 1;
 2280	do {
 2281		if (do_phy_reset) {
 2282			err = tg3_bmcr_reset(tp);
 2283			if (err)
 2284				return err;
 2285			do_phy_reset = 0;
 2286		}
 2287
 2288		/* Disable transmitter and interrupt.  */
 2289		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
 2290			continue;
 2291
 2292		reg32 |= 0x3000;
 2293		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
 2294
  2295		/* Set full-duplex, 1000 Mbps.  */
 2296		tg3_writephy(tp, MII_BMCR,
 2297			     BMCR_FULLDPLX | BMCR_SPEED1000);
 2298
 2299		/* Set to master mode.  */
 2300		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
 2301			continue;
 2302
 2303		tg3_writephy(tp, MII_CTRL1000,
 2304			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
 2305
 2306		err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
 2307		if (err)
 2308			return err;
 2309
 2310		/* Block the PHY control access.  */
 2311		tg3_phydsp_write(tp, 0x8005, 0x0800);
 2312
 2313		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
 2314		if (!err)
 2315			break;
 2316	} while (--retries);
 2317
 2318	err = tg3_phy_reset_chanpat(tp);
 2319	if (err)
 2320		return err;
 2321
 2322	tg3_phydsp_write(tp, 0x8005, 0x0000);
 2323
 2324	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
 2325	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
 2326
 2327	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
 2328
 2329	tg3_writephy(tp, MII_CTRL1000, phy9_orig);
 2330
 2331	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
 2332		reg32 &= ~0x3000;
 2333		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
 2334	} else if (!err)
 2335		err = -EBUSY;
 2336
 2337	return err;
 2338}
 2339
  2340/* Reset the tigon3 PHY and reapply the chip- and PHY-specific
  2341 * workarounds that must follow a reset.
  2342 */
 2343static int tg3_phy_reset(struct tg3 *tp)
 2344{
 2345	u32 val, cpmuctrl;
 2346	int err;
 2347
 2348	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
 2349		val = tr32(GRC_MISC_CFG);
 2350		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
 2351		udelay(40);
 2352	}
 2353	err  = tg3_readphy(tp, MII_BMSR, &val);
 2354	err |= tg3_readphy(tp, MII_BMSR, &val);
 2355	if (err != 0)
 2356		return -EBUSY;
 2357
 2358	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
 2359		netif_carrier_off(tp->dev);
 2360		tg3_link_report(tp);
 2361	}
 2362
 2363	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
 2364	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
 2365	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
 2366		err = tg3_phy_reset_5703_4_5(tp);
 2367		if (err)
 2368			return err;
 2369		goto out;
 2370	}
 2371
 2372	cpmuctrl = 0;
 2373	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
 2374	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
 2375		cpmuctrl = tr32(TG3_CPMU_CTRL);
 2376		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
 2377			tw32(TG3_CPMU_CTRL,
 2378			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
 2379	}
 2380
 2381	err = tg3_bmcr_reset(tp);
 2382	if (err)
 2383		return err;
 2384
 2385	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
 2386		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
 2387		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
 2388
 2389		tw32(TG3_CPMU_CTRL, cpmuctrl);
 2390	}
 2391
 2392	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
 2393	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
 2394		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
 2395		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
 2396		    CPMU_LSPD_1000MB_MACCLK_12_5) {
 2397			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
 2398			udelay(40);
 2399			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
 2400		}
 2401	}
 2402
 2403	if (tg3_flag(tp, 5717_PLUS) &&
 2404	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
 2405		return 0;
 2406
 2407	tg3_phy_apply_otp(tp);
 2408
 2409	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
 2410		tg3_phy_toggle_apd(tp, true);
 2411	else
 2412		tg3_phy_toggle_apd(tp, false);
 2413
 2414out:
 2415	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
 2416	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
 2417		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
 2418		tg3_phydsp_write(tp, 0x000a, 0x0323);
 2419		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
 2420	}
 2421
 2422	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
 2423		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
 2424		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
 2425	}
 2426
 2427	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
 2428		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
 2429			tg3_phydsp_write(tp, 0x000a, 0x310b);
 2430			tg3_phydsp_write(tp, 0x201f, 0x9506);
 2431			tg3_phydsp_write(tp, 0x401f, 0x14e2);
 2432			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
 2433		}
 2434	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
 2435		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
 2436			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
 2437			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
 2438				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
 2439				tg3_writephy(tp, MII_TG3_TEST1,
 2440					     MII_TG3_TEST1_TRIM_EN | 0x4);
 2441			} else
 2442				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
 2443
 2444			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
 2445		}
 2446	}
 2447
  2448	/* Set Extended packet length bit (bit 14) on all chips
  2449	 * that support jumbo frames. */
 2450	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
 2451		/* Cannot do read-modify-write on 5401 */
 2452		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
 2453	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
 2454		/* Set bit 14 with read-modify-write to preserve other bits */
 2455		err = tg3_phy_auxctl_read(tp,
 2456					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
 2457		if (!err)
 2458			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
 2459					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
 2460	}
 2461
 2462	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
 2463	 * jumbo frames transmission.
 2464	 */
 2465	if (tg3_flag(tp, JUMBO_CAPABLE)) {
 2466		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
 2467			tg3_writephy(tp, MII_TG3_EXT_CTRL,
 2468				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
 2469	}
 2470
 2471	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
 2472		/* adjust output voltage */
 2473		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
 2474	}
 2475
 2476	tg3_phy_toggle_automdix(tp, 1);
 2477	tg3_phy_set_wirespeed(tp);
 2478	return 0;
 2479}
 2480
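      /* Each PCI function owns a 4-bit nibble in the shared GPIO message
       * word (see the << 0/4/8/12 replication below and the per-function
       * shift in tg3_set_function_status()).  The two message bits tell
       * the other functions that a driver is present and whether it needs
       * auxiliary power.
       */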
 2481#define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
 2482#define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
 2483#define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
 2484					  TG3_GPIO_MSG_NEED_VAUX)
 2485#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
 2486	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
 2487	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
 2488	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
 2489	 (TG3_GPIO_MSG_DRVR_PRES << 12))
 2490
 2491#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
 2492	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
 2493	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
 2494	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
 2495	 (TG3_GPIO_MSG_NEED_VAUX << 12))
 2496
 2497static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
 2498{
 2499	u32 status, shift;
 2500
 2501	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
 2502	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
 2503		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
 2504	else
 2505		status = tr32(TG3_CPMU_DRV_STATUS);
 2506
 2507	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
 2508	status &= ~(TG3_GPIO_MSG_MASK << shift);
 2509	status |= (newstat << shift);
 2510
 2511	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
 2512	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
 2513		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
 2514	else
 2515		tw32(TG3_CPMU_DRV_STATUS, status);
 2516
 2517	return status >> TG3_APE_GPIO_MSG_SHIFT;
 2518}
 2519
 2520static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
 2521{
 2522	if (!tg3_flag(tp, IS_NIC))
 2523		return 0;
 2524
 2525	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
 2526	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
 2527	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
 2528		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
 2529			return -EIO;
 2530
 2531		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
 2532
 2533		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
 2534			    TG3_GRC_LCLCTL_PWRSW_DELAY);
 2535
 2536		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
 2537	} else {
 2538		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
 2539			    TG3_GRC_LCLCTL_PWRSW_DELAY);
 2540	}
 2541
 2542	return 0;
 2543}
 2544
 2545static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
 2546{
 2547	u32 grc_local_ctrl;
 2548
 2549	if (!tg3_flag(tp, IS_NIC) ||
 2550	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
 2551	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
 2552		return;
 2553
 2554	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
 2555
 2556	tw32_wait_f(GRC_LOCAL_CTRL,
 2557		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
 2558		    TG3_GRC_LCLCTL_PWRSW_DELAY);
 2559
 2560	tw32_wait_f(GRC_LOCAL_CTRL,
 2561		    grc_local_ctrl,
 2562		    TG3_GRC_LCLCTL_PWRSW_DELAY);
 2563
 2564	tw32_wait_f(GRC_LOCAL_CTRL,
 2565		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
 2566		    TG3_GRC_LCLCTL_PWRSW_DELAY);
 2567}
 2568
 2569static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
 2570{
 2571	if (!tg3_flag(tp, IS_NIC))
 2572		return;
 2573
 2574	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
 2575	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
 2576		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
 2577			    (GRC_LCLCTRL_GPIO_OE0 |
 2578			     GRC_LCLCTRL_GPIO_OE1 |
 2579			     GRC_LCLCTRL_GPIO_OE2 |
 2580			     GRC_LCLCTRL_GPIO_OUTPUT0 |
 2581			     GRC_LCLCTRL_GPIO_OUTPUT1),
 2582			    TG3_GRC_LCLCTL_PWRSW_DELAY);
 2583	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
 2584		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
 2585		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
 2586		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
 2587				     GRC_LCLCTRL_GPIO_OE1 |
 2588				     GRC_LCLCTRL_GPIO_OE2 |
 2589				     GRC_LCLCTRL_GPIO_OUTPUT0 |
 2590				     GRC_LCLCTRL_GPIO_OUTPUT1 |
 2591				     tp->grc_local_ctrl;
 2592		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
 2593			    TG3_GRC_LCLCTL_PWRSW_DELAY);
 2594
 2595		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
 2596		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
 2597			    TG3_GRC_LCLCTL_PWRSW_DELAY);
 2598
 2599		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
 2600		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
 2601			    TG3_GRC_LCLCTL_PWRSW_DELAY);
 2602	} else {
 2603		u32 no_gpio2;
 2604		u32 grc_local_ctrl = 0;
 2605
 2606		/* Workaround to prevent overdrawing Amps. */
 2607		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
 2608			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
 2609			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
 2610				    grc_local_ctrl,
 2611				    TG3_GRC_LCLCTL_PWRSW_DELAY);
 2612		}
 2613
 2614		/* On 5753 and variants, GPIO2 cannot be used. */
 2615		no_gpio2 = tp->nic_sram_data_cfg &
 2616			   NIC_SRAM_DATA_CFG_NO_GPIO2;
 2617
 2618		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
 2619				  GRC_LCLCTRL_GPIO_OE1 |
 2620				  GRC_LCLCTRL_GPIO_OE2 |
 2621				  GRC_LCLCTRL_GPIO_OUTPUT1 |
 2622				  GRC_LCLCTRL_GPIO_OUTPUT2;
 2623		if (no_gpio2) {
 2624			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
 2625					    GRC_LCLCTRL_GPIO_OUTPUT2);
 2626		}
 2627		tw32_wait_f(GRC_LOCAL_CTRL,
 2628			    tp->grc_local_ctrl | grc_local_ctrl,
 2629			    TG3_GRC_LCLCTL_PWRSW_DELAY);
 2630
 2631		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
 2632
 2633		tw32_wait_f(GRC_LOCAL_CTRL,
 2634			    tp->grc_local_ctrl | grc_local_ctrl,
 2635			    TG3_GRC_LCLCTL_PWRSW_DELAY);
 2636
 2637		if (!no_gpio2) {
 2638			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
 2639			tw32_wait_f(GRC_LOCAL_CTRL,
 2640				    tp->grc_local_ctrl | grc_local_ctrl,
 2641				    TG3_GRC_LCLCTL_PWRSW_DELAY);
 2642		}
 2643	}
 2644}
 2645
 2646static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
 2647{
 2648	u32 msg = 0;
 2649
 2650	/* Serialize power state transitions */
 2651	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
 2652		return;
 2653
 2654	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
 2655		msg = TG3_GPIO_MSG_NEED_VAUX;
 2656
 2657	msg = tg3_set_function_status(tp, msg);
 2658
 2659	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
 2660		goto done;
 2661
 2662	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
 2663		tg3_pwrsrc_switch_to_vaux(tp);
 2664	else
 2665		tg3_pwrsrc_die_with_vmain(tp);
 2666
 2667done:
 2668	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
 2669}
 2670
 2671static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
 2672{
 2673	bool need_vaux = false;
 2674
 2675	/* The GPIOs do something completely different on 57765. */
 2676	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
 2677		return;
 2678
 2679	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
 2680	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
 2681	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
 2682		tg3_frob_aux_power_5717(tp, include_wol ?
 2683					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
 2684		return;
 2685	}
 2686
 2687	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
 2688		struct net_device *dev_peer;
 2689
 2690		dev_peer = pci_get_drvdata(tp->pdev_peer);
 2691
 2692		/* remove_one() may have been run on the peer. */
 2693		if (dev_peer) {
 2694			struct tg3 *tp_peer = netdev_priv(dev_peer);
 2695
 2696			if (tg3_flag(tp_peer, INIT_COMPLETE))
 2697				return;
 2698
 2699			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
 2700			    tg3_flag(tp_peer, ENABLE_ASF))
 2701				need_vaux = true;
 2702		}
 2703	}
 2704
 2705	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
 2706	    tg3_flag(tp, ENABLE_ASF))
 2707		need_vaux = true;
 2708
 2709	if (need_vaux)
 2710		tg3_pwrsrc_switch_to_vaux(tp);
 2711	else
 2712		tg3_pwrsrc_die_with_vmain(tp);
 2713}
 2714
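      /* Decide whether MAC_MODE_LINK_POLARITY should be set for the given
       * link speed on 5700-class devices, based on the LED mode and the
       * PHY type.
       */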
 2715static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
 2716{
 2717	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
 2718		return 1;
 2719	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
 2720		if (speed != SPEED_10)
 2721			return 1;
 2722	} else if (speed == SPEED_10)
 2723		return 1;
 2724
 2725	return 0;
 2726}
 2727
 2728static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
 2729{
 2730	u32 val;
 2731
 2732	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
 2733		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
 2734			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
 2735			u32 serdes_cfg = tr32(MAC_SERDES_CFG);
 2736
 2737			sg_dig_ctrl |=
 2738				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
 2739			tw32(SG_DIG_CTRL, sg_dig_ctrl);
 2740			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
 2741		}
 2742		return;
 2743	}
 2744
 2745	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
 2746		tg3_bmcr_reset(tp);
 2747		val = tr32(GRC_MISC_CFG);
 2748		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
 2749		udelay(40);
 2750		return;
 2751	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
 2752		u32 phytest;
 2753		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
 2754			u32 phy;
 2755
 2756			tg3_writephy(tp, MII_ADVERTISE, 0);
 2757			tg3_writephy(tp, MII_BMCR,
 2758				     BMCR_ANENABLE | BMCR_ANRESTART);
 2759
 2760			tg3_writephy(tp, MII_TG3_FET_TEST,
 2761				     phytest | MII_TG3_FET_SHADOW_EN);
 2762			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
 2763				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
 2764				tg3_writephy(tp,
 2765					     MII_TG3_FET_SHDW_AUXMODE4,
 2766					     phy);
 2767			}
 2768			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
 2769		}
 2770		return;
 2771	} else if (do_low_power) {
 2772		tg3_writephy(tp, MII_TG3_EXT_CTRL,
 2773			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
 2774
 2775		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
 2776		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
 2777		      MII_TG3_AUXCTL_PCTL_VREG_11V;
 2778		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
 2779	}
 2780
 2781	/* The PHY should not be powered down on some chips because
 2782	 * of bugs.
 2783	 */
 2784	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
 2785	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
 2786	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
 2787	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
 2788	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
 2789	     !tp->pci_fn))
 2790		return;
 2791
 2792	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
 2793	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
 2794		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
 2795		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
 2796		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
 2797		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
 2798	}
 2799
 2800	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
 2801}
 2802
 2803/* tp->lock is held. */
 2804static int tg3_nvram_lock(struct tg3 *tp)
 2805{
 2806	if (tg3_flag(tp, NVRAM)) {
 2807		int i;
 2808
 2809		if (tp->nvram_lock_cnt == 0) {
 2810			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
 2811			for (i = 0; i < 8000; i++) {
 2812				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
 2813					break;
 2814				udelay(20);
 2815			}
 2816			if (i == 8000) {
 2817				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
 2818				return -ENODEV;
 2819			}
 2820		}
 2821		tp->nvram_lock_cnt++;
 2822	}
 2823	return 0;
 2824}
 2825
 2826/* tp->lock is held. */
 2827static void tg3_nvram_unlock(struct tg3 *tp)
 2828{
 2829	if (tg3_flag(tp, NVRAM)) {
 2830		if (tp->nvram_lock_cnt > 0)
 2831			tp->nvram_lock_cnt--;
 2832		if (tp->nvram_lock_cnt == 0)
 2833			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
 2834	}
 2835}
 2836
 2837/* tp->lock is held. */
 2838static void tg3_enable_nvram_access(struct tg3 *tp)
 2839{
 2840	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
 2841		u32 nvaccess = tr32(NVRAM_ACCESS);
 2842
 2843		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
 2844	}
 2845}
 2846
 2847/* tp->lock is held. */
 2848static void tg3_disable_nvram_access(struct tg3 *tp)
 2849{
 2850	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
 2851		u32 nvaccess = tr32(NVRAM_ACCESS);
 2852
 2853		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
 2854	}
 2855}
 2856
 2857static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
 2858					u32 offset, u32 *val)
 2859{
 2860	u32 tmp;
 2861	int i;
 2862
 2863	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
 2864		return -EINVAL;
 2865
 2866	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
 2867					EEPROM_ADDR_DEVID_MASK |
 2868					EEPROM_ADDR_READ);
 2869	tw32(GRC_EEPROM_ADDR,
 2870	     tmp |
 2871	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
 2872	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
 2873	      EEPROM_ADDR_ADDR_MASK) |
 2874	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
 2875
 2876	for (i = 0; i < 1000; i++) {
 2877		tmp = tr32(GRC_EEPROM_ADDR);
 2878
 2879		if (tmp & EEPROM_ADDR_COMPLETE)
 2880			break;
 2881		msleep(1);
 2882	}
 2883	if (!(tmp & EEPROM_ADDR_COMPLETE))
 2884		return -EBUSY;
 2885
 2886	tmp = tr32(GRC_EEPROM_DATA);
 2887
 2888	/*
 2889	 * The data will always be opposite the native endian
 2890	 * format.  Perform a blind byteswap to compensate.
 2891	 */
 2892	*val = swab32(tmp);
 2893
 2894	return 0;
 2895}
 2896
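      /* Issue an NVRAM command and poll for NVRAM_CMD_DONE in 10 usec
       * steps, for a worst case of roughly 100 ms before -EBUSY.
       */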
 2897#define NVRAM_CMD_TIMEOUT 10000
 2898
 2899static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
 2900{
 2901	int i;
 2902
 2903	tw32(NVRAM_CMD, nvram_cmd);
 2904	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
 2905		udelay(10);
 2906		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
 2907			udelay(10);
 2908			break;
 2909		}
 2910	}
 2911
 2912	if (i == NVRAM_CMD_TIMEOUT)
 2913		return -EBUSY;
 2914
 2915	return 0;
 2916}
 2917
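      /* Convert a linear NVRAM offset into the page-based address that
       * Atmel AT45DB0x1B-style buffered flash expects: page number in the
       * high bits (shifted by ATMEL_AT45DB0X1B_PAGE_POS), byte-in-page in
       * the low bits.  For example, assuming the part's usual 264-byte
       * page size, offset 600 (= page 2, byte 72) becomes
       * (2 << ATMEL_AT45DB0X1B_PAGE_POS) + 72.
       */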
 2918static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
 2919{
 2920	if (tg3_flag(tp, NVRAM) &&
 2921	    tg3_flag(tp, NVRAM_BUFFERED) &&
 2922	    tg3_flag(tp, FLASH) &&
 2923	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
 2924	    (tp->nvram_jedecnum == JEDEC_ATMEL))
 2925
 2926		addr = ((addr / tp->nvram_pagesize) <<
 2927			ATMEL_AT45DB0X1B_PAGE_POS) +
 2928		       (addr % tp->nvram_pagesize);
 2929
 2930	return addr;
 2931}
 2932
 2933static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
 2934{
 2935	if (tg3_flag(tp, NVRAM) &&
 2936	    tg3_flag(tp, NVRAM_BUFFERED) &&
 2937	    tg3_flag(tp, FLASH) &&
 2938	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
 2939	    (tp->nvram_jedecnum == JEDEC_ATMEL))
 2940
 2941		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
 2942			tp->nvram_pagesize) +
 2943		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
 2944
 2945	return addr;
 2946}
 2947
 2948/* NOTE: Data read in from NVRAM is byteswapped according to
 2949 * the byteswapping settings for all other register accesses.
 2950 * tg3 devices are BE devices, so on a BE machine, the data
 2951 * returned will be exactly as it is seen in NVRAM.  On a LE
 2952 * machine, the 32-bit value will be byteswapped.
 2953 */
 2954static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
 2955{
 2956	int ret;
 2957
 2958	if (!tg3_flag(tp, NVRAM))
 2959		return tg3_nvram_read_using_eeprom(tp, offset, val);
 2960
 2961	offset = tg3_nvram_phys_addr(tp, offset);
 2962
 2963	if (offset > NVRAM_ADDR_MSK)
 2964		return -EINVAL;
 2965
 2966	ret = tg3_nvram_lock(tp);
 2967	if (ret)
 2968		return ret;
 2969
 2970	tg3_enable_nvram_access(tp);
 2971
 2972	tw32(NVRAM_ADDR, offset);
 2973	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
 2974		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
 2975
 2976	if (ret == 0)
 2977		*val = tr32(NVRAM_RDDATA);
 2978
 2979	tg3_disable_nvram_access(tp);
 2980
 2981	tg3_nvram_unlock(tp);
 2982
 2983	return ret;
 2984}
 2985
 2986/* Ensures NVRAM data is in bytestream format. */
 2987static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
 2988{
 2989	u32 v;
 2990	int res = tg3_nvram_read(tp, offset, &v);
 2991	if (!res)
 2992		*val = cpu_to_be32(v);
 2993	return res;
 2994}
 2995
 2996static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
 2997				    u32 offset, u32 len, u8 *buf)
 2998{
 2999	int i, j, rc = 0;
 3000	u32 val;
 3001
 3002	for (i = 0; i < len; i += 4) {
 3003		u32 addr;
 3004		__be32 data;
 3005
 3006		addr = offset + i;
 3007
 3008		memcpy(&data, buf + i, 4);
 3009
 3010		/*
 3011		 * The SEEPROM interface expects the data to always be opposite
 3012		 * the native endian format.  We accomplish this by reversing
 3013		 * all the operations that would have been performed on the
 3014		 * data from a call to tg3_nvram_read_be32().
 3015		 */
 3016		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
 3017
 3018		val = tr32(GRC_EEPROM_ADDR);
 3019		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
 3020
 3021		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
 3022			EEPROM_ADDR_READ);
 3023		tw32(GRC_EEPROM_ADDR, val |
 3024			(0 << EEPROM_ADDR_DEVID_SHIFT) |
 3025			(addr & EEPROM_ADDR_ADDR_MASK) |
 3026			EEPROM_ADDR_START |
 3027			EEPROM_ADDR_WRITE);
 3028
 3029		for (j = 0; j < 1000; j++) {
 3030			val = tr32(GRC_EEPROM_ADDR);
 3031
 3032			if (val & EEPROM_ADDR_COMPLETE)
 3033				break;
 3034			msleep(1);
 3035		}
 3036		if (!(val & EEPROM_ADDR_COMPLETE)) {
 3037			rc = -EBUSY;
 3038			break;
 3039		}
 3040	}
 3041
 3042	return rc;
 3043}
 3044
 3045/* offset and length are dword aligned */
 3046static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
 3047		u8 *buf)
 3048{
 3049	int ret = 0;
 3050	u32 pagesize = tp->nvram_pagesize;
 3051	u32 pagemask = pagesize - 1;
 3052	u32 nvram_cmd;
 3053	u8 *tmp;
 3054
 3055	tmp = kmalloc(pagesize, GFP_KERNEL);
 3056	if (tmp == NULL)
 3057		return -ENOMEM;
 3058
 3059	while (len) {
 3060		int j;
 3061		u32 phy_addr, page_off, size;
 3062
 3063		phy_addr = offset & ~pagemask;
 3064
 3065		for (j = 0; j < pagesize; j += 4) {
 3066			ret = tg3_nvram_read_be32(tp, phy_addr + j,
 3067						  (__be32 *) (tmp + j));
 3068			if (ret)
 3069				break;
 3070		}
 3071		if (ret)
 3072			break;
 3073
 3074		page_off = offset & pagemask;
 3075		size = pagesize;
 3076		if (len < size)
 3077			size = len;
 3078
 3079		len -= size;
 3080
 3081		memcpy(tmp + page_off, buf, size);
 3082
 3083		offset = offset + (pagesize - page_off);
 3084
 3085		tg3_enable_nvram_access(tp);
 3086
 3087		/*
 3088		 * Before we can erase the flash page, we need
 3089		 * to issue a special "write enable" command.
 3090		 */
 3091		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
 3092
 3093		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
 3094			break;
 3095
 3096		/* Erase the target page */
 3097		tw32(NVRAM_ADDR, phy_addr);
 3098
 3099		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
 3100			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
 3101
 3102		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
 3103			break;
 3104
 3105		/* Issue another write enable to start the write. */
 3106		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
 3107
 3108		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
 3109			break;
 3110
 3111		for (j = 0; j < pagesize; j += 4) {
 3112			__be32 data;
 3113
 3114			data = *((__be32 *) (tmp + j));
 3115
 3116			tw32(NVRAM_WRDATA, be32_to_cpu(data));
 3117
 3118			tw32(NVRAM_ADDR, phy_addr + j);
 3119
 3120			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
 3121				NVRAM_CMD_WR;
 3122
 3123			if (j == 0)
 3124				nvram_cmd |= NVRAM_CMD_FIRST;
 3125			else if (j == (pagesize - 4))
 3126				nvram_cmd |= NVRAM_CMD_LAST;
 3127
 3128			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
 3129			if (ret)
 3130				break;
 3131		}
 3132		if (ret)
 3133			break;
 3134	}
 3135
 3136	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
 3137	tg3_nvram_exec_cmd(tp, nvram_cmd);
 3138
 3139	kfree(tmp);
 3140
 3141	return ret;
 3142}
 3143
 3144/* offset and length are dword aligned */
 3145static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
 3146		u8 *buf)
 3147{
 3148	int i, ret = 0;
 3149
 3150	for (i = 0; i < len; i += 4, offset += 4) {
 3151		u32 page_off, phy_addr, nvram_cmd;
 3152		__be32 data;
 3153
 3154		memcpy(&data, buf + i, 4);
 3155		tw32(NVRAM_WRDATA, be32_to_cpu(data));
 3156
 3157		page_off = offset % tp->nvram_pagesize;
 3158
 3159		phy_addr = tg3_nvram_phys_addr(tp, offset);
 3160
 3161		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
 3162
 3163		if (page_off == 0 || i == 0)
 3164			nvram_cmd |= NVRAM_CMD_FIRST;
 3165		if (page_off == (tp->nvram_pagesize - 4))
 3166			nvram_cmd |= NVRAM_CMD_LAST;
 3167
 3168		if (i == (len - 4))
 3169			nvram_cmd |= NVRAM_CMD_LAST;
 3170
 3171		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
 3172		    !tg3_flag(tp, FLASH) ||
 3173		    !tg3_flag(tp, 57765_PLUS))
 3174			tw32(NVRAM_ADDR, phy_addr);
 3175
 3176		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
 3177		    !tg3_flag(tp, 5755_PLUS) &&
 3178		    (tp->nvram_jedecnum == JEDEC_ST) &&
 3179		    (nvram_cmd & NVRAM_CMD_FIRST)) {
 3180			u32 cmd;
 3181
 3182			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
 3183			ret = tg3_nvram_exec_cmd(tp, cmd);
 3184			if (ret)
 3185				break;
 3186		}
 3187		if (!tg3_flag(tp, FLASH)) {
 3188			/* We always do complete word writes to eeprom. */
 3189			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
 3190		}
 3191
 3192		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
 3193		if (ret)
 3194			break;
 3195	}
 3196	return ret;
 3197}
 3198
 3199/* offset and length are dword aligned */
 3200static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
 3201{
 3202	int ret;
 3203
 3204	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
 3205		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
 3206		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
 3207		udelay(40);
 3208	}
 3209
 3210	if (!tg3_flag(tp, NVRAM)) {
 3211		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
 3212	} else {
 3213		u32 grc_mode;
 3214
 3215		ret = tg3_nvram_lock(tp);
 3216		if (ret)
 3217			return ret;
 3218
 3219		tg3_enable_nvram_access(tp);
 3220		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
 3221			tw32(NVRAM_WRITE1, 0x406);
 3222
 3223		grc_mode = tr32(GRC_MODE);
 3224		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
 3225
 3226		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
 3227			ret = tg3_nvram_write_block_buffered(tp, offset, len,
 3228				buf);
 3229		} else {
 3230			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
 3231				buf);
 3232		}
 3233
 3234		grc_mode = tr32(GRC_MODE);
 3235		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
 3236
 3237		tg3_disable_nvram_access(tp);
 3238		tg3_nvram_unlock(tp);
 3239	}
 3240
 3241	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
 3242		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
 3243		udelay(40);
 3244	}
 3245
 3246	return ret;
 3247}
 3248
 3249#define RX_CPU_SCRATCH_BASE	0x30000
 3250#define RX_CPU_SCRATCH_SIZE	0x04000
 3251#define TX_CPU_SCRATCH_BASE	0x34000
 3252#define TX_CPU_SCRATCH_SIZE	0x04000
 3253
 3254/* tp->lock is held. */
 3255static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
 3256{
 3257	int i;
 3258
 3259	BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
 3260
 3261	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
 3262		u32 val = tr32(GRC_VCPU_EXT_CTRL);
 3263
 3264		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
 3265		return 0;
 3266	}
 3267	if (offset == RX_CPU_BASE) {
 3268		for (i = 0; i < 10000; i++) {
 3269			tw32(offset + CPU_STATE, 0xffffffff);
 3270			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
 3271			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
 3272				break;
 3273		}
 3274
 3275		tw32(offset + CPU_STATE, 0xffffffff);
 3276		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
 3277		udelay(10);
 3278	} else {
 3279		for (i = 0; i < 10000; i++) {
 3280			tw32(offset + CPU_STATE, 0xffffffff);
 3281			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
 3282			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
 3283				break;
 3284		}
 3285	}
 3286
 3287	if (i >= 10000) {
 3288		netdev_err(tp->dev, "%s timed out, %s CPU\n",
 3289			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
 3290		return -ENODEV;
 3291	}
 3292
 3293	/* Clear firmware's nvram arbitration. */
 3294	if (tg3_flag(tp, NVRAM))
 3295		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
 3296	return 0;
 3297}
 3298
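      /* Describes a firmware image: fw_base is the load/entry address
       * taken from the blob header, fw_len the payload length in bytes,
       * and fw_data the payload itself as big-endian words.
       */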
 3299struct fw_info {
 3300	unsigned int fw_base;
 3301	unsigned int fw_len;
 3302	const __be32 *fw_data;
 3303};
 3304
 3305/* tp->lock is held. */
 3306static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
 3307				 u32 cpu_scratch_base, int cpu_scratch_size,
 3308				 struct fw_info *info)
 3309{
 3310	int err, lock_err, i;
 3311	void (*write_op)(struct tg3 *, u32, u32);
 3312
 3313	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
 3314		netdev_err(tp->dev,
  3315			   "%s: Trying to load TX cpu firmware on a 5705 or newer chip\n",
 3316			   __func__);
 3317		return -EINVAL;
 3318	}
 3319
 3320	if (tg3_flag(tp, 5705_PLUS))
 3321		write_op = tg3_write_mem;
 3322	else
 3323		write_op = tg3_write_indirect_reg32;
 3324
 3325	/* It is possible that bootcode is still loading at this point.
 3326	 * Get the nvram lock first before halting the cpu.
 3327	 */
 3328	lock_err = tg3_nvram_lock(tp);
 3329	err = tg3_halt_cpu(tp, cpu_base);
 3330	if (!lock_err)
 3331		tg3_nvram_unlock(tp);
 3332	if (err)
 3333		goto out;
 3334
 3335	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
 3336		write_op(tp, cpu_scratch_base + i, 0);
 3337	tw32(cpu_base + CPU_STATE, 0xffffffff);
 3338	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
 3339	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
 3340		write_op(tp, (cpu_scratch_base +
 3341			      (info->fw_base & 0xffff) +
 3342			      (i * sizeof(u32))),
 3343			      be32_to_cpu(info->fw_data[i]));
 3344
 3345	err = 0;
 3346
 3347out:
 3348	return err;
 3349}
 3350
 3351/* tp->lock is held. */
 3352static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
 3353{
 3354	struct fw_info info;
 3355	const __be32 *fw_data;
 3356	int err, i;
 3357
 3358	fw_data = (void *)tp->fw->data;
 3359
  3360	/* The firmware blob starts with the version numbers, followed by
  3361	 * the start address and length.  We use the complete length:
  3362	 * length = end_address_of_bss - start_address_of_text.  The
  3363	 * remainder is the blob, loaded contiguously from the start address.
  3364	 */
 3365
 3366	info.fw_base = be32_to_cpu(fw_data[1]);
 3367	info.fw_len = tp->fw->size - 12;
 3368	info.fw_data = &fw_data[3];
 3369
 3370	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
 3371				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
 3372				    &info);
 3373	if (err)
 3374		return err;
 3375
 3376	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
 3377				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
 3378				    &info);
 3379	if (err)
 3380		return err;
 3381
 3382	/* Now startup only the RX cpu. */
 3383	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
 3384	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
 3385
 3386	for (i = 0; i < 5; i++) {
 3387		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
 3388			break;
 3389		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
 3390		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
 3391		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
 3392		udelay(1000);
 3393	}
 3394	if (i >= 5) {
 3395		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
 3396			   "should be %08x\n", __func__,
 3397			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
 3398		return -ENODEV;
 3399	}
 3400	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
 3401	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
 3402
 3403	return 0;
 3404}
 3405
 3406/* tp->lock is held. */
 3407static int tg3_load_tso_firmware(struct tg3 *tp)
 3408{
 3409	struct fw_info info;
 3410	const __be32 *fw_data;
 3411	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
 3412	int err, i;
 3413
 3414	if (tg3_flag(tp, HW_TSO_1) ||
 3415	    tg3_flag(tp, HW_TSO_2) ||
 3416	    tg3_flag(tp, HW_TSO_3))
 3417		return 0;
 3418
 3419	fw_data = (void *)tp->fw->data;
 3420
  3421	/* The firmware blob starts with the version numbers, followed by
  3422	 * the start address and length.  We use the complete length:
  3423	 * length = end_address_of_bss - start_address_of_text.  The
  3424	 * remainder is the blob, loaded contiguously from the start address.
  3425	 */
 3426
 3427	info.fw_base = be32_to_cpu(fw_data[1]);
 3428	cpu_scratch_size = tp->fw_len;
 3429	info.fw_len = tp->fw->size - 12;
 3430	info.fw_data = &fw_data[3];
 3431
 3432	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
 3433		cpu_base = RX_CPU_BASE;
 3434		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
 3435	} else {
 3436		cpu_base = TX_CPU_BASE;
 3437		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
 3438		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
 3439	}
 3440
 3441	err = tg3_load_firmware_cpu(tp, cpu_base,
 3442				    cpu_scratch_base, cpu_scratch_size,
 3443				    &info);
 3444	if (err)
 3445		return err;
 3446
 3447	/* Now startup the cpu. */
 3448	tw32(cpu_base + CPU_STATE, 0xffffffff);
 3449	tw32_f(cpu_base + CPU_PC, info.fw_base);
 3450
 3451	for (i = 0; i < 5; i++) {
 3452		if (tr32(cpu_base + CPU_PC) == info.fw_base)
 3453			break;
 3454		tw32(cpu_base + CPU_STATE, 0xffffffff);
 3455		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
 3456		tw32_f(cpu_base + CPU_PC, info.fw_base);
 3457		udelay(1000);
 3458	}
 3459	if (i >= 5) {
 3460		netdev_err(tp->dev,
 3461			   "%s fails to set CPU PC, is %08x should be %08x\n",
 3462			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
 3463		return -ENODEV;
 3464	}
 3465	tw32(cpu_base + CPU_STATE, 0xffffffff);
 3466	tw32_f(cpu_base + CPU_MODE,  0x00000000);
 3467	return 0;
 3468}
  3469
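      /* Program the station address into all four MAC address slots (and
       * the twelve extended slots on 5703/5704), then seed the TX backoff
       * generator with the byte sum of the address.
       */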
 3471/* tp->lock is held. */
 3472static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
 3473{
 3474	u32 addr_high, addr_low;
 3475	int i;
 3476
 3477	addr_high = ((tp->dev->dev_addr[0] << 8) |
 3478		     tp->dev->dev_addr[1]);
 3479	addr_low = ((tp->dev->dev_addr[2] << 24) |
 3480		    (tp->dev->dev_addr[3] << 16) |
 3481		    (tp->dev->dev_addr[4] <<  8) |
 3482		    (tp->dev->dev_addr[5] <<  0));
 3483	for (i = 0; i < 4; i++) {
 3484		if (i == 1 && skip_mac_1)
 3485			continue;
 3486		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
 3487		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
 3488	}
 3489
 3490	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
 3491	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
 3492		for (i = 0; i < 12; i++) {
 3493			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
 3494			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
 3495		}
 3496	}
 3497
 3498	addr_high = (tp->dev->dev_addr[0] +
 3499		     tp->dev->dev_addr[1] +
 3500		     tp->dev->dev_addr[2] +
 3501		     tp->dev->dev_addr[3] +
 3502		     tp->dev->dev_addr[4] +
 3503		     tp->dev->dev_addr[5]) &
 3504		TX_BACKOFF_SEED_MASK;
 3505	tw32(MAC_TX_BACKOFF_SEED, addr_high);
 3506}
 3507
 3508static void tg3_enable_register_access(struct tg3 *tp)
 3509{
 3510	/*
 3511	 * Make sure register accesses (indirect or otherwise) will function
 3512	 * correctly.
 3513	 */
 3514	pci_write_config_dword(tp->pdev,
 3515			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
 3516}
 3517
 3518static int tg3_power_up(struct tg3 *tp)
 3519{
 3520	int err;
 3521
 3522	tg3_enable_register_access(tp);
 3523
 3524	err = pci_set_power_state(tp->pdev, PCI_D0);
 3525	if (!err) {
 3526		/* Switch out of Vaux if it is a NIC */
 3527		tg3_pwrsrc_switch_to_vmain(tp);
 3528	} else {
 3529		netdev_err(tp->dev, "Transition to D0 failed\n");
 3530	}
 3531
 3532	return err;
 3533}
 3534
 3535static int tg3_setup_phy(struct tg3 *, int);
 3536
 3537static int tg3_power_down_prepare(struct tg3 *tp)
 3538{
 3539	u32 misc_host_ctrl;
 3540	bool device_should_wake, do_low_power;
 3541
 3542	tg3_enable_register_access(tp);
 3543
 3544	/* Restore the CLKREQ setting. */
 3545	if (tg3_flag(tp, CLKREQ_BUG)) {
 3546		u16 lnkctl;
 3547
 3548		pci_read_config_word(tp->pdev,
 3549				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
 3550				     &lnkctl);
 3551		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
 3552		pci_write_config_word(tp->pdev,
 3553				      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
 3554				      lnkctl);
 3555	}
 3556
 3557	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
 3558	tw32(TG3PCI_MISC_HOST_CTRL,
 3559	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
 3560
 3561	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
 3562			     tg3_flag(tp, WOL_ENABLE);
 3563
 3564	if (tg3_flag(tp, USE_PHYLIB)) {
 3565		do_low_power = false;
 3566		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
 3567		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
 3568			struct phy_device *phydev;
 3569			u32 phyid, advertising;
 3570
 3571			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
 3572
 3573			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
 3574
 3575			tp->link_config.speed = phydev->speed;
 3576			tp->link_config.duplex = phydev->duplex;
 3577			tp->link_config.autoneg = phydev->autoneg;
 3578			tp->link_config.advertising = phydev->advertising;
 3579
 3580			advertising = ADVERTISED_TP |
 3581				      ADVERTISED_Pause |
 3582				      ADVERTISED_Autoneg |
 3583				      ADVERTISED_10baseT_Half;
 3584
 3585			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
 3586				if (tg3_flag(tp, WOL_SPEED_100MB))
 3587					advertising |=
 3588						ADVERTISED_100baseT_Half |
 3589						ADVERTISED_100baseT_Full |
 3590						ADVERTISED_10baseT_Full;
 3591				else
 3592					advertising |= ADVERTISED_10baseT_Full;
 3593			}
 3594
 3595			phydev->advertising = advertising;
 3596
 3597			phy_start_aneg(phydev);
 3598
 3599			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
 3600			if (phyid != PHY_ID_BCMAC131) {
 3601				phyid &= PHY_BCM_OUI_MASK;
 3602				if (phyid == PHY_BCM_OUI_1 ||
 3603				    phyid == PHY_BCM_OUI_2 ||
 3604				    phyid == PHY_BCM_OUI_3)
 3605					do_low_power = true;
 3606			}
 3607		}
 3608	} else {
 3609		do_low_power = true;
 3610
 3611		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
 3612			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
 3613
 3614		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
 3615			tg3_setup_phy(tp, 0);
 3616	}
 3617
 3618	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
 3619		u32 val;
 3620
 3621		val = tr32(GRC_VCPU_EXT_CTRL);
 3622		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
 3623	} else if (!tg3_flag(tp, ENABLE_ASF)) {
 3624		int i;
 3625		u32 val;
 3626
 3627		for (i = 0; i < 200; i++) {
 3628			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
 3629			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
 3630				break;
 3631			msleep(1);
 3632		}
 3633	}
 3634	if (tg3_flag(tp, WOL_CAP))
 3635		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
 3636						     WOL_DRV_STATE_SHUTDOWN |
 3637						     WOL_DRV_WOL |
 3638						     WOL_SET_MAGIC_PKT);
 3639
 3640	if (device_should_wake) {
 3641		u32 mac_mode;
 3642
 3643		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
 3644			if (do_low_power &&
 3645			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
 3646				tg3_phy_auxctl_write(tp,
 3647					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
 3648					       MII_TG3_AUXCTL_PCTL_WOL_EN |
 3649					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
 3650					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
 3651				udelay(40);
 3652			}
 3653
 3654			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
 3655				mac_mode = MAC_MODE_PORT_MODE_GMII;
 3656			else
 3657				mac_mode = MAC_MODE_PORT_MODE_MII;
 3658
 3659			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
 3660			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
 3661			    ASIC_REV_5700) {
 3662				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
 3663					     SPEED_100 : SPEED_10;
 3664				if (tg3_5700_link_polarity(tp, speed))
 3665					mac_mode |= MAC_MODE_LINK_POLARITY;
 3666				else
 3667					mac_mode &= ~MAC_MODE_LINK_POLARITY;
 3668			}
 3669		} else {
 3670			mac_mode = MAC_MODE_PORT_MODE_TBI;
 3671		}
 3672
 3673		if (!tg3_flag(tp, 5750_PLUS))
 3674			tw32(MAC_LED_CTRL, tp->led_ctrl);
 3675
 3676		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
 3677		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
 3678		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
 3679			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
 3680
 3681		if (tg3_flag(tp, ENABLE_APE))
 3682			mac_mode |= MAC_MODE_APE_TX_EN |
 3683				    MAC_MODE_APE_RX_EN |
 3684				    MAC_MODE_TDE_ENABLE;
 3685
 3686		tw32_f(MAC_MODE, mac_mode);
 3687		udelay(100);
 3688
 3689		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
 3690		udelay(10);
 3691	}
 3692
 3693	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
 3694	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
 3695	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
 3696		u32 base_val;
 3697
 3698		base_val = tp->pci_clock_ctrl;
 3699		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
 3700			     CLOCK_CTRL_TXCLK_DISABLE);
 3701
 3702		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
 3703			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
 3704	} else if (tg3_flag(tp, 5780_CLASS) ||
 3705		   tg3_flag(tp, CPMU_PRESENT) ||
 3706		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
 3707		/* do nothing */
 3708	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
 3709		u32 newbits1, newbits2;
 3710
 3711		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
 3712		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
 3713			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
 3714				    CLOCK_CTRL_TXCLK_DISABLE |
 3715				    CLOCK_CTRL_ALTCLK);
 3716			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
 3717		} else if (tg3_flag(tp, 5705_PLUS)) {
 3718			newbits1 = CLOCK_CTRL_625_CORE;
 3719			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
 3720		} else {
 3721			newbits1 = CLOCK_CTRL_ALTCLK;
 3722			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
 3723		}
 3724
 3725		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
 3726			    40);
 3727
 3728		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
 3729			    40);
 3730
 3731		if (!tg3_flag(tp, 5705_PLUS)) {
 3732			u32 newbits3;
 3733
 3734			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
 3735			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
 3736				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
 3737					    CLOCK_CTRL_TXCLK_DISABLE |
 3738					    CLOCK_CTRL_44MHZ_CORE);
 3739			} else {
 3740				newbits3 = CLOCK_CTRL_44MHZ_CORE;
 3741			}
 3742
 3743			tw32_wait_f(TG3PCI_CLOCK_CTRL,
 3744				    tp->pci_clock_ctrl | newbits3, 40);
 3745		}
 3746	}
 3747
 3748	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
 3749		tg3_power_down_phy(tp, do_low_power);
 3750
 3751	tg3_frob_aux_power(tp, true);
 3752
 3753	/* Workaround for unstable PLL clock */
 3754	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
 3755	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
 3756		u32 val = tr32(0x7d00);
 3757
 3758		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
 3759		tw32(0x7d00, val);
 3760		if (!tg3_flag(tp, ENABLE_ASF)) {
 3761			int err;
 3762
 3763			err = tg3_nvram_lock(tp);
 3764			tg3_halt_cpu(tp, RX_CPU_BASE);
 3765			if (!err)
 3766				tg3_nvram_unlock(tp);
 3767		}
 3768	}
 3769
 3770	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
 3771
 3772	return 0;
 3773}
 3774
 3775static void tg3_power_down(struct tg3 *tp)
 3776{
 3777	tg3_power_down_prepare(tp);
 3778
 3779	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
 3780	pci_set_power_state(tp->pdev, PCI_D3hot);
 3781}
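
/* Editor's note (commentary, not driver logic): the ordering of the two
 * PCI calls above matters.  pci_wake_from_d3() must arm or disarm PME#
 * while the function is still in D0; only then is it moved to D3hot.
 * Reversing the calls would configure wake signalling on an already
 * powered-down function.
 */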
 3782
 3783static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
 3784{
 3785	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
 3786	case MII_TG3_AUX_STAT_10HALF:
 3787		*speed = SPEED_10;
 3788		*duplex = DUPLEX_HALF;
 3789		break;
 3790
 3791	case MII_TG3_AUX_STAT_10FULL:
 3792		*speed = SPEED_10;
 3793		*duplex = DUPLEX_FULL;
 3794		break;
 3795
 3796	case MII_TG3_AUX_STAT_100HALF:
 3797		*speed = SPEED_100;
 3798		*duplex = DUPLEX_HALF;
 3799		break;
 3800
 3801	case MII_TG3_AUX_STAT_100FULL:
 3802		*speed = SPEED_100;
 3803		*duplex = DUPLEX_FULL;
 3804		break;
 3805
 3806	case MII_TG3_AUX_STAT_1000HALF:
 3807		*speed = SPEED_1000;
 3808		*duplex = DUPLEX_HALF;
 3809		break;
 3810
 3811	case MII_TG3_AUX_STAT_1000FULL:
 3812		*speed = SPEED_1000;
 3813		*duplex = DUPLEX_FULL;
 3814		break;
 3815
 3816	default:
 3817		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
 3818			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
 3819				 SPEED_10;
 3820			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
 3821				  DUPLEX_HALF;
 3822			break;
 3823		}
 3824		*speed = SPEED_UNKNOWN;
 3825		*duplex = DUPLEX_UNKNOWN;
 3826		break;
 3827	}
 3828}
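
/* Worked example (a sketch, not driver code): feeding one of the
 * MII_TG3_AUX_STAT_* speed codes straight in decodes as expected,
 *
 *	u16 speed;
 *	u8 duplex;
 *
 *	tg3_aux_stat_to_speed_duplex(tp, MII_TG3_AUX_STAT_1000FULL,
 *				     &speed, &duplex);
 *	// speed == SPEED_1000, duplex == DUPLEX_FULL
 *
 * while FET-style PHYs (TG3_PHYFLG_IS_FET) that match none of the cases
 * fall back to the MII_TG3_AUX_STAT_100/_FULL bit tests in the default
 * arm.
 */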
 3829
 3830static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
 3831{
 3832	int err = 0;
 3833	u32 val, new_adv;
 3834
 3835	new_adv = ADVERTISE_CSMA;
 3836	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
 3837	new_adv |= mii_advertise_flowctrl(flowctrl);
 3838
 3839	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
 3840	if (err)
 3841		goto done;
 3842
 3843	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
 3844		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
 3845
 3846		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
 3847		    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
 3848			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
 3849
 3850		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
 3851		if (err)
 3852			goto done;
 3853	}
 3854
 3855	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
 3856		goto done;
 3857
 3858	tw32(TG3_CPMU_EEE_MODE,
 3859	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
 3860
 3861	err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
 3862	if (!err) {
 3863		u32 err2;
 3864
 3865		val = 0;
 3866		/* Advertise 100BASE-TX EEE ability */
 3867		if (advertise & ADVERTISED_100baseT_Full)
 3868			val |= MDIO_AN_EEE_ADV_100TX;
 3869		/* Advertise 1000BASE-T EEE ability */
 3870		if (advertise & ADVERTISED_1000baseT_Full)
 3871			val |= MDIO_AN_EEE_ADV_1000T;
 3872		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
 3873		if (err)
 3874			val = 0;
 3875
 3876		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
 3877		case ASIC_REV_5717:
 3878		case ASIC_REV_57765:
 3879		case ASIC_REV_57766:
 3880		case ASIC_REV_5719:
 3881			/* If we advertised any EEE modes above... */
 3882			if (val)
 3883				val = MII_TG3_DSP_TAP26_ALNOKO |
 3884				      MII_TG3_DSP_TAP26_RMRXSTO |
 3885				      MII_TG3_DSP_TAP26_OPCSINPT;
 3886			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
 3887			/* Fall through */
 3888		case ASIC_REV_5720:
 3889			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
 3890				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
 3891						 MII_TG3_DSP_CH34TP2_HIBW01);
 3892		}
 3893
 3894		err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
 3895		if (!err)
 3896			err = err2;
 3897	}
 3898
 3899done:
 3900	return err;
 3901}
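
/* For reference (generic <linux/mii.h> helpers, not tg3-specific):
 * ethtool_adv_to_mii_adv_t() above turns ethtool ADVERTISED_* bits into
 * MII_ADVERTISE register bits, e.g.
 *
 *	u32 adv = ADVERTISED_100baseT_Full | ADVERTISED_10baseT_Half;
 *	u32 mii = ethtool_adv_to_mii_adv_t(adv);
 *	// mii == ADVERTISE_100FULL | ADVERTISE_10HALF
 *
 * and mii_advertise_flowctrl() maps the FLOW_CTRL_TX/RX capability bits
 * onto the ADVERTISE_PAUSE_CAP/ADVERTISE_PAUSE_ASYM pause bits.
 */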
 3902
 3903static void tg3_phy_copper_begin(struct tg3 *tp)
 3904{
 3905	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
 3906	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
 3907		u32 adv, fc;
 3908
 3909		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
 3910			adv = ADVERTISED_10baseT_Half |
 3911			      ADVERTISED_10baseT_Full;
 3912			if (tg3_flag(tp, WOL_SPEED_100MB))
 3913				adv |= ADVERTISED_100baseT_Half |
 3914				       ADVERTISED_100baseT_Full;
 3915
 3916			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
 3917		} else {
 3918			adv = tp->link_config.advertising;
 3919			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
 3920				adv &= ~(ADVERTISED_1000baseT_Half |
 3921					 ADVERTISED_1000baseT_Full);
 3922
 3923			fc = tp->link_config.flowctrl;
 3924		}
 3925
 3926		tg3_phy_autoneg_cfg(tp, adv, fc);
 3927
 3928		tg3_writephy(tp, MII_BMCR,
 3929			     BMCR_ANENABLE | BMCR_ANRESTART);
 3930	} else {
 3931		int i;
 3932		u32 bmcr, orig_bmcr;
 3933
 3934		tp->link_config.active_speed = tp->link_config.speed;
 3935		tp->link_config.active_duplex = tp->link_config.duplex;
 3936
 3937		bmcr = 0;
 3938		switch (tp->link_config.speed) {
 3939		default:
 3940		case SPEED_10:
 3941			break;
 3942
 3943		case SPEED_100:
 3944			bmcr |= BMCR_SPEED100;
 3945			break;
 3946
 3947		case SPEED_1000:
 3948			bmcr |= BMCR_SPEED1000;
 3949			break;
 3950		}
 3951
 3952		if (tp->link_config.duplex == DUPLEX_FULL)
 3953			bmcr |= BMCR_FULLDPLX;
 3954
 3955		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
 3956		    (bmcr != orig_bmcr)) {
 3957			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
 3958			for (i = 0; i < 1500; i++) {
 3959				u32 tmp;
 3960
 3961				udelay(10);
 3962				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
 3963				    tg3_readphy(tp, MII_BMSR, &tmp))
 3964					continue;
 3965				if (!(tmp & BMSR_LSTATUS)) {
 3966					udelay(40);
 3967					break;
 3968				}
 3969			}
 3970			tg3_writephy(tp, MII_BMCR, bmcr);
 3971			udelay(40);
 3972		}
 3973	}
 3974}
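
/* Worked example for the forced-speed branch above (values are the
 * standard <linux/mii.h> constants): forcing 100 Mb/s full duplex
 * composes
 *
 *	bmcr = BMCR_SPEED100 | BMCR_FULLDPLX;	// 0x2000 | 0x0100
 *
 * and the 1500 x udelay(10) loop merely gives the old link up to ~15 ms
 * to drop while the PHY sits in loopback, before the real BMCR value is
 * written.
 */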
 3975
 3976static int tg3_init_5401phy_dsp(struct tg3 *tp)
 3977{
 3978	int err;
 3979
 3980	/* Turn off tap power management and
 3981	 * set the extended packet length bit. */
 3982	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
 3983
 3984	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
 3985	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
 3986	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
 3987	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
 3988	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
 3989
 3990	udelay(40);
 3991
 3992	return err;
 3993}
 3994
 3995static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
 3996{
 3997	u32 advmsk, tgtadv, advertising;
 3998
 3999	advertising = tp->link_config.advertising;
 4000	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
 4001
 4002	advmsk = ADVERTISE_ALL;
 4003	if (tp->link_config.active_duplex == DUPLEX_FULL) {
 4004		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
 4005		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
 4006	}
 4007
 4008	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
 4009		return false;
 4010
 4011	if ((*lcladv & advmsk) != tgtadv)
 4012		return false;
 4013
 4014	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
 4015		u32 tg3_ctrl;
 4016
 4017		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
 4018
 4019		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
 4020			return false;
 4021
 4022		if (tgtadv &&
 4023		    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
 4024		     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
 4025			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
 4026			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
 4027				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
 4028		} else {
 4029			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
 4030		}
 4031
 4032		if (tg3_ctrl != tgtadv)
 4033			return false;
 4034	}
 4035
 4036	return true;
 4037}
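
/* Usage sketch (hypothetical caller, not added driver logic): the
 * helper above answers "does the PHY's current advertisement match
 * link_config?", so a mismatch is grounds to re-program the PHY and
 * restart autonegotiation:
 *
 *	u32 lcladv;
 *
 *	if (!tg3_phy_copper_an_config_ok(tp, &lcladv))
 *		tg3_phy_copper_begin(tp);
 */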
 4038
 4039static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
 4040{
 4041	u32 lpeth = 0;
 4042
 4043	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
 4044		u32 val;
 4045
 4046		if (tg3_readphy(tp, MII_STAT1000, &val))
 4047			return false;
 4048
 4049		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
 4050	}
 4051
 4052	if (tg3_readphy(tp, MII_LPA, rmtadv))
 4053		return false;
 4054
 4055	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
 4056	tp->link_config.rmt_adv = lpeth;
 4057
 4058	return true;
 4059}
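
/* For reference (generic <linux/mii.h> helpers): the two conversions
 * above merge the 1000BASE-T and 10/100 partner abilities into one
 * ethtool bitmap.  E.g. a partner reporting LPA_1000FULL in MII_STAT1000
 * and LPA_100FULL in MII_LPA yields
 *
 *	lpeth == ADVERTISED_1000baseT_Full | ADVERTISED_100baseT_Full
 *
 * (plus ADVERTISED_Autoneg when the partner sets LPA_LPACK), which is
 * what tp->link_config.rmt_adv ends up holding.
 */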
 4060
 4061static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
 4062{
 4063	int current_link_up;
 4064	u32 bmsr, val;
 4065	u32 lcl_adv, rmt_adv;
 4066	u16 current_speed;
 4067	u8 current_duplex;
 4068	int i, err;
 4069
 4070	tw32(MAC_EVENT, 0);
 4071
 4072	tw32_f(MAC_STATUS,
 4073	     (MAC_STATUS_SYNC_CHANGED |
 4074	      MAC_STATUS_CFG_CHANGED |
 4075	      MAC_STATUS_MI_COMPLETION |
 4076	      MAC_STATUS_LNKSTATE_CHANGED));
 4077	udelay(40);
 4078
 4079	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
 4080		tw32_f(MAC_MI_MODE,
 4081		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
 4082		udelay(80);
 4083	}
 4084
 4085	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
 4086
 4087	/* Some third-party PHYs need to be reset on link going
 4088	 * down.
 4089	 */
 4090	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
 4091	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
 4092	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
 4093	    netif_carrier_ok(tp->dev)) {
 4094		tg3_readphy(tp, MII_BMSR, &bmsr);
 4095		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
 4096		    !(bmsr & BMSR_LSTATUS))
 4097			force_reset = 1;
 4098	}
 4099	if (force_reset)
 4100		tg3_phy_reset(tp);
 4101
 4102	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
 4103		tg3_readphy(tp, MII_BMSR, &bmsr);
 4104		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
 4105		    !tg3_flag(tp, INIT_COMPLETE))
 4106			bmsr = 0;
 4107
 4108		if (!(bmsr & BMSR_LSTATUS)) {
 4109			err = tg3_init_5401phy_dsp(tp);
 4110			if (err)
 4111				return err;
 4112
 4113			tg3_readphy(tp, MII_BMSR, &bmsr);
 4114			for (i = 0; i < 1000; i++) {
 4115				udelay(10);
 4116				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
 4117				    (bmsr & BMSR_LSTATUS)) {
 4118					udelay(40);
 4119					break;
 4120				}
 4121			}
 4122
 4123			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
 4124			    TG3_PHY_REV_BCM5401_B0 &&
 4125			    !(bmsr & BMSR_LSTATUS) &&
 4126			    tp->link_config.active_speed == SPEED_1000) {
 4127				err = tg3_phy_reset(tp);
 4128				if (!err)
 4129					err = tg3_init_5401phy_dsp(tp);
 4130				if (err)
 4131					return err;
 4132			}
 4133		}
 4134	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
 4135		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
 4136		/* 5701 {A0,B0} CRC bug workaround */
 4137		tg3_writephy(tp, 0x15, 0x0a75);
 4138		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
 4139		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
 4140		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
 4141	}
 4142
 4143	/* Clear pending interrupts... */
 4144	tg3_readphy(tp, MII_TG3_ISTAT, &val);
 4145	tg3_readphy(tp, MII_TG3_ISTAT, &val);
 4146
 4147	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
 4148		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
 4149	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
 4150		tg3_writephy(tp, MII_TG3_IMASK, ~0);
 4151
 4152	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
 4153	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
 4154		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
 4155			tg3_writephy(tp, MII_TG3_EXT_CTRL,
 4156				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
 4157		else
 4158			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
 4159	}
 4160
 4161	current_link_up = 0;
 4162	current_speed = SPEED_UNKNOWN;
 4163	current_duplex = DUPLEX_UNKNOWN;
 4164	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
 4165	tp->link_config.rmt_adv = 0;
 4166
 4167	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
 4168		err = tg3_phy_auxctl_read(tp,
 4169					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
 4170					  &val);
 4171		if (!err && !(val & (1 << 10))) {
 4172			tg3_phy_auxctl_write(tp,
 4173					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
 4174					     val | (1 << 10));
 4175			goto relink;
 4176		}
 4177	}
 4178
 4179	bmsr = 0;
 4180	for (i = 0; i < 100; i++) {
 4181		tg3_readphy(tp, MII_BMSR, &bmsr);
 4182		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
 4183		    (bmsr & BMSR_LSTATUS))
 4184			break;
 4185		udelay(40);
 4186	}
 4187
 4188	if (bmsr & BMSR_LSTATUS) {
 4189		u32 aux_stat, bmcr;
 4190
 4191		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
 4192		for (i = 0; i < 2000; i++) {
 4193			udelay(10);
 4194			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
 4195			    aux_stat)
 4196				break;
 4197		}
 4198
 4199		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
 4200					     &current_speed,
 4201					     &current_duplex);
 4202
 4203		bmcr = 0;
 4204		for (i = 0; i < 200; i++) {
 4205			tg3_readphy(tp, MII_BMCR, &bmcr);
 4206			if (tg3_readphy(tp, MII_BMCR, &bmcr))
 4207				continue;
 4208			if (bmcr && bmcr != 0x7fff)
 4209				break;
 4210			udelay(10);
 4211		}
 4212
 4213		lcl_adv = 0;
 4214		rmt_adv = 0;
 4215
 4216		tp->link_config.active_speed = current_speed;
 4217		tp->link_config.active_duplex = current_duplex;
 4218
 4219		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
 4220			if ((bmcr & BMCR_ANENABLE) &&
 4221			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
 4222			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
 4223				current_link_up = 1;
 4224		} else {
 4225			if (!(bmcr & BMCR_ANENABLE) &&
 4226			    tp->link_config.speed == current_speed &&
 4227			    tp->link_config.duplex == current_duplex &&
 4228			    tp->link_config.flowctrl ==
 4229			    tp->link_config.active_flowctrl) {
 4230				current_link_up = 1;
 4231			}
 4232		}
 4233
 4234		if (current_link_up == 1 &&
 4235		    tp->link_config.active_duplex == DUPLEX_FULL) {
 4236			u32 reg, bit;
 4237
 4238			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
 4239				reg = MII_TG3_FET_GEN_STAT;
 4240				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
 4241			} else {
 4242				reg = MII_TG3_EXT_STAT;
 4243				bit = MII_TG3_EXT_STAT_MDIX;
 4244			}
 4245
 4246			if (!tg3_readphy(tp, reg, &val) && (val & bit))
 4247				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
 4248
 4249			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
 4250		}
 4251	}
 4252
 4253relink:
 4254	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
 4255		tg3_phy_copper_begin(tp);
 4256
 4257		tg3_readphy(tp, MII_BMSR, &bmsr);
 4258		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
 4259		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
 4260			current_link_up = 1;
 4261	}
 4262
 4263	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
 4264	if (current_link_up == 1) {
 4265		if (tp->link_config.active_speed == SPEED_100 ||
 4266		    tp->link_config.active_speed == SPEED_10)
 4267			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
 4268		else
 4269			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
 4270	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
 4271		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
 4272	else
 4273		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
 4274
 4275	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
 4276	if (tp->link_config.active_duplex == DUPLEX_HALF)
 4277		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
 4278
 4279	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
 4280		if (current_link_up == 1 &&
 4281		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
 4282			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
 4283		else
 4284			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
 4285	}
 4286
 4287	/* ??? Without this setting Netgear GA302T PHY does not
 4288	 * ??? send/receive packets...
 4289	 */
 4290	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
 4291	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
 4292		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
 4293		tw32_f(MAC_MI_MODE, tp->mi_mode);
 4294		udelay(80);
 4295	}
 4296
 4297	tw32_f(MAC_MODE, tp->mac_mode);
 4298	udelay(40);
 4299
 4300	tg3_phy_eee_adjust(tp, current_link_up);
 4301
 4302	if (tg3_flag(tp, USE_LINKCHG_REG)) {
 4303		/* Polled via timer. */
 4304		tw32_f(MAC_EVENT, 0);
 4305	} else {
 4306		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
 4307	}
 4308	udelay(40);
 4309
 4310	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
 4311	    current_link_up == 1 &&
 4312	    tp->link_config.active_speed == SPEED_1000 &&
 4313	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
 4314		udelay(120);
 4315		tw32_f(MAC_STATUS,
 4316		     (MAC_STATUS_SYNC_CHANGED |
 4317		      MAC_STATUS_CFG_CHANGED));
 4318		udelay(40);
 4319		tg3_write_mem(tp,
 4320			      NIC_SRAM_FIRMWARE_MBOX,
 4321			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
 4322	}
 4323
 4324	/* Prevent send BD corruption. */
 4325	if (tg3_flag(tp, CLKREQ_BUG)) {
 4326		u16 oldlnkctl, newlnkctl;
 4327
 4328		pci_read_config_word(tp->pdev,
 4329				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
 4330				     &oldlnkctl);
 4331		if (tp->link_config.active_speed == SPEED_100 ||
 4332		    tp->link_config.active_speed == SPEED_10)
 4333			newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
 4334		else
 4335			newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
 4336		if (newlnkctl != oldlnkctl)
 4337			pci_write_config_word(tp->pdev,
 4338					      pci_pcie_cap(tp->pdev) +
 4339					      PCI_EXP_LNKCTL, newlnkctl);
 4340	}
 4341
 4342	if (current_link_up != netif_carrier_ok(tp->dev)) {
 4343		if (current_link_up)
 4344			netif_carrier_on(tp->dev);
 4345		else
 4346			netif_carrier_off(tp->dev);
 4347		tg3_link_report(tp);
 4348	}
 4349
 4350	return 0;
 4351}
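
/* A note on the paired tg3_readphy(tp, MII_BMSR, ...) calls seen
 * throughout the function above: BMSR latches link-down events, so the
 * first read returns the latched (stale) state and only the second read
 * reflects the current link.  The idiom, as a sketch:
 *
 *	u32 bmsr;
 *
 *	tg3_readphy(tp, MII_BMSR, &bmsr);	// flush latched bits
 *	if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
 *	    (bmsr & BMSR_LSTATUS))
 *		;	// link is genuinely up now
 */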
 4352
 4353struct tg3_fiber_aneginfo {
 4354	int state;
 4355#define ANEG_STATE_UNKNOWN		0
 4356#define ANEG_STATE_AN_ENABLE		1
 4357#define ANEG_STATE_RESTART_INIT		2
 4358#define ANEG_STATE_RESTART		3
 4359#define ANEG_STATE_DISABLE_LINK_OK	4
 4360#define ANEG_STATE_ABILITY_DETECT_INIT	5
 4361#define ANEG_STATE_ABILITY_DETECT	6
 4362#define ANEG_STATE_ACK_DETECT_INIT	7
 4363#define ANEG_STATE_ACK_DETECT		8
 4364#define ANEG_STATE_COMPLETE_ACK_INIT	9
 4365#define ANEG_STATE_COMPLETE_ACK		10
 4366#define ANEG_STATE_IDLE_DETECT_INIT	11
 4367#define ANEG_STATE_IDLE_DETECT		12
 4368#define ANEG_STATE_LINK_OK		13
 4369#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
 4370#define ANEG_STATE_NEXT_PAGE_WAIT	15
 4371
 4372	u32 flags;
 4373#define MR_AN_ENABLE		0x00000001
 4374#define MR_RESTART_AN		0x00000002
 4375#define MR_AN_COMPLETE		0x00000004
 4376#define MR_PAGE_RX		0x00000008
 4377#define MR_NP_LOADED		0x00000010
 4378#define MR_TOGGLE_TX		0x00000020
 4379#define MR_LP_ADV_FULL_DUPLEX	0x00000040
 4380#define MR_LP_ADV_HALF_DUPLEX	0x00000080
 4381#define MR_LP_ADV_SYM_PAUSE	0x00000100
 4382#define MR_LP_ADV_ASYM_PAUSE	0x00000200
 4383#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
 4384#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
 4385#define MR_LP_ADV_NEXT_PAGE	0x00001000
 4386#define MR_TOGGLE_RX		0x00002000
 4387#define MR_NP_RX		0x00004000
 4388
 4389#define MR_LINK_OK		0x80000000
 4390
 4391	unsigned long link_time, cur_time;
 4392
 4393	u32 ability_match_cfg;
 4394	int ability_match_count;
 4395
 4396	char ability_match, idle_match, ack_match;
 4397
 4398	u32 txconfig, rxconfig;
 4399#define ANEG_CFG_NP		0x00000080
 4400#define ANEG_CFG_ACK		0x00000040
 4401#define ANEG_CFG_RF2		0x00000020
 4402#define ANEG_CFG_RF1		0x00000010
 4403#define ANEG_CFG_PS2		0x00000001
 4404#define ANEG_CFG_PS1		0x00008000
 4405#define ANEG_CFG_HD		0x00004000
 4406#define ANEG_CFG_FD		0x00002000
 4407#define ANEG_CFG_INVAL		0x00001f06
 4408
 4409};
 4410#define ANEG_OK		0
 4411#define ANEG_DONE	1
 4412#define ANEG_TIMER_ENAB	2
 4413#define ANEG_FAILED	-1
 4414
 4415#define ANEG_STATE_SETTLE_TIME	10000
 4416
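/* Note on units: this state machine is clocked by its caller,
 * fiber_autoneg() below, which advances it once per udelay(1), so
 * ap->cur_time ticks are roughly microseconds.  ANEG_STATE_SETTLE_TIME
 * (10000) is thus ~10 ms of settle time, and the caller's 195000-tick
 * loop bounds the whole negotiation at ~195 ms.
 */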
 4417static int tg3_fiber_aneg_smachine(struct tg3 *tp,
 4418				   struct tg3_fiber_aneginfo *ap)
 4419{
 4420	u16 flowctrl;
 4421	unsigned long delta;
 4422	u32 rx_cfg_reg;
 4423	int ret;
 4424
 4425	if (ap->state == ANEG_STATE_UNKNOWN) {
 4426		ap->rxconfig = 0;
 4427		ap->link_time = 0;
 4428		ap->cur_time = 0;
 4429		ap->ability_match_cfg = 0;
 4430		ap->ability_match_count = 0;
 4431		ap->ability_match = 0;
 4432		ap->idle_match = 0;
 4433		ap->ack_match = 0;
 4434	}
 4435	ap->cur_time++;
 4436
 4437	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
 4438		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
 4439
 4440		if (rx_cfg_reg != ap->ability_match_cfg) {
 4441			ap->ability_match_cfg = rx_cfg_reg;
 4442			ap->ability_match = 0;
 4443			ap->ability_match_count = 0;
 4444		} else {
 4445			if (++ap->ability_match_count > 1) {
 4446				ap->ability_match = 1;
 4447				ap->ability_match_cfg = rx_cfg_reg;
 4448			}
 4449		}
 4450		if (rx_cfg_reg & ANEG_CFG_ACK)
 4451			ap->ack_match = 1;
 4452		else
 4453			ap->ack_match = 0;
 4454
 4455		ap->idle_match = 0;
 4456	} else {
 4457		ap->idle_match = 1;
 4458		ap->ability_match_cfg = 0;
 4459		ap->ability_match_count = 0;
 4460		ap->ability_match = 0;
 4461		ap->ack_match = 0;
 4462
 4463		rx_cfg_reg = 0;
 4464	}
 4465
 4466	ap->rxconfig = rx_cfg_reg;
 4467	ret = ANEG_OK;
 4468
 4469	switch (ap->state) {
 4470	case ANEG_STATE_UNKNOWN:
 4471		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
 4472			ap->state = ANEG_STATE_AN_ENABLE;
 4473
 4474		/* fallthru */
 4475	case ANEG_STATE_AN_ENABLE:
 4476		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
 4477		if (ap->flags & MR_AN_ENABLE) {
 4478			ap->link_time = 0;
 4479			ap->cur_time = 0;
 4480			ap->ability_match_cfg = 0;
 4481			ap->ability_match_count = 0;
 4482			ap->ability_match = 0;
 4483			ap->idle_match = 0;
 4484			ap->ack_match = 0;
 4485
 4486			ap->state = ANEG_STATE_RESTART_INIT;
 4487		} else {
 4488			ap->state = ANEG_STATE_DISABLE_LINK_OK;
 4489		}
 4490		break;
 4491
 4492	case ANEG_STATE_RESTART_INIT:
 4493		ap->link_time = ap->cur_time;
 4494		ap->flags &= ~(MR_NP_LOADED);
 4495		ap->txconfig = 0;
 4496		tw32(MAC_TX_AUTO_NEG, 0);
 4497		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
 4498		tw32_f(MAC_MODE, tp->mac_mode);
 4499		udelay(40);
 4500
 4501		ret = ANEG_TIMER_ENAB;
 4502		ap->state = ANEG_STATE_RESTART;
 4503
 4504		/* fallthru */
 4505	case ANEG_STATE_RESTART:
 4506		delta = ap->cur_time - ap->link_time;
 4507		if (delta > ANEG_STATE_SETTLE_TIME)
 4508			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
 4509		else
 4510			ret = ANEG_TIMER_ENAB;
 4511		break;
 4512
 4513	case ANEG_STATE_DISABLE_LINK_OK:
 4514		ret = ANEG_DONE;
 4515		break;
 4516
 4517	case ANEG_STATE_ABILITY_DETECT_INIT:
 4518		ap->flags &= ~(MR_TOGGLE_TX);
 4519		ap->txconfig = ANEG_CFG_FD;
 4520		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
 4521		if (flowctrl & ADVERTISE_1000XPAUSE)
 4522			ap->txconfig |= ANEG_CFG_PS1;
 4523		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
 4524			ap->txconfig |= ANEG_CFG_PS2;
 4525		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
 4526		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
 4527		tw32_f(MAC_MODE, tp->mac_mode);
 4528		udelay(40);
 4529
 4530		ap->state = ANEG_STATE_ABILITY_DETECT;
 4531		break;
 4532
 4533	case ANEG_STATE_ABILITY_DETECT:
 4534		if (ap->ability_match != 0 && ap->rxconfig != 0)
 4535			ap->state = ANEG_STATE_ACK_DETECT_INIT;
 4536		break;
 4537
 4538	case ANEG_STATE_ACK_DETECT_INIT:
 4539		ap->txconfig |= ANEG_CFG_ACK;
 4540		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
 4541		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
 4542		tw32_f(MAC_MODE, tp->mac_mode);
 4543		udelay(40);
 4544
 4545		ap->state = ANEG_STATE_ACK_DETECT;
 4546
 4547		/* fallthru */
 4548	case ANEG_STATE_ACK_DETECT:
 4549		if (ap->ack_match != 0) {
 4550			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
 4551			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
 4552				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
 4553			} else {
 4554				ap->state = ANEG_STATE_AN_ENABLE;
 4555			}
 4556		} else if (ap->ability_match != 0 &&
 4557			   ap->rxconfig == 0) {
 4558			ap->state = ANEG_STATE_AN_ENABLE;
 4559		}
 4560		break;
 4561
 4562	case ANEG_STATE_COMPLETE_ACK_INIT:
 4563		if (ap->rxconfig & ANEG_CFG_INVAL) {
 4564			ret = ANEG_FAILED;
 4565			break;
 4566		}
 4567		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
 4568			       MR_LP_ADV_HALF_DUPLEX |
 4569			       MR_LP_ADV_SYM_PAUSE |
 4570			       MR_LP_ADV_ASYM_PAUSE |
 4571			       MR_LP_ADV_REMOTE_FAULT1 |
 4572			       MR_LP_ADV_REMOTE_FAULT2 |
 4573			       MR_LP_ADV_NEXT_PAGE |
 4574			       MR_TOGGLE_RX |
 4575			       MR_NP_RX);
 4576		if (ap->rxconfig & ANEG_CFG_FD)
 4577			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
 4578		if (ap->rxconfig & ANEG_CFG_HD)
 4579			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
 4580		if (ap->rxconfig & ANEG_CFG_PS1)
 4581			ap->flags |= MR_LP_ADV_SYM_PAUSE;
 4582		if (ap->rxconfig & ANEG_CFG_PS2)
 4583			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
 4584		if (ap->rxconfig & ANEG_CFG_RF1)
 4585			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
 4586		if (ap->rxconfig & ANEG_CFG_RF2)
 4587			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
 4588		if (ap->rxconfig & ANEG_CFG_NP)
 4589			ap->flags |= MR_LP_ADV_NEXT_PAGE;
 4590
 4591		ap->link_time = ap->cur_time;
 4592
 4593		ap->flags ^= (MR_TOGGLE_TX);
 4594		if (ap->rxconfig & 0x0008)
 4595			ap->flags |= MR_TOGGLE_RX;
 4596		if (ap->rxconfig & ANEG_CFG_NP)
 4597			ap->flags |= MR_NP_RX;
 4598		ap->flags |= MR_PAGE_RX;
 4599
 4600		ap->state = ANEG_STATE_COMPLETE_ACK;
 4601		ret = ANEG_TIMER_ENAB;
 4602		break;
 4603
 4604	case ANEG_STATE_COMPLETE_ACK:
 4605		if (ap->ability_match != 0 &&
 4606		    ap->rxconfig == 0) {
 4607			ap->state = ANEG_STATE_AN_ENABLE;
 4608			break;
 4609		}
 4610		delta = ap->cur_time - ap->link_time;
 4611		if (delta > ANEG_STATE_SETTLE_TIME) {
 4612			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
 4613				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
 4614			} else {
 4615				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
 4616				    !(ap->flags & MR_NP_RX)) {
 4617					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
 4618				} else {
 4619					ret = ANEG_FAILED;
 4620				}
 4621			}
 4622		}
 4623		break;
 4624
 4625	case ANEG_STATE_IDLE_DETECT_INIT:
 4626		ap->link_time = ap->cur_time;
 4627		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
 4628		tw32_f(MAC_MODE, tp->mac_mode);
 4629		udelay(40);
 4630
 4631		ap->state = ANEG_STATE_IDLE_DETECT;
 4632		ret = ANEG_TIMER_ENAB;
 4633		break;
 4634
 4635	case ANEG_STATE_IDLE_DETECT:
 4636		if (ap->ability_match != 0 &&
 4637		    ap->rxconfig == 0) {
 4638			ap->state = ANEG_STATE_AN_ENABLE;
 4639			break;
 4640		}
 4641		delta = ap->cur_time - ap->link_time;
 4642		if (delta > ANEG_STATE_SETTLE_TIME) {
 4643			/* XXX another gem from the Broadcom driver :( */
 4644			ap->state = ANEG_STATE_LINK_OK;
 4645		}
 4646		break;
 4647
 4648	case ANEG_STATE_LINK_OK:
 4649		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
 4650		ret = ANEG_DONE;
 4651		break;
 4652
 4653	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
 4654		/* ??? unimplemented */
 4655		break;
 4656
 4657	case ANEG_STATE_NEXT_PAGE_WAIT:
 4658		/* ??? unimplemented */
 4659		break;
 4660
 4661	default:
 4662		ret = ANEG_FAILED;
 4663		break;
 4664	}
 4665
 4666	return ret;
 4667}
 4668
 4669static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
 4670{
 4671	int res = 0;
 4672	struct tg3_fiber_aneginfo aninfo;
 4673	int status = ANEG_FAILED;
 4674	unsigned int tick;
 4675	u32 tmp;
 4676
 4677	tw32_f(MAC_TX_AUTO_NEG, 0);
 4678
 4679	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
 4680	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
 4681	udelay(40);
 4682
 4683	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
 4684	udelay(40);
 4685
 4686	memset(&aninfo, 0, sizeof(aninfo));
 4687	aninfo.flags |= MR_AN_ENABLE;
 4688	aninfo.state = ANEG_STATE_UNKNOWN;
 4689	aninfo.cur_time = 0;
 4690	tick = 0;
 4691	while (++tick < 195000) {
 4692		status = tg3_fiber_aneg_smachine(tp, &aninfo);
 4693		if (status == ANEG_DONE || status == ANEG_FAILED)
 4694			break;
 4695
 4696		udelay(1);
 4697	}
 4698
 4699	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
 4700	tw32_f(MAC_MODE, tp->mac_mode);
 4701	udelay(40);
 4702
 4703	*txflags = aninfo.txconfig;
 4704	*rxflags = aninfo.flags;
 4705
 4706	if (status == ANEG_DONE &&
 4707	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
 4708			     MR_LP_ADV_FULL_DUPLEX)))
 4709		res = 1;
 4710
 4711	return res;
 4712}
 4713
 4714static void tg3_init_bcm8002(struct tg3 *tp)
 4715{
 4716	u32 mac_status = tr32(MAC_STATUS);
 4717	int i;
 4718
 4719	/* Reset when initializing for the first time or when we have a link. */
 4720	if (tg3_flag(tp, INIT_COMPLETE) &&
 4721	    !(mac_status & MAC_STATUS_PCS_SYNCED))
 4722		return;
 4723
 4724	/* Set PLL lock range. */
 4725	tg3_writephy(tp, 0x16, 0x8007);
 4726
 4727	/* SW reset */
 4728	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
 4729
 4730	/* Wait for reset to complete. */
 4731	/* XXX schedule_timeout() ... */
 4732	for (i = 0; i < 500; i++)
 4733		udelay(10);
 4734
 4735	/* Config mode; select PMA/Ch 1 regs. */
 4736	tg3_writephy(tp, 0x10, 0x8411);
 4737
 4738	/* Enable auto-lock and comdet, select txclk for tx. */
 4739	tg3_writephy(tp, 0x11, 0x0a10);
 4740
 4741	tg3_writephy(tp, 0x18, 0x00a0);
 4742	tg3_writephy(tp, 0x16, 0x41ff);
 4743
 4744	/* Assert and deassert POR. */
 4745	tg3_writephy(tp, 0x13, 0x0400);
 4746	udelay(40);
 4747	tg3_writephy(tp, 0x13, 0x0000);
 4748
 4749	tg3_writephy(tp, 0x11, 0x0a50);
 4750	udelay(40);
 4751	tg3_writephy(tp, 0x11, 0x0a10);
 4752
 4753	/* Wait for signal to stabilize */
 4754	/* XXX schedule_timeout() ... */
 4755	for (i = 0; i < 15000; i++)
 4756		udelay(10);
 4757
 4758	/* Deselect the channel register so we can read the PHYID
 4759	 * later.
 4760	 */
 4761	tg3_writephy(tp, 0x10, 0x8011);
 4762}
 4763
 4764static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
 4765{
 4766	u16 flowctrl;
 4767	u32 sg_dig_ctrl, sg_dig_status;
 4768	u32 serdes_cfg, expected_sg_dig_ctrl;
 4769	int workaround, port_a;
 4770	int current_link_up;
 4771
 4772	serdes_cfg = 0;
 4773	expected_sg_dig_ctrl = 0;
 4774	workaround = 0;
 4775	port_a = 1;
 4776	current_link_up = 0;
 4777
 4778	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
 4779	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
 4780		workaround = 1;
 4781		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
 4782			port_a = 0;
 4783
 4784		/* preserve bits 0-11,13,14 for signal pre-emphasis */
 4785		/* preserve bits 20-23 for voltage regulator */
 4786		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
 4787	}
 4788
 4789	sg_dig_ctrl = tr32(SG_DIG_CTRL);
 4790
 4791	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
 4792		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
 4793			if (workaround) {
 4794				u32 val = serdes_cfg;
 4795
 4796				if (port_a)
 4797					val |= 0xc010000;
 4798				else
 4799					val |= 0x4010000;
 4800				tw32_f(MAC_SERDES_CFG, val);
 4801			}
 4802
 4803			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
 4804		}
 4805		if (mac_status & MAC_STATUS_PCS_SYNCED) {
 4806			tg3_setup_flow_control(tp, 0, 0);
 4807			current_link_up = 1;
 4808		}
 4809		goto out;
 4810	}
 4811
 4812	/* Want auto-negotiation.  */
 4813	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
 4814
 4815	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
 4816	if (flowctrl & ADVERTISE_1000XPAUSE)
 4817		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
 4818	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
 4819		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
 4820
 4821	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
 4822		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
 4823		    tp->serdes_counter &&
 4824		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
 4825				    MAC_STATUS_RCVD_CFG)) ==
 4826		     MAC_STATUS_PCS_SYNCED)) {
 4827			tp->serdes_counter--;
 4828			current_link_up = 1;
 4829			goto out;
 4830		}
 4831restart_autoneg:
 4832		if (workaround)
 4833			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
 4834		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
 4835		udelay(5);
 4836		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
 4837
 4838		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
 4839		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
 4840	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
 4841				 MAC_STATUS_SIGNAL_DET)) {
 4842		sg_dig_status = tr32(SG_DIG_STATUS);
 4843		mac_status = tr32(MAC_STATUS);
 4844
 4845		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
 4846		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
 4847			u32 local_adv = 0, remote_adv = 0;
 4848
 4849			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
 4850				local_adv |= ADVERTISE_1000XPAUSE;
 4851			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
 4852				local_adv |= ADVERTISE_1000XPSE_ASYM;
 4853
 4854			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
 4855				remote_adv |= LPA_1000XPAUSE;
 4856			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
 4857				remote_adv |= LPA_1000XPAUSE_ASYM;
 4858
 4859			tp->link_config.rmt_adv =
 4860					   mii_adv_to_ethtool_adv_x(remote_adv);
 4861
 4862			tg3_setup_flow_control(tp, local_adv, remote_adv);
 4863			current_link_up = 1;
 4864			tp->serdes_counter = 0;
 4865			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
 4866		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
 4867			if (tp->serdes_counter)
 4868				tp->serdes_counter--;
 4869			else {
 4870				if (workaround) {
 4871					u32 val = serdes_cfg;
 4872
 4873					if (port_a)
 4874						val |= 0xc010000;
 4875					else
 4876						val |= 0x4010000;
 4877
 4878					tw32_f(MAC_SERDES_CFG, val);
 4879				}
 4880
 4881				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
 4882				udelay(40);
 4883
 4884				/* Link parallel detection - link is up
 4885				 * only if we have PCS_SYNC and are not
 4886				 * receiving config code words. */
 4887				mac_status = tr32(MAC_STATUS);
 4888				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
 4889				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
 4890					tg3_setup_flow_control(tp, 0, 0);
 4891					current_link_up = 1;
 4892					tp->phy_flags |=
 4893						TG3_PHYFLG_PARALLEL_DETECT;
 4894					tp->serdes_counter =
 4895						SERDES_PARALLEL_DET_TIMEOUT;
 4896				} else
 4897					goto restart_autoneg;
 4898			}
 4899		}
 4900	} else {
 4901		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
 4902		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
 4903	}
 4904
 4905out:
 4906	return current_link_up;
 4907}
 4908
 4909static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
 4910{
 4911	int current_link_up = 0;
 4912
 4913	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
 4914		goto out;
 4915
 4916	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
 4917		u32 txflags, rxflags;
 4918		int i;
 4919
 4920		if (fiber_autoneg(tp, &txflags, &rxflags)) {
 4921			u32 local_adv = 0, remote_adv = 0;
 4922
 4923			if (txflags & ANEG_CFG_PS1)
 4924				local_adv |= ADVERTISE_1000XPAUSE;
 4925			if (txflags & ANEG_CFG_PS2)
 4926				local_adv |= ADVERTISE_1000XPSE_ASYM;
 4927
 4928			if (rxflags & MR_LP_ADV_SYM_PAUSE)
 4929				remote_adv |= LPA_1000XPAUSE;
 4930			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
 4931				remote_adv |= LPA_1000XPAUSE_ASYM;
 4932
 4933			tp->link_config.rmt_adv =
 4934					   mii_adv_to_ethtool_adv_x(remote_adv);
 4935
 4936			tg3_setup_flow_control(tp, local_adv, remote_adv);
 4937
 4938			current_link_up = 1;
 4939		}
 4940		for (i = 0; i < 30; i++) {
 4941			udelay(20);
 4942			tw32_f(MAC_STATUS,
 4943			       (MAC_STATUS_SYNC_CHANGED |
 4944				MAC_STATUS_CFG_CHANGED));
 4945			udelay(40);
 4946			if ((tr32(MAC_STATUS) &
 4947			     (MAC_STATUS_SYNC_CHANGED |
 4948			      MAC_STATUS_CFG_CHANGED)) == 0)
 4949				break;
 4950		}
 4951
 4952		mac_status = tr32(MAC_STATUS);
 4953		if (current_link_up == 0 &&
 4954		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
 4955		    !(mac_status & MAC_STATUS_RCVD_CFG))
 4956			current_link_up = 1;
 4957	} else {
 4958		tg3_setup_flow_control(tp, 0, 0);
 4959
 4960		/* Forcing 1000FD link up. */
 4961		current_link_up = 1;
 4962
 4963		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
 4964		udelay(40);
 4965
 4966		tw32_f(MAC_MODE, tp->mac_mode);
 4967		udelay(40);
 4968	}
 4969
 4970out:
 4971	return current_link_up;
 4972}
 4973
 4974static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
 4975{
 4976	u32 orig_pause_cfg;
 4977	u16 orig_active_speed;
 4978	u8 orig_active_duplex;
 4979	u32 mac_status;
 4980	int current_link_up;
 4981	int i;
 4982
 4983	orig_pause_cfg = tp->link_config.active_flowctrl;
 4984	orig_active_speed = tp->link_config.active_speed;
 4985	orig_active_duplex = tp->link_config.active_duplex;
 4986
 4987	if (!tg3_flag(tp, HW_AUTONEG) &&
 4988	    netif_carrier_ok(tp->dev) &&
 4989	    tg3_flag(tp, INIT_COMPLETE)) {
 4990		mac_status = tr32(MAC_STATUS);
 4991		mac_status &= (MAC_STATUS_PCS_SYNCED |
 4992			       MAC_STATUS_SIGNAL_DET |
 4993			       MAC_STATUS_CFG_CHANGED |
 4994			       MAC_STATUS_RCVD_CFG);
 4995		if (mac_status == (MAC_STATUS_PCS_SYNCED |
 4996				   MAC_STATUS_SIGNAL_DET)) {
 4997			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
 4998					    MAC_STATUS_CFG_CHANGED));
 4999			return 0;
 5000		}
 5001	}
 5002
 5003	tw32_f(MAC_TX_AUTO_NEG, 0);
 5004
 5005	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
 5006	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
 5007	tw32_f(MAC_MODE, tp->mac_mode);
 5008	udelay(40);
 5009
 5010	if (tp->phy_id == TG3_PHY_ID_BCM8002)
 5011		tg3_init_bcm8002(tp);
 5012
 5013	/* Enable link change events even while polling the serdes.  */
 5014	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
 5015	udelay(40);
 5016
 5017	current_link_up = 0;
 5018	tp->link_config.rmt_adv = 0;
 5019	mac_status = tr32(MAC_STATUS);
 5020
 5021	if (tg3_flag(tp, HW_AUTONEG))
 5022		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
 5023	else
 5024		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
 5025
 5026	tp->napi[0].hw_status->status =
 5027		(SD_STATUS_UPDATED |
 5028		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
 5029
 5030	for (i = 0; i < 100; i++) {
 5031		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
 5032				    MAC_STATUS_CFG_CHANGED));
 5033		udelay(5);
 5034		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
 5035					 MAC_STATUS_CFG_CHANGED |
 5036					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
 5037			break;
 5038	}
 5039
 5040	mac_status = tr32(MAC_STATUS);
 5041	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
 5042		current_link_up = 0;
 5043		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
 5044		    tp->serdes_counter == 0) {
 5045			tw32_f(MAC_MODE, (tp->mac_mode |
 5046					  MAC_MODE_SEND_CONFIGS));
 5047			udelay(1);
 5048			tw32_f(MAC_MODE, tp->mac_mode);
 5049		}
 5050	}
 5051
 5052	if (current_link_up == 1) {
 5053		tp->link_config.active_speed = SPEED_1000;
 5054		tp->link_config.active_duplex = DUPLEX_FULL;
 5055		tw32(MAC_LED_CTRL, (tp->led_ctrl |
 5056				    LED_CTRL_LNKLED_OVERRIDE |
 5057				    LED_CTRL_1000MBPS_ON));
 5058	} else {
 5059		tp->link_config.active_speed = SPEED_UNKNOWN;
 5060		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
 5061		tw32(MAC_LED_CTRL, (tp->led_ctrl |
 5062				    LED_CTRL_LNKLED_OVERRIDE |
 5063				    LED_CTRL_TRAFFIC_OVERRIDE));
 5064	}
 5065
 5066	if (current_link_up != netif_carrier_ok(tp->dev)) {
 5067		if (current_link_up)
 5068			netif_carrier_on(tp->dev);
 5069		else
 5070			netif_carrier_off(tp->dev);
 5071		tg3_link_report(tp);
 5072	} else {
 5073		u32 now_pause_cfg = tp->link_config.active_flowctrl;
 5074		if (orig_pause_cfg != now_pause_cfg ||
 5075		    orig_active_speed != tp->link_config.active_speed ||
 5076		    orig_active_duplex != tp->link_config.active_duplex)
 5077			tg3_link_report(tp);
 5078	}
 5079
 5080	return 0;
 5081}
 5082
 5083static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
 5084{
 5085	int current_link_up, err = 0;
 5086	u32 bmsr, bmcr;
 5087	u16 current_speed;
 5088	u8 current_duplex;
 5089	u32 local_adv, remote_adv;
 5090
 5091	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
 5092	tw32_f(MAC_MODE, tp->mac_mode);
 5093	udelay(40);
 5094
 5095	tw32(MAC_EVENT, 0);
 5096
 5097	tw32_f(MAC_STATUS,
 5098	     (MAC_STATUS_SYNC_CHANGED |
 5099	      MAC_STATUS_CFG_CHANGED |
 5100	      MAC_STATUS_MI_COMPLETION |
 5101	      MAC_STATUS_LNKSTATE_CHANGED));
 5102	udelay(40);
 5103
 5104	if (force_reset)
 5105		tg3_phy_reset(tp);
 5106
 5107	current_link_up = 0;
 5108	current_speed = SPEED_UNKNOWN;
 5109	current_duplex = DUPLEX_UNKNOWN;
 5110	tp->link_config.rmt_adv = 0;
 5111
 5112	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
 5113	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
 5114	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
 5115		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
 5116			bmsr |= BMSR_LSTATUS;
 5117		else
 5118			bmsr &= ~BMSR_LSTATUS;
 5119	}
 5120
 5121	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
 5122
 5123	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
 5124	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
 5125		/* do nothing, just check for link up at the end */
 5126	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
 5127		u32 adv, newadv;
 5128
 5129		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
 5130		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
 5131				 ADVERTISE_1000XPAUSE |
 5132				 ADVERTISE_1000XPSE_ASYM |
 5133				 ADVERTISE_SLCT);
 5134
 5135		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
 5136		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
 5137
 5138		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
 5139			tg3_writephy(tp, MII_ADVERTISE, newadv);
 5140			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
 5141			tg3_writephy(tp, MII_BMCR, bmcr);
 5142
 5143			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
 5144			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
 5145			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
 5146
 5147			return err;
 5148		}
 5149	} else {
 5150		u32 new_bmcr;
 5151
 5152		bmcr &= ~BMCR_SPEED1000;
 5153		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
 5154
 5155		if (tp->link_config.duplex == DUPLEX_FULL)
 5156			new_bmcr |= BMCR_FULLDPLX;
 5157
 5158		if (new_bmcr != bmcr) {
 5159			/* BMCR_SPEED1000 is a reserved bit that needs
 5160			 * to be set on write.
 5161			 */
 5162			new_bmcr |= BMCR_SPEED1000;
 5163
 5164			/* Force a linkdown */
 5165			if (netif_carrier_ok(tp->dev)) {
 5166				u32 adv;
 5167
 5168				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
 5169				adv &= ~(ADVERTISE_1000XFULL |
 5170					 ADVERTISE_1000XHALF |
 5171					 ADVERTISE_SLCT);
 5172				tg3_writephy(tp, MII_ADVERTISE, adv);
 5173				tg3_writephy(tp, MII_BMCR, bmcr |
 5174							   BMCR_ANRESTART |
 5175							   BMCR_ANENABLE);
 5176				udelay(10);
 5177				netif_carrier_off(tp->dev);
 5178			}
 5179			tg3_writephy(tp, MII_BMCR, new_bmcr);
 5180			bmcr = new_bmcr;
 5181			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
 5182			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
 5183			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
 5184			    ASIC_REV_5714) {
 5185				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
 5186					bmsr |= BMSR_LSTATUS;
 5187				else
 5188					bmsr &= ~BMSR_LSTATUS;
 5189			}
 5190			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
 5191		}
 5192	}
 5193
 5194	if (bmsr & BMSR_LSTATUS) {
 5195		current_speed = SPEED_1000;
 5196		current_link_up = 1;
 5197		if (bmcr & BMCR_FULLDPLX)
 5198			current_duplex = DUPLEX_FULL;
 5199		else
 5200			current_duplex = DUPLEX_HALF;
 5201
 5202		local_adv = 0;
 5203		remote_adv = 0;
 5204
 5205		if (bmcr & BMCR_ANENABLE) {
 5206			u32 common;
 5207
 5208			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
 5209			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
 5210			common = local_adv & remote_adv;
 5211			if (common & (ADVERTISE_1000XHALF |
 5212				      ADVERTISE_1000XFULL)) {
 5213				if (common & ADVERTISE_1000XFULL)
 5214					current_duplex = DUPLEX_FULL;
 5215				else
 5216					current_duplex = DUPLEX_HALF;
 5217
 5218				tp->link_config.rmt_adv =
 5219					   mii_adv_to_ethtool_adv_x(remote_adv);
 5220			} else if (!tg3_flag(tp, 5780_CLASS)) {
 5221				/* Link is up via parallel detect */
 5222			} else {
 5223				current_link_up = 0;
 5224			}
 5225		}
 5226	}
 5227
 5228	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
 5229		tg3_setup_flow_control(tp, local_adv, remote_adv);
 5230
 5231	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
 5232	if (tp->link_config.active_duplex == DUPLEX_HALF)
 5233		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
 5234
 5235	tw32_f(MAC_MODE, tp->mac_mode);
 5236	udelay(40);
 5237
 5238	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
 5239
 5240	tp->link_config.active_speed = current_speed;
 5241	tp->link_config.active_duplex = current_duplex;
 5242
 5243	if (current_link_up != netif_carrier_ok(tp->dev)) {
 5244		if (current_link_up)
 5245			netif_carrier_on(tp->dev);
 5246		else {
 5247			netif_carrier_off(tp->dev);
 5248			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
 5249		}
 5250		tg3_link_report(tp);
 5251	}
 5252	return err;
 5253}
 5254
 5255static void tg3_serdes_parallel_detect(struct tg3 *tp)
 5256{
 5257	if (tp->serdes_counter) {
 5258		/* Give autoneg time to complete. */
 5259		tp->serdes_counter--;
 5260		return;
 5261	}
 5262
 5263	if (!netif_carrier_ok(tp->dev) &&
 5264	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
 5265		u32 bmcr;
 5266
 5267		tg3_readphy(tp, MII_BMCR, &bmcr);
 5268		if (bmcr & BMCR_ANENABLE) {
 5269			u32 phy1, phy2;
 5270
 5271			/* Select shadow register 0x1f */
 5272			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
 5273			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
 5274
 5275			/* Select expansion interrupt status register */
 5276			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
 5277					 MII_TG3_DSP_EXP1_INT_STAT);
 5278			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
 5279			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
 5280
 5281			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
 5282				/* We have signal detect and not receiving
 5283				 * config code words, link is up by parallel
 5284				 * detection.
 5285				 */
 5286
 5287				bmcr &= ~BMCR_ANENABLE;
 5288				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
 5289				tg3_writephy(tp, MII_BMCR, bmcr);
 5290				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
 5291			}
 5292		}
 5293	} else if (netif_carrier_ok(tp->dev) &&
 5294		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
 5295		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
 5296		u32 phy2;
 5297
 5298		/* Select expansion interrupt status register */
 5299		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
 5300				 MII_TG3_DSP_EXP1_INT_STAT);
 5301		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
 5302		if (phy2 & 0x20) {
 5303			u32 bmcr;
 5304
 5305			/* Config code words received, turn on autoneg. */
 5306			tg3_readphy(tp, MII_BMCR, &bmcr);
 5307			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
 5308
 5309			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
 5310
 5311		}
 5312	}
 5313}
 5314
 5315static int tg3_setup_phy(struct tg3 *tp, int force_reset)
 5316{
 5317	u32 val;
 5318	int err;
 5319
 5320	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
 5321		err = tg3_setup_fiber_phy(tp, force_reset);
 5322	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
 5323		err = tg3_setup_fiber_mii_phy(tp, force_reset);
 5324	else
 5325		err = tg3_setup_copper_phy(tp, force_reset);
 5326
 5327	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
 5328		u32 scale;
 5329
 5330		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
 5331		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
 5332			scale = 65;
 5333		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
 5334			scale = 6;
 5335		else
 5336			scale = 12;
 5337
 5338		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
 5339		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
 5340		tw32(GRC_MISC_CFG, val);
 5341	}
 5342
 5343	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
 5344	      (6 << TX_LENGTHS_IPG_SHIFT);
 5345	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
 5346		val |= tr32(MAC_TX_LENGTHS) &
 5347		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
 5348			TX_LENGTHS_CNT_DWN_VAL_MSK);
 5349
 5350	if (tp->link_config.active_speed == SPEED_1000 &&
 5351	    tp->link_config.active_duplex == DUPLEX_HALF)
 5352		tw32(MAC_TX_LENGTHS, val |
 5353		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
 5354	else
 5355		tw32(MAC_TX_LENGTHS, val |
 5356		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
 5357
 5358	if (!tg3_flag(tp, 5705_PLUS)) {
 5359		if (netif_carrier_ok(tp->dev)) {
 5360			tw32(HOSTCC_STAT_COAL_TICKS,
 5361			     tp->coal.stats_block_coalesce_usecs);
 5362		} else {
 5363			tw32(HOSTCC_STAT_COAL_TICKS, 0);
 5364		}
 5365	}
 5366
 5367	if (tg3_flag(tp, ASPM_WORKAROUND)) {
 5368		val = tr32(PCIE_PWR_MGMT_THRESH);
 5369		if (!netif_carrier_ok(tp->dev))
 5370			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
 5371			      tp->pwrmgmt_thresh;
 5372		else
 5373			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
 5374		tw32(PCIE_PWR_MGMT_THRESH, val);
 5375	}
 5376
 5377	return err;
 5378}
 5379
 5380static inline int tg3_irq_sync(struct tg3 *tp)
 5381{
 5382	return tp->irq_sync;
 5383}
 5384
 5385static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
 5386{
 5387	int i;
 5388
 5389	dst = (u32 *)((u8 *)dst + off);
 5390	for (i = 0; i < len; i += sizeof(u32))
 5391		*dst++ = tr32(off + i);
 5392}
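
/* Note on the pointer arithmetic above: dst is advanced by off bytes
 * before the loop, so the destination buffer stays indexed by register
 * offset.  An equivalent, more explicit sketch (using the caller's
 * regs buffer):
 *
 *	for (i = 0; i < len; i += sizeof(u32))
 *		regs[(off + i) / sizeof(u32)] = tr32(off + i);
 *
 * which is what lets tg3_dump_state() below print "offset: values"
 * straight out of the buffer.
 */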
 5393
 5394static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
 5395{
 5396	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
 5397	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
 5398	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
 5399	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
 5400	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
 5401	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
 5402	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
 5403	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
 5404	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
 5405	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
 5406	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
 5407	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
 5408	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
 5409	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
 5410	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
 5411	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
 5412	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
 5413	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
 5414	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
 5415
 5416	if (tg3_flag(tp, SUPPORT_MSIX))
 5417		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
 5418
 5419	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
 5420	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
 5421	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
 5422	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
 5423	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
 5424	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
 5425	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
 5426	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
 5427
 5428	if (!tg3_flag(tp, 5705_PLUS)) {
 5429		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
 5430		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
 5431		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
 5432	}
 5433
 5434	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
 5435	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
 5436	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
 5437	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
 5438	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
 5439
 5440	if (tg3_flag(tp, NVRAM))
 5441		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
 5442}
 5443
 5444static void tg3_dump_state(struct tg3 *tp)
 5445{
 5446	int i;
 5447	u32 *regs;
 5448
 5449	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
 5450	if (!regs) {
 5451		netdev_err(tp->dev, "Failed allocating register dump buffer\n");
 5452		return;
 5453	}
 5454
 5455	if (tg3_flag(tp, PCI_EXPRESS)) {
 5456		/* Read up to but not including private PCI registers */
 5457		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
 5458			regs[i / sizeof(u32)] = tr32(i);
 5459	} else
 5460		tg3_dump_legacy_regs(tp, regs);
 5461
 5462	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
 5463		if (!regs[i + 0] && !regs[i + 1] &&
 5464		    !regs[i + 2] && !regs[i + 3])
 5465			continue;
 5466
 5467		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
 5468			   i * 4,
 5469			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
 5470	}
 5471
 5472	kfree(regs);
 5473
 5474	for (i = 0; i < tp->irq_cnt; i++) {
 5475		struct tg3_napi *tnapi = &tp->napi[i];
 5476
 5477		/* SW status block */
 5478		netdev_err(tp->dev,
 5479			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
 5480			   i,
 5481			   tnapi->hw_status->status,
 5482			   tnapi->hw_status->status_tag,
 5483			   tnapi->hw_status->rx_jumbo_consumer,
 5484			   tnapi->hw_status->rx_consumer,
 5485			   tnapi->hw_status->rx_mini_consumer,
 5486			   tnapi->hw_status->idx[0].rx_producer,
 5487			   tnapi->hw_status->idx[0].tx_consumer);
 5488
 5489		netdev_err(tp->dev,
 5490		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
 5491			   i,
 5492			   tnapi->last_tag, tnapi->last_irq_tag,
 5493			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
 5494			   tnapi->rx_rcb_ptr,
 5495			   tnapi->prodring.rx_std_prod_idx,
 5496			   tnapi->prodring.rx_std_cons_idx,
 5497			   tnapi->prodring.rx_jmb_prod_idx,
 5498			   tnapi->prodring.rx_jmb_cons_idx);
 5499	}
 5500}
 5501
 5502/* This is called whenever we suspect that the system chipset is re-
 5503 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 5504 * is bogus tx completions. We try to recover by setting the
 5505 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 5506 * in the workqueue.
 5507 */
 5508static void tg3_tx_recover(struct tg3 *tp)
 5509{
 5510	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
 5511	       tp->write32_tx_mbox == tg3_write_indirect_mbox);
 5512
 5513	netdev_warn(tp->dev,
 5514		    "The system may be re-ordering memory-mapped I/O "
 5515		    "cycles to the network device, attempting to recover. "
 5516		    "Please report the problem to the driver maintainer "
 5517		    "and include system chipset information.\n");
 5518
 5519	spin_lock(&tp->lock);
 5520	tg3_flag_set(tp, TX_RECOVERY_PENDING);
 5521	spin_unlock(&tp->lock);
 5522}
 5523
 5524static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
 5525{
 5526	/* Tell compiler to fetch tx indices from memory. */
 5527	barrier();
 5528	return tnapi->tx_pending -
 5529	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
 5530}
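
/* Worked example of the ring arithmetic above (illustrative,
 * assuming TG3_TX_RING_SIZE == 512 and tx_pending == 511): with
 * tx_prod == 5 and tx_cons == 510, the in-flight count is
 * (5 - 510) & 511 == 7, so tg3_tx_avail() returns 511 - 7 == 504.
 * The unsigned subtraction plus the power-of-two mask keeps the
 * result correct across index wraparound.
 */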
 5531
 5532/* Tigon3 never reports partial packet sends.  So we do not
 5533 * need special logic to handle SKBs that have not had all
 5534 * of their frags sent yet, like SunGEM does.
 5535 */
 5536static void tg3_tx(struct tg3_napi *tnapi)
 5537{
 5538	struct tg3 *tp = tnapi->tp;
 5539	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
 5540	u32 sw_idx = tnapi->tx_cons;
 5541	struct netdev_queue *txq;
 5542	int index = tnapi - tp->napi;
 5543	unsigned int pkts_compl = 0, bytes_compl = 0;
 5544
 5545	if (tg3_flag(tp, ENABLE_TSS))
 5546		index--;
 5547
 5548	txq = netdev_get_tx_queue(tp->dev, index);
 5549
 5550	while (sw_idx != hw_idx) {
 5551		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
 5552		struct sk_buff *skb = ri->skb;
 5553		int i, tx_bug = 0;
 5554
 5555		if (unlikely(skb == NULL)) {
 5556			tg3_tx_recover(tp);
 5557			return;
 5558		}
 5559
 5560		pci_unmap_single(tp->pdev,
 5561				 dma_unmap_addr(ri, mapping),
 5562				 skb_headlen(skb),
 5563				 PCI_DMA_TODEVICE);
 5564
 5565		ri->skb = NULL;
 5566
 5567		while (ri->fragmented) {
 5568			ri->fragmented = false;
 5569			sw_idx = NEXT_TX(sw_idx);
 5570			ri = &tnapi->tx_buffers[sw_idx];
 5571		}
 5572
 5573		sw_idx = NEXT_TX(sw_idx);
 5574
 5575		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 5576			ri = &tnapi->tx_buffers[sw_idx];
 5577			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
 5578				tx_bug = 1;
 5579
 5580			pci_unmap_page(tp->pdev,
 5581				       dma_unmap_addr(ri, mapping),
 5582				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
 5583				       PCI_DMA_TODEVICE);
 5584
 5585			while (ri->fragmented) {
 5586				ri->fragmented = false;
 5587				sw_idx = NEXT_TX(sw_idx);
 5588				ri = &tnapi->tx_buffers[sw_idx];
 5589			}
 5590
 5591			sw_idx = NEXT_TX(sw_idx);
 5592		}
 5593
 5594		pkts_compl++;
 5595		bytes_compl += skb->len;
 5596
 5597		dev_kfree_skb(skb);
 5598
 5599		if (unlikely(tx_bug)) {
 5600			tg3_tx_recover(tp);
 5601			return;
 5602		}
 5603	}
 5604
 5605	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
 5606
 5607	tnapi->tx_cons = sw_idx;
 5608
 5609	/* Need to make the tx_cons update visible to tg3_start_xmit()
 5610	 * before checking for netif_queue_stopped().  Without the
 5611	 * memory barrier, there is a small possibility that tg3_start_xmit()
 5612	 * will miss it and cause the queue to be stopped forever.
 5613	 */
 5614	smp_mb();
 5615
 5616	if (unlikely(netif_tx_queue_stopped(txq) &&
 5617		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
 5618		__netif_tx_lock(txq, smp_processor_id());
 5619		if (netif_tx_queue_stopped(txq) &&
 5620		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
 5621			netif_tx_wake_queue(txq);
 5622		__netif_tx_unlock(txq);
 5623	}
 5624}
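
/* The smp_mb() above pairs with the smp_mb() in tg3_start_xmit():
 * the producer stops the queue and then re-checks tg3_tx_avail(),
 * while this consumer updates tx_cons and then re-checks
 * netif_tx_queue_stopped().  Whichever side runs second sees the
 * other's update, so the queue cannot remain stopped while ring
 * space is available.
 */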
 5625
 5626static void tg3_frag_free(bool is_frag, void *data)
 5627{
 5628	if (is_frag)
 5629		put_page(virt_to_head_page(data));
 5630	else
 5631		kfree(data);
 5632}
 5633
 5634static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
 5635{
 5636	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
 5637		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 5638
 5639	if (!ri->data)
 5640		return;
 5641
 5642	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
 5643			 map_sz, PCI_DMA_FROMDEVICE);
 5644	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
 5645	ri->data = NULL;
 5646}
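
/* Illustration of the size math above (the exact figures depend on
 * the architecture and on sizeof(struct skb_shared_info)): on a
 * 4 KiB page system, a standard ~1.5 KiB RX buffer plus the aligned
 * shared-info footer stays within PAGE_SIZE, so the buffer came from
 * the page-fragment allocator and is released via put_page(); a
 * ~9 KiB jumbo buffer exceeds PAGE_SIZE, so it was kmalloc()ed and
 * is kfree()d instead.
 */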
 5647
 5648
 5649/* Returns size of skb allocated or < 0 on error.
 5650 *
 5651 * We only need to fill in the address because the other members
 5652 * of the RX descriptor are invariant, see tg3_init_rings.
 5653 *
  5654	 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 5655 * posting buffers we only dirty the first cache line of the RX
 5656 * descriptor (containing the address).  Whereas for the RX status
 5657 * buffers the cpu only reads the last cacheline of the RX descriptor
 5658 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 5659 */
 5660static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
 5661			     u32 opaque_key, u32 dest_idx_unmasked,
 5662			     unsigned int *frag_size)
 5663{
 5664	struct tg3_rx_buffer_desc *desc;
 5665	struct ring_info *map;
 5666	u8 *data;
 5667	dma_addr_t mapping;
 5668	int skb_size, data_size, dest_idx;
 5669
 5670	switch (opaque_key) {
 5671	case RXD_OPAQUE_RING_STD:
 5672		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
 5673		desc = &tpr->rx_std[dest_idx];
 5674		map = &tpr->rx_std_buffers[dest_idx];
 5675		data_size = tp->rx_pkt_map_sz;
 5676		break;
 5677
 5678	case RXD_OPAQUE_RING_JUMBO:
 5679		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
 5680		desc = &tpr->rx_jmb[dest_idx].std;
 5681		map = &tpr->rx_jmb_buffers[dest_idx];
 5682		data_size = TG3_RX_JMB_MAP_SZ;
 5683		break;
 5684
 5685	default:
 5686		return -EINVAL;
 5687	}
 5688
 5689	/* Do not overwrite any of the map or rp information
 5690	 * until we are sure we can commit to a new buffer.
 5691	 *
 5692	 * Callers depend upon this behavior and assume that
 5693	 * we leave everything unchanged if we fail.
 5694	 */
 5695	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
 5696		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 5697	if (skb_size <= PAGE_SIZE) {
 5698		data = netdev_alloc_frag(skb_size);
 5699		*frag_size = skb_size;
 5700	} else {
 5701		data = kmalloc(skb_size, GFP_ATOMIC);
 5702		*frag_size = 0;
 5703	}
 5704	if (!data)
 5705		return -ENOMEM;
 5706
 5707	mapping = pci_map_single(tp->pdev,
 5708				 data + TG3_RX_OFFSET(tp),
 5709				 data_size,
 5710				 PCI_DMA_FROMDEVICE);
 5711	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
 5712		tg3_frag_free(skb_size <= PAGE_SIZE, data);
 5713		return -EIO;
 5714	}
 5715
 5716	map->data = data;
 5717	dma_unmap_addr_set(map, mapping, mapping);
 5718
 5719	desc->addr_hi = ((u64)mapping >> 32);
 5720	desc->addr_lo = ((u64)mapping & 0xffffffff);
 5721
 5722	return data_size;
 5723}
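
#if 0	/* Illustrative sketch only (hypothetical address): how the
	 * 64-bit DMA mapping is split across the two 32-bit
	 * descriptor words written above.
	 */
	dma_addr_t mapping = 0x123456780ULL;

	desc->addr_hi = (u64)mapping >> 32;		/* 0x00000001 */
	desc->addr_lo = (u64)mapping & 0xffffffff;	/* 0x23456780 */
#endif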
 5724
 5725/* We only need to move over in the address because the other
 5726 * members of the RX descriptor are invariant.  See notes above
 5727 * tg3_alloc_rx_data for full details.
 5728 */
 5729static void tg3_recycle_rx(struct tg3_napi *tnapi,
 5730			   struct tg3_rx_prodring_set *dpr,
 5731			   u32 opaque_key, int src_idx,
 5732			   u32 dest_idx_unmasked)
 5733{
 5734	struct tg3 *tp = tnapi->tp;
 5735	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
 5736	struct ring_info *src_map, *dest_map;
 5737	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
 5738	int dest_idx;
 5739
 5740	switch (opaque_key) {
 5741	case RXD_OPAQUE_RING_STD:
 5742		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
 5743		dest_desc = &dpr->rx_std[dest_idx];
 5744		dest_map = &dpr->rx_std_buffers[dest_idx];
 5745		src_desc = &spr->rx_std[src_idx];
 5746		src_map = &spr->rx_std_buffers[src_idx];
 5747		break;
 5748
 5749	case RXD_OPAQUE_RING_JUMBO:
 5750		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
 5751		dest_desc = &dpr->rx_jmb[dest_idx].std;
 5752		dest_map = &dpr->rx_jmb_buffers[dest_idx];
 5753		src_desc = &spr->rx_jmb[src_idx].std;
 5754		src_map = &spr->rx_jmb_buffers[src_idx];
 5755		break;
 5756
 5757	default:
 5758		return;
 5759	}
 5760
 5761	dest_map->data = src_map->data;
 5762	dma_unmap_addr_set(dest_map, mapping,
 5763			   dma_unmap_addr(src_map, mapping));
 5764	dest_desc->addr_hi = src_desc->addr_hi;
 5765	dest_desc->addr_lo = src_desc->addr_lo;
 5766
 5767	/* Ensure that the update to the skb happens after the physical
 5768	 * addresses have been transferred to the new BD location.
 5769	 */
 5770	smp_wmb();
 5771
 5772	src_map->data = NULL;
 5773}
 5774
 5775/* The RX ring scheme is composed of multiple rings which post fresh
 5776 * buffers to the chip, and one special ring the chip uses to report
 5777 * status back to the host.
 5778 *
 5779 * The special ring reports the status of received packets to the
 5780 * host.  The chip does not write into the original descriptor the
 5781 * RX buffer was obtained from.  The chip simply takes the original
 5782 * descriptor as provided by the host, updates the status and length
 5783 * field, then writes this into the next status ring entry.
 5784 *
 5785 * Each ring the host uses to post buffers to the chip is described
  5786 * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
 5787 * it is first placed into the on-chip ram.  When the packet's length
 5788 * is known, it walks down the TG3_BDINFO entries to select the ring.
  5789 * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
  5790 * whose MAXLEN covers the new packet's length is chosen.
 5791 *
 5792 * The "separate ring for rx status" scheme may sound queer, but it makes
 5793 * sense from a cache coherency perspective.  If only the host writes
 5794 * to the buffer post rings, and only the chip writes to the rx status
 5795 * rings, then cache lines never move beyond shared-modified state.
 5796 * If both the host and chip were to write into the same ring, cache line
 5797 * eviction could occur since both entities want it in an exclusive state.
 5798 */
 5799static int tg3_rx(struct tg3_napi *tnapi, int budget)
 5800{
 5801	struct tg3 *tp = tnapi->tp;
 5802	u32 work_mask, rx_std_posted = 0;
 5803	u32 std_prod_idx, jmb_prod_idx;
 5804	u32 sw_idx = tnapi->rx_rcb_ptr;
 5805	u16 hw_idx;
 5806	int received;
 5807	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
 5808
 5809	hw_idx = *(tnapi->rx_rcb_prod_idx);
 5810	/*
 5811	 * We need to order the read of hw_idx and the read of
 5812	 * the opaque cookie.
 5813	 */
 5814	rmb();
 5815	work_mask = 0;
 5816	received = 0;
 5817	std_prod_idx = tpr->rx_std_prod_idx;
 5818	jmb_prod_idx = tpr->rx_jmb_prod_idx;
 5819	while (sw_idx != hw_idx && budget > 0) {
 5820		struct ring_info *ri;
 5821		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
 5822		unsigned int len;
 5823		struct sk_buff *skb;
 5824		dma_addr_t dma_addr;
 5825		u32 opaque_key, desc_idx, *post_ptr;
 5826		u8 *data;
 5827
 5828		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
 5829		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
 5830		if (opaque_key == RXD_OPAQUE_RING_STD) {
 5831			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
 5832			dma_addr = dma_unmap_addr(ri, mapping);
 5833			data = ri->data;
 5834			post_ptr = &std_prod_idx;
 5835			rx_std_posted++;
 5836		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
 5837			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
 5838			dma_addr = dma_unmap_addr(ri, mapping);
 5839			data = ri->data;
 5840			post_ptr = &jmb_prod_idx;
 5841		} else
 5842			goto next_pkt_nopost;
 5843
 5844		work_mask |= opaque_key;
 5845
 5846		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
 5847		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
 5848		drop_it:
 5849			tg3_recycle_rx(tnapi, tpr, opaque_key,
 5850				       desc_idx, *post_ptr);
 5851		drop_it_no_recycle:
  5852		/* Other statistics are kept track of by the card. */
 5853			tp->rx_dropped++;
 5854			goto next_pkt;
 5855		}
 5856
 5857		prefetch(data + TG3_RX_OFFSET(tp));
 5858		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
 5859		      ETH_FCS_LEN;
 5860
 5861		if (len > TG3_RX_COPY_THRESH(tp)) {
 5862			int skb_size;
 5863			unsigned int frag_size;
 5864
 5865			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
 5866						    *post_ptr, &frag_size);
 5867			if (skb_size < 0)
 5868				goto drop_it;
 5869
 5870			pci_unmap_single(tp->pdev, dma_addr, skb_size,
 5871					 PCI_DMA_FROMDEVICE);
 5872
 5873			skb = build_skb(data, frag_size);
 5874			if (!skb) {
 5875				tg3_frag_free(frag_size != 0, data);
 5876				goto drop_it_no_recycle;
 5877			}
 5878			skb_reserve(skb, TG3_RX_OFFSET(tp));
 5879			/* Ensure that the update to the data happens
 5880			 * after the usage of the old DMA mapping.
 5881			 */
 5882			smp_wmb();
 5883
 5884			ri->data = NULL;
 5885
 5886		} else {
 5887			tg3_recycle_rx(tnapi, tpr, opaque_key,
 5888				       desc_idx, *post_ptr);
 5889
 5890			skb = netdev_alloc_skb(tp->dev,
 5891					       len + TG3_RAW_IP_ALIGN);
 5892			if (skb == NULL)
 5893				goto drop_it_no_recycle;
 5894
 5895			skb_reserve(skb, TG3_RAW_IP_ALIGN);
 5896			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
 5897			memcpy(skb->data,
 5898			       data + TG3_RX_OFFSET(tp),
 5899			       len);
 5900			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
 5901		}
 5902
 5903		skb_put(skb, len);
 5904		if ((tp->dev->features & NETIF_F_RXCSUM) &&
 5905		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
 5906		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
 5907		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
 5908			skb->ip_summed = CHECKSUM_UNNECESSARY;
 5909		else
 5910			skb_checksum_none_assert(skb);
 5911
 5912		skb->protocol = eth_type_trans(skb, tp->dev);
 5913
 5914		if (len > (tp->dev->mtu + ETH_HLEN) &&
 5915		    skb->protocol != htons(ETH_P_8021Q)) {
 5916			dev_kfree_skb(skb);
 5917			goto drop_it_no_recycle;
 5918		}
 5919
 5920		if (desc->type_flags & RXD_FLAG_VLAN &&
 5921		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
 5922			__vlan_hwaccel_put_tag(skb,
 5923					       desc->err_vlan & RXD_VLAN_MASK);
 5924
 5925		napi_gro_receive(&tnapi->napi, skb);
 5926
 5927		received++;
 5928		budget--;
 5929
 5930next_pkt:
 5931		(*post_ptr)++;
 5932
 5933		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
 5934			tpr->rx_std_prod_idx = std_prod_idx &
 5935					       tp->rx_std_ring_mask;
 5936			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
 5937				     tpr->rx_std_prod_idx);
 5938			work_mask &= ~RXD_OPAQUE_RING_STD;
 5939			rx_std_posted = 0;
 5940		}
 5941next_pkt_nopost:
 5942		sw_idx++;
 5943		sw_idx &= tp->rx_ret_ring_mask;
 5944
 5945		/* Refresh hw_idx to see if there is new work */
 5946		if (sw_idx == hw_idx) {
 5947			hw_idx = *(tnapi->rx_rcb_prod_idx);
 5948			rmb();
 5949		}
 5950	}
 5951
 5952	/* ACK the status ring. */
 5953	tnapi->rx_rcb_ptr = sw_idx;
 5954	tw32_rx_mbox(tnapi->consmbox, sw_idx);
 5955
 5956	/* Refill RX ring(s). */
 5957	if (!tg3_flag(tp, ENABLE_RSS)) {
 5958		/* Sync BD data before updating mailbox */
 5959		wmb();
 5960
 5961		if (work_mask & RXD_OPAQUE_RING_STD) {
 5962			tpr->rx_std_prod_idx = std_prod_idx &
 5963					       tp->rx_std_ring_mask;
 5964			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
 5965				     tpr->rx_std_prod_idx);
 5966		}
 5967		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
 5968			tpr->rx_jmb_prod_idx = jmb_prod_idx &
 5969					       tp->rx_jmb_ring_mask;
 5970			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
 5971				     tpr->rx_jmb_prod_idx);
 5972		}
 5973		mmiowb();
 5974	} else if (work_mask) {
 5975		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
 5976		 * updated before the producer indices can be updated.
 5977		 */
 5978		smp_wmb();
 5979
 5980		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
 5981		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
 5982
 5983		if (tnapi != &tp->napi[1]) {
 5984			tp->rx_refill = true;
 5985			napi_schedule(&tp->napi[1].napi);
 5986		}
 5987	}
 5988
 5989	return received;
 5990}
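
/* Illustrative NAPI accounting for the loop above: with budget == 64
 * and, say, 70 packets pending in the return ring, tg3_rx() consumes
 * 64 status entries, ACKs the consumer mailbox, and returns 64.
 * Since the budget was exhausted, the caller leaves NAPI scheduled
 * and the remaining 6 packets are handled on the next poll without
 * another interrupt.
 */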
 5991
 5992static void tg3_poll_link(struct tg3 *tp)
 5993{
 5994	/* handle link change and other phy events */
 5995	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
 5996		struct tg3_hw_status *sblk = tp->napi[0].hw_status;
 5997
 5998		if (sblk->status & SD_STATUS_LINK_CHG) {
 5999			sblk->status = SD_STATUS_UPDATED |
 6000				       (sblk->status & ~SD_STATUS_LINK_CHG);
 6001			spin_lock(&tp->lock);
 6002			if (tg3_flag(tp, USE_PHYLIB)) {
 6003				tw32_f(MAC_STATUS,
 6004				     (MAC_STATUS_SYNC_CHANGED |
 6005				      MAC_STATUS_CFG_CHANGED |
 6006				      MAC_STATUS_MI_COMPLETION |
 6007				      MAC_STATUS_LNKSTATE_CHANGED));
 6008				udelay(40);
 6009			} else
 6010				tg3_setup_phy(tp, 0);
 6011			spin_unlock(&tp->lock);
 6012		}
 6013	}
 6014}
 6015
 6016static int tg3_rx_prodring_xfer(struct tg3 *tp,
 6017				struct tg3_rx_prodring_set *dpr,
 6018				struct tg3_rx_prodring_set *spr)
 6019{
 6020	u32 si, di, cpycnt, src_prod_idx;
 6021	int i, err = 0;
 6022
 6023	while (1) {
 6024		src_prod_idx = spr->rx_std_prod_idx;
 6025
 6026		/* Make sure updates to the rx_std_buffers[] entries and the
 6027		 * standard producer index are seen in the correct order.
 6028		 */
 6029		smp_rmb();
 6030
 6031		if (spr->rx_std_cons_idx == src_prod_idx)
 6032			break;
 6033
 6034		if (spr->rx_std_cons_idx < src_prod_idx)
 6035			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
 6036		else
 6037			cpycnt = tp->rx_std_ring_mask + 1 -
 6038				 spr->rx_std_cons_idx;
 6039
 6040		cpycnt = min(cpycnt,
 6041			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
 6042
 6043		si = spr->rx_std_cons_idx;
 6044		di = dpr->rx_std_prod_idx;
 6045
 6046		for (i = di; i < di + cpycnt; i++) {
 6047			if (dpr->rx_std_buffers[i].data) {
 6048				cpycnt = i - di;
 6049				err = -ENOSPC;
 6050				break;
 6051			}
 6052		}
 6053
 6054		if (!cpycnt)
 6055			break;
 6056
 6057		/* Ensure that updates to the rx_std_buffers ring and the
 6058		 * shadowed hardware producer ring from tg3_recycle_skb() are
 6059		 * ordered correctly WRT the skb check above.
 6060		 */
 6061		smp_rmb();
 6062
 6063		memcpy(&dpr->rx_std_buffers[di],
 6064		       &spr->rx_std_buffers[si],
 6065		       cpycnt * sizeof(struct ring_info));
 6066
 6067		for (i = 0; i < cpycnt; i++, di++, si++) {
 6068			struct tg3_rx_buffer_desc *sbd, *dbd;
 6069			sbd = &spr->rx_std[si];
 6070			dbd = &dpr->rx_std[di];
 6071			dbd->addr_hi = sbd->addr_hi;
 6072			dbd->addr_lo = sbd->addr_lo;
 6073		}
 6074
 6075		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
 6076				       tp->rx_std_ring_mask;
 6077		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
 6078				       tp->rx_std_ring_mask;
 6079	}
 6080
 6081	while (1) {
 6082		src_prod_idx = spr->rx_jmb_prod_idx;
 6083
 6084		/* Make sure updates to the rx_jmb_buffers[] entries and
 6085		 * the jumbo producer index are seen in the correct order.
 6086		 */
 6087		smp_rmb();
 6088
 6089		if (spr->rx_jmb_cons_idx == src_prod_idx)
 6090			break;
 6091
 6092		if (spr->rx_jmb_cons_idx < src_prod_idx)
 6093			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
 6094		else
 6095			cpycnt = tp->rx_jmb_ring_mask + 1 -
 6096				 spr->rx_jmb_cons_idx;
 6097
 6098		cpycnt = min(cpycnt,
 6099			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
 6100
 6101		si = spr->rx_jmb_cons_idx;
 6102		di = dpr->rx_jmb_prod_idx;
 6103
 6104		for (i = di; i < di + cpycnt; i++) {
 6105			if (dpr->rx_jmb_buffers[i].data) {
 6106				cpycnt = i - di;
 6107				err = -ENOSPC;
 6108				break;
 6109			}
 6110		}
 6111
 6112		if (!cpycnt)
 6113			break;
 6114
 6115		/* Ensure that updates to the rx_jmb_buffers ring and the
 6116		 * shadowed hardware producer ring from tg3_recycle_skb() are
 6117		 * ordered correctly WRT the skb check above.
 6118		 */
 6119		smp_rmb();
 6120
 6121		memcpy(&dpr->rx_jmb_buffers[di],
 6122		       &spr->rx_jmb_buffers[si],
 6123		       cpycnt * sizeof(struct ring_info));
 6124
 6125		for (i = 0; i < cpycnt; i++, di++, si++) {
 6126			struct tg3_rx_buffer_desc *sbd, *dbd;
 6127			sbd = &spr->rx_jmb[si].std;
 6128			dbd = &dpr->rx_jmb[di].std;
 6129			dbd->addr_hi = sbd->addr_hi;
 6130			dbd->addr_lo = sbd->addr_lo;
 6131		}
 6132
 6133		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
 6134				       tp->rx_jmb_ring_mask;
 6135		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
 6136				       tp->rx_jmb_ring_mask;
 6137	}
 6138
 6139	return err;
 6140}
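
/* Worked example of the copy-count logic above (illustrative,
 * assuming a 512-entry standard ring, rx_std_ring_mask == 511):
 * with spr->rx_std_cons_idx == 500 and spr->rx_std_prod_idx == 10,
 * cons > prod, so the first pass copies 512 - 500 == 12 entries up
 * to the end of the ring; the loop then wraps and copies the
 * remaining 10 entries from index 0, both passes clamped against
 * the destination ring's free space.
 */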
 6141
 6142static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
 6143{
 6144	struct tg3 *tp = tnapi->tp;
 6145
 6146	/* run TX completion thread */
 6147	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
 6148		tg3_tx(tnapi);
 6149		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
 6150			return work_done;
 6151	}
 6152
 6153	if (!tnapi->rx_rcb_prod_idx)
 6154		return work_done;
 6155
 6156	/* run RX thread, within the bounds set by NAPI.
 6157	 * All RX "locking" is done by ensuring outside
 6158	 * code synchronizes with tg3->napi.poll()
 6159	 */
 6160	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
 6161		work_done += tg3_rx(tnapi, budget - work_done);
 6162
 6163	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
 6164		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
 6165		int i, err = 0;
 6166		u32 std_prod_idx = dpr->rx_std_prod_idx;
 6167		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
 6168
 6169		tp->rx_refill = false;
 6170		for (i = 1; i < tp->irq_cnt; i++)
 6171			err |= tg3_rx_prodring_xfer(tp, dpr,
 6172						    &tp->napi[i].prodring);
 6173
 6174		wmb();
 6175
 6176		if (std_prod_idx != dpr->rx_std_prod_idx)
 6177			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
 6178				     dpr->rx_std_prod_idx);
 6179
 6180		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
 6181			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
 6182				     dpr->rx_jmb_prod_idx);
 6183
 6184		mmiowb();
 6185
 6186		if (err)
 6187			tw32_f(HOSTCC_MODE, tp->coal_now);
 6188	}
 6189
 6190	return work_done;
 6191}
 6192
 6193static inline void tg3_reset_task_schedule(struct tg3 *tp)
 6194{
 6195	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
 6196		schedule_work(&tp->reset_task);
 6197}
 6198
 6199static inline void tg3_reset_task_cancel(struct tg3 *tp)
 6200{
 6201	cancel_work_sync(&tp->reset_task);
 6202	tg3_flag_clear(tp, RESET_TASK_PENDING);
 6203	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
 6204}
 6205
 6206static int tg3_poll_msix(struct napi_struct *napi, int budget)
 6207{
 6208	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
 6209	struct tg3 *tp = tnapi->tp;
 6210	int work_done = 0;
 6211	struct tg3_hw_status *sblk = tnapi->hw_status;
 6212
 6213	while (1) {
 6214		work_done = tg3_poll_work(tnapi, work_done, budget);
 6215
 6216		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
 6217			goto tx_recovery;
 6218
 6219		if (unlikely(work_done >= budget))
 6220			break;
 6221
  6222		/* tnapi->last_tag is used in the interrupt re-enable write
  6223		 * below to tell the hw how much work has been processed,
  6224		 * so we must read it before checking for more work.
  6225		 */
 6226		tnapi->last_tag = sblk->status_tag;
 6227		tnapi->last_irq_tag = tnapi->last_tag;
 6228		rmb();
 6229
 6230		/* check for RX/TX work to do */
 6231		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
 6232			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
 6233
 6234			/* This test here is not race free, but will reduce
 6235			 * the number of interrupts by looping again.
 6236			 */
 6237			if (tnapi == &tp->napi[1] && tp->rx_refill)
 6238				continue;
 6239
 6240			napi_complete(napi);
 6241			/* Reenable interrupts. */
 6242			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
 6243
 6244			/* This test here is synchronized by napi_schedule()
 6245			 * and napi_complete() to close the race condition.
 6246			 */
 6247			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
 6248				tw32(HOSTCC_MODE, tp->coalesce_mode |
 6249						  HOSTCC_MODE_ENABLE |
 6250						  tnapi->coal_now);
 6251			}
 6252			mmiowb();
 6253			break;
 6254		}
 6255	}
 6256
 6257	return work_done;
 6258
 6259tx_recovery:
 6260	/* work_done is guaranteed to be less than budget. */
 6261	napi_complete(napi);
 6262	tg3_reset_task_schedule(tp);
 6263	return work_done;
 6264}
 6265
 6266static void tg3_process_error(struct tg3 *tp)
 6267{
 6268	u32 val;
 6269	bool real_error = false;
 6270
 6271	if (tg3_flag(tp, ERROR_PROCESSED))
 6272		return;
 6273
 6274	/* Check Flow Attention register */
 6275	val = tr32(HOSTCC_FLOW_ATTN);
 6276	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
 6277		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
 6278		real_error = true;
 6279	}
 6280
 6281	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
 6282		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
 6283		real_error = true;
 6284	}
 6285
 6286	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
 6287		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
 6288		real_error = true;
 6289	}
 6290
 6291	if (!real_error)
 6292		return;
 6293
 6294	tg3_dump_state(tp);
 6295
 6296	tg3_flag_set(tp, ERROR_PROCESSED);
 6297	tg3_reset_task_schedule(tp);
 6298}
 6299
 6300static int tg3_poll(struct napi_struct *napi, int budget)
 6301{
 6302	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
 6303	struct tg3 *tp = tnapi->tp;
 6304	int work_done = 0;
 6305	struct tg3_hw_status *sblk = tnapi->hw_status;
 6306
 6307	while (1) {
 6308		if (sblk->status & SD_STATUS_ERROR)
 6309			tg3_process_error(tp);
 6310
 6311		tg3_poll_link(tp);
 6312
 6313		work_done = tg3_poll_work(tnapi, work_done, budget);
 6314
 6315		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
 6316			goto tx_recovery;
 6317
 6318		if (unlikely(work_done >= budget))
 6319			break;
 6320
 6321		if (tg3_flag(tp, TAGGED_STATUS)) {
  6322			/* tnapi->last_tag is used in tg3_int_reenable() below
 6323			 * to tell the hw how much work has been processed,
 6324			 * so we must read it before checking for more work.
 6325			 */
 6326			tnapi->last_tag = sblk->status_tag;
 6327			tnapi->last_irq_tag = tnapi->last_tag;
 6328			rmb();
 6329		} else
 6330			sblk->status &= ~SD_STATUS_UPDATED;
 6331
 6332		if (likely(!tg3_has_work(tnapi))) {
 6333			napi_complete(napi);
 6334			tg3_int_reenable(tnapi);
 6335			break;
 6336		}
 6337	}
 6338
 6339	return work_done;
 6340
 6341tx_recovery:
 6342	/* work_done is guaranteed to be less than budget. */
 6343	napi_complete(napi);
 6344	tg3_reset_task_schedule(tp);
 6345	return work_done;
 6346}
 6347
 6348static void tg3_napi_disable(struct tg3 *tp)
 6349{
 6350	int i;
 6351
 6352	for (i = tp->irq_cnt - 1; i >= 0; i--)
 6353		napi_disable(&tp->napi[i].napi);
 6354}
 6355
 6356static void tg3_napi_enable(struct tg3 *tp)
 6357{
 6358	int i;
 6359
 6360	for (i = 0; i < tp->irq_cnt; i++)
 6361		napi_enable(&tp->napi[i].napi);
 6362}
 6363
 6364static void tg3_napi_init(struct tg3 *tp)
 6365{
 6366	int i;
 6367
 6368	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
 6369	for (i = 1; i < tp->irq_cnt; i++)
 6370		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
 6371}
 6372
 6373static void tg3_napi_fini(struct tg3 *tp)
 6374{
 6375	int i;
 6376
 6377	for (i = 0; i < tp->irq_cnt; i++)
 6378		netif_napi_del(&tp->napi[i].napi);
 6379}
 6380
 6381static inline void tg3_netif_stop(struct tg3 *tp)
 6382{
 6383	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
 6384	tg3_napi_disable(tp);
 6385	netif_tx_disable(tp->dev);
 6386}
 6387
 6388static inline void tg3_netif_start(struct tg3 *tp)
 6389{
 6390	/* NOTE: unconditional netif_tx_wake_all_queues is only
 6391	 * appropriate so long as all callers are assured to
 6392	 * have free tx slots (such as after tg3_init_hw)
 6393	 */
 6394	netif_tx_wake_all_queues(tp->dev);
 6395
 6396	tg3_napi_enable(tp);
 6397	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
 6398	tg3_enable_ints(tp);
 6399}
 6400
 6401static void tg3_irq_quiesce(struct tg3 *tp)
 6402{
 6403	int i;
 6404
 6405	BUG_ON(tp->irq_sync);
 6406
 6407	tp->irq_sync = 1;
 6408	smp_mb();
 6409
 6410	for (i = 0; i < tp->irq_cnt; i++)
 6411		synchronize_irq(tp->napi[i].irq_vec);
 6412}
 6413
  6414/* Fully shut down all tg3 driver activity elsewhere in the system.
  6415 * If irq_sync is non-zero, the IRQ handlers must be quiesced as
  6416 * well.  Most of the time this is not necessary, except when
 6417 * shutting down the device.
 6418 */
 6419static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
 6420{
 6421	spin_lock_bh(&tp->lock);
 6422	if (irq_sync)
 6423		tg3_irq_quiesce(tp);
 6424}
 6425
 6426static inline void tg3_full_unlock(struct tg3 *tp)
 6427{
 6428	spin_unlock_bh(&tp->lock);
 6429}
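
#if 0	/* Illustrative usage sketch (hypothetical caller): a typical
	 * reconfiguration path in this driver quiesces the data path,
	 * reworks the chip under tp->lock, and then restarts.
	 */
	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);	/* irq_sync != 0: also wait out ISRs */
	/* ... halt the hardware, change settings, re-init ... */
	tg3_full_unlock(tp);

	tg3_netif_start(tp);
#endif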
 6430
  6431/* One-shot MSI handler - the chip automatically disables the
  6432 * interrupt after sending the MSI, so the driver doesn't have to.
 6433 */
 6434static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
 6435{
 6436	struct tg3_napi *tnapi = dev_id;
 6437	struct tg3 *tp = tnapi->tp;
 6438
 6439	prefetch(tnapi->hw_status);
 6440	if (tnapi->rx_rcb)
 6441		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
 6442
 6443	if (likely(!tg3_irq_sync(tp)))
 6444		napi_schedule(&tnapi->napi);
 6445
 6446	return IRQ_HANDLED;
 6447}
 6448
 6449/* MSI ISR - No need to check for interrupt sharing and no need to
 6450 * flush status block and interrupt mailbox. PCI ordering rules
 6451 * guarantee that MSI will arrive after the status block.
 6452 */
 6453static irqreturn_t tg3_msi(int irq, void *dev_id)
 6454{
 6455	struct tg3_napi *tnapi = dev_id;
 6456	struct tg3 *tp = tnapi->tp;
 6457
 6458	prefetch(tnapi->hw_status);
 6459	if (tnapi->rx_rcb)
 6460		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
 6461	/*
 6462	 * Writing any value to intr-mbox-0 clears PCI INTA# and
 6463	 * chip-internal interrupt pending events.
  6464	 * Writing non-zero to intr-mbox-0 additionally tells the
 6465	 * NIC to stop sending us irqs, engaging "in-intr-handler"
 6466	 * event coalescing.
 6467	 */
 6468	tw32_mailbox(tnapi->int_mbox, 0x00000001);
 6469	if (likely(!tg3_irq_sync(tp)))
 6470		napi_schedule(&tnapi->napi);
 6471
 6472	return IRQ_RETVAL(1);
 6473}
 6474
 6475static irqreturn_t tg3_interrupt(int irq, void *dev_id)
 6476{
 6477	struct tg3_napi *tnapi = dev_id;
 6478	struct tg3 *tp = tnapi->tp;
 6479	struct tg3_hw_status *sblk = tnapi->hw_status;
 6480	unsigned int handled = 1;
 6481
  6482	/* In INTx mode, it is possible for the interrupt to arrive at
  6483	 * the CPU before the status block update that preceded it.
 6484	 * Reading the PCI State register will confirm whether the
 6485	 * interrupt is ours and will flush the status block.
 6486	 */
 6487	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
 6488		if (tg3_flag(tp, CHIP_RESETTING) ||
 6489		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
 6490			handled = 0;
 6491			goto out;
 6492		}
 6493	}
 6494
 6495	/*
 6496	 * Writing any value to intr-mbox-0 clears PCI INTA# and
 6497	 * chip-internal interrupt pending events.
  6498	 * Writing non-zero to intr-mbox-0 additionally tells the
 6499	 * NIC to stop sending us irqs, engaging "in-intr-handler"
 6500	 * event coalescing.
 6501	 *
 6502	 * Flush the mailbox to de-assert the IRQ immediately to prevent
 6503	 * spurious interrupts.  The flush impacts performance but
 6504	 * excessive spurious interrupts can be worse in some cases.
 6505	 */
 6506	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
 6507	if (tg3_irq_sync(tp))
 6508		goto out;
 6509	sblk->status &= ~SD_STATUS_UPDATED;
 6510	if (likely(tg3_has_work(tnapi))) {
 6511		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
 6512		napi_schedule(&tnapi->napi);
 6513	} else {
 6514		/* No work, shared interrupt perhaps?  re-enable
 6515		 * interrupts, and flush that PCI write
 6516		 */
 6517		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
 6518			       0x00000000);
 6519	}
 6520out:
 6521	return IRQ_RETVAL(handled);
 6522}
 6523
 6524static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
 6525{
 6526	struct tg3_napi *tnapi = dev_id;
 6527	struct tg3 *tp = tnapi->tp;
 6528	struct tg3_hw_status *sblk = tnapi->hw_status;
 6529	unsigned int handled = 1;
 6530
  6531	/* In INTx mode, it is possible for the interrupt to arrive at
  6532	 * the CPU before the status block update that preceded it.
 6533	 * Reading the PCI State register will confirm whether the
 6534	 * interrupt is ours and will flush the status block.
 6535	 */
 6536	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
 6537		if (tg3_flag(tp, CHIP_RESETTING) ||
 6538		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
 6539			handled = 0;
 6540			goto out;
 6541		}
 6542	}
 6543
 6544	/*
 6545	 * writing any value to intr-mbox-0 clears PCI INTA# and
 6546	 * chip-internal interrupt pending events.
  6547	 * writing non-zero to intr-mbox-0 additionally tells the
 6548	 * NIC to stop sending us irqs, engaging "in-intr-handler"
 6549	 * event coalescing.
 6550	 *
 6551	 * Flush the mailbox to de-assert the IRQ immediately to prevent
 6552	 * spurious interrupts.  The flush impacts performance but
 6553	 * excessive spurious interrupts can be worse in some cases.
 6554	 */
 6555	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
 6556
 6557	/*
 6558	 * In a shared interrupt configuration, sometimes other devices'
 6559	 * interrupts will scream.  We record the current status tag here
 6560	 * so that the above check can report that the screaming interrupts
 6561	 * are unhandled.  Eventually they will be silenced.
 6562	 */
 6563	tnapi->last_irq_tag = sblk->status_tag;
 6564
 6565	if (tg3_irq_sync(tp))
 6566		goto out;
 6567
 6568	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
 6569
 6570	napi_schedule(&tnapi->napi);
 6571
 6572out:
 6573	return IRQ_RETVAL(handled);
 6574}
 6575
 6576/* ISR for interrupt test */
 6577static irqreturn_t tg3_test_isr(int irq, void *dev_id)
 6578{
 6579	struct tg3_napi *tnapi = dev_id;
 6580	struct tg3 *tp = tnapi->tp;
 6581	struct tg3_hw_status *sblk = tnapi->hw_status;
 6582
 6583	if ((sblk->status & SD_STATUS_UPDATED) ||
 6584	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
 6585		tg3_disable_ints(tp);
 6586		return IRQ_RETVAL(1);
 6587	}
 6588	return IRQ_RETVAL(0);
 6589}
 6590
 6591#ifdef CONFIG_NET_POLL_CONTROLLER
 6592static void tg3_poll_controller(struct net_device *dev)
 6593{
 6594	int i;
 6595	struct tg3 *tp = netdev_priv(dev);
 6596
 6597	for (i = 0; i < tp->irq_cnt; i++)
 6598		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
 6599}
 6600#endif
 6601
 6602static void tg3_tx_timeout(struct net_device *dev)
 6603{
 6604	struct tg3 *tp = netdev_priv(dev);
 6605
 6606	if (netif_msg_tx_err(tp)) {
 6607		netdev_err(dev, "transmit timed out, resetting\n");
 6608		tg3_dump_state(tp);
 6609	}
 6610
 6611	tg3_reset_task_schedule(tp);
 6612}
 6613
 6614/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
 6615static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
 6616{
 6617	u32 base = (u32) mapping & 0xffffffff;
 6618
 6619	return (base > 0xffffdcc0) && (base + len + 8 < base);
 6620}
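
/* Worked example for the test above (illustrative): a mapping at
 * base == 0xffffe000 with len == 0x2000 gives base > 0xffffdcc0,
 * and base + len + 8 == 0x100000008 truncates to 0x00000008 in
 * 32 bits, which is < base, so the buffer straddles a 4GB boundary
 * and needs the workaround.  A mapping at 0x7fffe000 fails the
 * first check and is accepted as-is.
 */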
 6621
 6622/* Test for DMA addresses > 40-bit */
 6623static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
 6624					  int len)
 6625{
 6626#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
 6627	if (tg3_flag(tp, 40BIT_DMA_BUG))
 6628		return ((u64) mapping + len) > DMA_BIT_MASK(40);
 6629	return 0;
 6630#else
 6631	return 0;
 6632#endif
 6633}
 6634
 6635static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
 6636				 dma_addr_t mapping, u32 len, u32 flags,
 6637				 u32 mss, u32 vlan)
 6638{
 6639	txbd->addr_hi = ((u64) mapping >> 32);
 6640	txbd->addr_lo = ((u64) mapping & 0xffffffff);
 6641	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
 6642	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
 6643}
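
#if 0	/* Illustrative call (hypothetical values): a 1514-byte frame
	 * sent as a single BD, no TSO (mss == 0), VLAN tag 100, with
	 * the end-of-packet flag set.
	 */
	tg3_tx_set_bd(&tnapi->tx_ring[entry], mapping,
		      1514, TXD_FLAG_END, 0, 100);
#endif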
 6644
 6645static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
 6646			    dma_addr_t map, u32 len, u32 flags,
 6647			    u32 mss, u32 vlan)
 6648{
 6649	struct tg3 *tp = tnapi->tp;
 6650	bool hwbug = false;
 6651
 6652	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
 6653		hwbug = true;
 6654
 6655	if (tg3_4g_overflow_test(map, len))
 6656		hwbug = true;
 6657
 6658	if (tg3_40bit_overflow_test(tp, map, len))
 6659		hwbug = true;
 6660
 6661	if (tp->dma_limit) {
 6662		u32 prvidx = *entry;
 6663		u32 tmp_flag = flags & ~TXD_FLAG_END;
 6664		while (len > tp->dma_limit && *budget) {
 6665			u32 frag_len = tp->dma_limit;
 6666			len -= tp->dma_limit;
 6667
  6668			/* Avoid the 8-byte DMA problem */
 6669			if (len <= 8) {
 6670				len += tp->dma_limit / 2;
 6671				frag_len = tp->dma_limit / 2;
 6672			}
 6673
 6674			tnapi->tx_buffers[*entry].fragmented = true;
 6675
 6676			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
 6677				      frag_len, tmp_flag, mss, vlan);
 6678			*budget -= 1;
 6679			prvidx = *entry;
 6680			*entry = NEXT_TX(*entry);
 6681
 6682			map += frag_len;
 6683		}
 6684
 6685		if (len) {
 6686			if (*budget) {
 6687				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
 6688					      len, flags, mss, vlan);
 6689				*budget -= 1;
 6690				*entry = NEXT_TX(*entry);
 6691			} else {
 6692				hwbug = true;
 6693				tnapi->tx_buffers[prvidx].fragmented = false;
 6694			}
 6695		}
 6696	} else {
 6697		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
 6698			      len, flags, mss, vlan);
 6699		*entry = NEXT_TX(*entry);
 6700	}
 6701
 6702	return hwbug;
 6703}
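
/* Worked example of the dma_limit splitting above (illustrative,
 * assuming tp->dma_limit == 4096): a 4100-byte fragment would leave
 * a 4-byte tail after one full 4096-byte BD, re-triggering the
 * short-DMA bug, so the first BD is shrunk to 2048 bytes and the
 * remaining 2052 bytes go out in a second BD.  Every BD except the
 * last is marked fragmented and has TXD_FLAG_END cleared.
 */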
 6704
 6705static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
 6706{
 6707	int i;
 6708	struct sk_buff *skb;
 6709	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
 6710
 6711	skb = txb->skb;
 6712	txb->skb = NULL;
 6713
 6714	pci_unmap_single(tnapi->tp->pdev,
 6715			 dma_unmap_addr(txb, mapping),
 6716			 skb_headlen(skb),
 6717			 PCI_DMA_TODEVICE);
 6718
 6719	while (txb->fragmented) {
 6720		txb->fragmented = false;
 6721		entry = NEXT_TX(entry);
 6722		txb = &tnapi->tx_buffers[entry];
 6723	}
 6724
 6725	for (i = 0; i <= last; i++) {
 6726		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 6727
 6728		entry = NEXT_TX(entry);
 6729		txb = &tnapi->tx_buffers[entry];
 6730
 6731		pci_unmap_page(tnapi->tp->pdev,
 6732			       dma_unmap_addr(txb, mapping),
 6733			       skb_frag_size(frag), PCI_DMA_TODEVICE);
 6734
 6735		while (txb->fragmented) {
 6736			txb->fragmented = false;
 6737			entry = NEXT_TX(entry);
 6738			txb = &tnapi->tx_buffers[entry];
 6739		}
 6740	}
 6741}
 6742
  6743/* Work around the 4GB and 40-bit hardware DMA bugs. */
 6744static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
 6745				       struct sk_buff **pskb,
 6746				       u32 *entry, u32 *budget,
 6747				       u32 base_flags, u32 mss, u32 vlan)
 6748{
 6749	struct tg3 *tp = tnapi->tp;
 6750	struct sk_buff *new_skb, *skb = *pskb;
 6751	dma_addr_t new_addr = 0;
 6752	int ret = 0;
 6753
 6754	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
 6755		new_skb = skb_copy(skb, GFP_ATOMIC);
 6756	else {
 6757		int more_headroom = 4 - ((unsigned long)skb->data & 3);
 6758
 6759		new_skb = skb_copy_expand(skb,
 6760					  skb_headroom(skb) + more_headroom,
 6761					  skb_tailroom(skb), GFP_ATOMIC);
 6762	}
 6763
 6764	if (!new_skb) {
 6765		ret = -1;
 6766	} else {
 6767		/* New SKB is guaranteed to be linear. */
 6768		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
 6769					  PCI_DMA_TODEVICE);
 6770		/* Make sure the mapping succeeded */
 6771		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
 6772			dev_kfree_skb(new_skb);
 6773			ret = -1;
 6774		} else {
 6775			u32 save_entry = *entry;
 6776
 6777			base_flags |= TXD_FLAG_END;
 6778
 6779			tnapi->tx_buffers[*entry].skb = new_skb;
 6780			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
 6781					   mapping, new_addr);
 6782
 6783			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
 6784					    new_skb->len, base_flags,
 6785					    mss, vlan)) {
 6786				tg3_tx_skb_unmap(tnapi, save_entry, -1);
 6787				dev_kfree_skb(new_skb);
 6788				ret = -1;
 6789			}
 6790		}
 6791	}
 6792
 6793	dev_kfree_skb(skb);
 6794	*pskb = new_skb;
 6795	return ret;
 6796}
 6797
 6798static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
 6799
  6800/* Use GSO to work around a rare TSO bug that may be triggered when the
 6801 * TSO header is greater than 80 bytes.
 6802 */
 6803static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
 6804{
 6805	struct sk_buff *segs, *nskb;
 6806	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
 6807
 6808	/* Estimate the number of fragments in the worst case */
 6809	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
 6810		netif_stop_queue(tp->dev);
 6811
  6812		/* netif_tx_stop_queue() must be done before checking the
  6813		 * tx index in tg3_tx_avail() below, because in
 6814		 * tg3_tx(), we update tx index before checking for
 6815		 * netif_tx_queue_stopped().
 6816		 */
 6817		smp_mb();
 6818		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
 6819			return NETDEV_TX_BUSY;
 6820
 6821		netif_wake_queue(tp->dev);
 6822	}
 6823
 6824	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
 6825	if (IS_ERR(segs))
 6826		goto tg3_tso_bug_end;
 6827
 6828	do {
 6829		nskb = segs;
 6830		segs = segs->next;
 6831		nskb->next = NULL;
 6832		tg3_start_xmit(nskb, tp->dev);
 6833	} while (segs);
 6834
 6835tg3_tso_bug_end:
 6836	dev_kfree_skb(skb);
 6837
 6838	return NETDEV_TX_OK;
 6839}
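
/* Illustrative sizing for the estimate above: a ~64 KiB TSO skb
 * with an MSS of 1460 reports gso_segs == 45, so frag_cnt_est
 * comes to 135 descriptors.  Tripling gso_segs is a deliberately
 * pessimistic bound, since each resulting segment needs a BD for
 * its headers plus BDs for its payload pieces.
 */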
 6840
 6841/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 6842 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 6843 */
 6844static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 6845{
 6846	struct tg3 *tp = netdev_priv(dev);
 6847	u32 len, entry, base_flags, mss, vlan = 0;
 6848	u32 budget;
 6849	int i = -1, would_hit_hwbug;
 6850	dma_addr_t mapping;
 6851	struct tg3_napi *tnapi;
 6852	struct netdev_queue *txq;
 6853	unsigned int last;
 6854
 6855	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 6856	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
 6857	if (tg3_flag(tp, ENABLE_TSS))
 6858		tnapi++;
 6859
 6860	budget = tg3_tx_avail(tnapi);
 6861
 6862	/* We are running in BH disabled context with netif_tx_lock
 6863	 * and TX reclaim runs via tp->napi.poll inside of a software
 6864	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
 6865	 * no IRQ context deadlocks to worry about either.  Rejoice!
 6866	 */
 6867	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
 6868		if (!netif_tx_queue_stopped(txq)) {
 6869			netif_tx_stop_queue(txq);
 6870
 6871			/* This is a hard error, log it. */
 6872			netdev_err(dev,
 6873				   "BUG! Tx Ring full when queue awake!\n");
 6874		}
 6875		return NETDEV_TX_BUSY;
 6876	}
 6877
 6878	entry = tnapi->tx_prod;
 6879	base_flags = 0;
 6880	if (skb->ip_summed == CHECKSUM_PARTIAL)
 6881		base_flags |= TXD_FLAG_TCPUDP_CSUM;
 6882
 6883	mss = skb_shinfo(skb)->gso_size;
 6884	if (mss) {
 6885		struct iphdr *iph;
 6886		u32 tcp_opt_len, hdr_len;
 6887
 6888		if (skb_header_cloned(skb) &&
 6889		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
 6890			goto drop;
 6891
 6892		iph = ip_hdr(skb);
 6893		tcp_opt_len = tcp_optlen(skb);
 6894
 6895		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
 6896
 6897		if (!skb_is_gso_v6(skb)) {
 6898			iph->check = 0;
 6899			iph->tot_len = htons(mss + hdr_len);
 6900		}
 6901
 6902		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
 6903		    tg3_flag(tp, TSO_BUG))
 6904			return tg3_tso_bug(tp, skb);
 6905
 6906		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
 6907			       TXD_FLAG_CPU_POST_DMA);
 6908
 6909		if (tg3_flag(tp, HW_TSO_1) ||
 6910		    tg3_flag(tp, HW_TSO_2) ||
 6911		    tg3_flag(tp, HW_TSO_3)) {
 6912			tcp_hdr(skb)->check = 0;
 6913			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
 6914		} else
 6915			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
 6916								 iph->daddr, 0,
 6917								 IPPROTO_TCP,
 6918								 0);
 6919
 6920		if (tg3_flag(tp, HW_TSO_3)) {
 6921			mss |= (hdr_len & 0xc) << 12;
 6922			if (hdr_len & 0x10)
 6923				base_flags |= 0x00000010;
 6924			base_flags |= (hdr_len & 0x3e0) << 5;
 6925		} else if (tg3_flag(tp, HW_TSO_2))
 6926			mss |= hdr_len << 9;
 6927		else if (tg3_flag(tp, HW_TSO_1) ||
 6928			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
 6929			if (tcp_opt_len || iph->ihl > 5) {
 6930				int tsflags;
 6931
 6932				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
 6933				mss |= (tsflags << 11);
 6934			}
 6935		} else {
 6936			if (tcp_opt_len || iph->ihl > 5) {
 6937				int tsflags;
 6938
 6939				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
 6940				base_flags |= tsflags << 12;
 6941			}
 6942		}
 6943	}
 6944
 6945	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
 6946	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
 6947		base_flags |= TXD_FLAG_JMB_PKT;
 6948
 6949	if (vlan_tx_tag_present(skb)) {
 6950		base_flags |= TXD_FLAG_VLAN;
 6951		vlan = vlan_tx_tag_get(skb);
 6952	}
 6953
 6954	len = skb_headlen(skb);
 6955
 6956	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
 6957	if (pci_dma_mapping_error(tp->pdev, mapping))
 6958		goto drop;
 6959
 6960
 6961	tnapi->tx_buffers[entry].skb = skb;
 6962	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
 6963
 6964	would_hit_hwbug = 0;
 6965
 6966	if (tg3_flag(tp, 5701_DMA_BUG))
 6967		would_hit_hwbug = 1;
 6968
 6969	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
 6970			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
 6971			    mss, vlan)) {
 6972		would_hit_hwbug = 1;
 6973	} else if (skb_shinfo(skb)->nr_frags > 0) {
 6974		u32 tmp_mss = mss;
 6975
 6976		if (!tg3_flag(tp, HW_TSO_1) &&
 6977		    !tg3_flag(tp, HW_TSO_2) &&
 6978		    !tg3_flag(tp, HW_TSO_3))
 6979			tmp_mss = 0;
 6980
 6981		/* Now loop through additional data
 6982		 * fragments, and queue them.
 6983		 */
 6984		last = skb_shinfo(skb)->nr_frags - 1;
 6985		for (i = 0; i <= last; i++) {
 6986			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 6987
 6988			len = skb_frag_size(frag);
 6989			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
 6990						   len, DMA_TO_DEVICE);
 6991
 6992			tnapi->tx_buffers[entry].skb = NULL;
 6993			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
 6994					   mapping);
 6995			if (dma_mapping_error(&tp->pdev->dev, mapping))
 6996				goto dma_error;
 6997
 6998			if (!budget ||
 6999			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
 7000					    len, base_flags |
 7001					    ((i == last) ? TXD_FLAG_END : 0),
 7002					    tmp_mss, vlan)) {
 7003				would_hit_hwbug = 1;
 7004				break;
 7005			}
 7006		}
 7007	}
 7008
 7009	if (would_hit_hwbug) {
 7010		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
 7011
 7012		/* If the workaround fails due to memory/mapping
 7013		 * failure, silently drop this packet.
 7014		 */
 7015		entry = tnapi->tx_prod;
 7016		budget = tg3_tx_avail(tnapi);
 7017		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
 7018						base_flags, mss, vlan))
 7019			goto drop_nofree;
 7020	}
 7021
 7022	skb_tx_timestamp(skb);
 7023	netdev_tx_sent_queue(txq, skb->len);
 7024
 7025	/* Sync BD data before updating mailbox */
 7026	wmb();
 7027
 7028	/* Packets are ready, update Tx producer idx local and on card. */
 7029	tw32_tx_mbox(tnapi->prodmbox, entry);
 7030
 7031	tnapi->tx_prod = entry;
 7032	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
 7033		netif_tx_stop_queue(txq);
 7034
  7035		/* netif_tx_stop_queue() must be done before checking the
  7036		 * tx index in tg3_tx_avail() below, because in
 7037		 * tg3_tx(), we update tx index before checking for
 7038		 * netif_tx_queue_stopped().
 7039		 */
 7040		smp_mb();
 7041		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
 7042			netif_tx_wake_queue(txq);
 7043	}
 7044
 7045	mmiowb();
 7046	return NETDEV_TX_OK;
 7047
 7048dma_error:
 7049	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
 7050	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
 7051drop:
 7052	dev_kfree_skb(skb);
 7053drop_nofree:
 7054	tp->tx_dropped++;
 7055	return NETDEV_TX_OK;
 7056}
 7057
 7058static void tg3_mac_loopback(struct tg3 *tp, bool enable)
 7059{
 7060	if (enable) {
 7061		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
 7062				  MAC_MODE_PORT_MODE_MASK);
 7063
 7064		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
 7065
 7066		if (!tg3_flag(tp, 5705_PLUS))
 7067			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
 7068
 7069		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
 7070			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
 7071		else
 7072			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
 7073	} else {
 7074		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
 7075
 7076		if (tg3_flag(tp, 5705_PLUS) ||
 7077		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
 7078		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
 7079			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
 7080	}
 7081
 7082	tw32(MAC_MODE, tp->mac_mode);
 7083	udelay(40);
 7084}
 7085
 7086static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
 7087{
 7088	u32 val, bmcr, mac_mode, ptest = 0;
 7089
 7090	tg3_phy_toggle_apd(tp, false);
 7091	tg3_phy_toggle_automdix(tp, 0);
 7092
 7093	if (extlpbk && tg3_phy_set_extloopbk(tp))
 7094		return -EIO;
 7095
 7096	bmcr = BMCR_FULLDPLX;
 7097	switch (speed) {
 7098	case SPEED_10:
 7099		break;
 7100	case SPEED_100:
 7101		bmcr |= BMCR_SPEED100;
 7102		break;
 7103	case SPEED_1000:
 7104	default:
 7105		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
 7106			speed = SPEED_100;
 7107			bmcr |= BMCR_SPEED100;
 7108		} else {
 7109			speed = SPEED_1000;
 7110			bmcr |= BMCR_SPEED1000;
 7111		}
 7112	}
 7113
 7114	if (extlpbk) {
 7115		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
 7116			tg3_readphy(tp, MII_CTRL1000, &val);
 7117			val |= CTL1000_AS_MASTER |
 7118			       CTL1000_ENABLE_MASTER;
 7119			tg3_writephy(tp, MII_CTRL1000, val);
 7120		} else {
 7121			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
 7122				MII_TG3_FET_PTEST_TRIM_2;
 7123			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
 7124		}
 7125	} else
 7126		bmcr |= BMCR_LOOPBACK;
 7127
 7128	tg3_writephy(tp, MII_BMCR, bmcr);
 7129
 7130	/* The write needs to be flushed for the FETs */
 7131	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
 7132		tg3_readphy(tp, MII_BMCR, &bmcr);
 7133
 7134	udelay(40);
 7135
 7136	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
 7137	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
 7138		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
 7139			     MII_TG3_FET_PTEST_FRC_TX_LINK |
 7140			     MII_TG3_FET_PTEST_FRC_TX_LOCK);
 7141
 7142		/* The write needs to be flushed for the AC131 */
 7143		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
 7144	}
 7145
 7146	/* Reset to prevent losing 1st rx packet intermittently */
 7147	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
 7148	    tg3_flag(tp, 5780_CLASS)) {
 7149		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
 7150		udelay(10);
 7151		tw32_f(MAC_RX_MODE, tp->rx_mode);
 7152	}
 7153
 7154	mac_mode = tp->mac_mode &
 7155		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
 7156	if (speed == SPEED_1000)
 7157		mac_mode |= MAC_MODE_PORT_MODE_GMII;
 7158	else
 7159		mac_mode |= MAC_MODE_PORT_MODE_MII;
 7160
 7161	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
 7162		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
 7163
 7164		if (masked_phy_id == TG3_PHY_ID_BCM5401)
 7165			mac_mode &= ~MAC_MODE_LINK_POLARITY;
 7166		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
 7167			mac_mode |= MAC_MODE_LINK_POLARITY;
 7168
 7169		tg3_writephy(tp, MII_TG3_EXT_CTRL,
 7170			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
 7171	}
 7172
 7173	tw32(MAC_MODE, mac_mode);
 7174	udelay(40);
 7175
 7176	return 0;
 7177}
 7178
 7179static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
 7180{
 7181	struct tg3 *tp = netdev_priv(dev);
 7182
 7183	if (features & NETIF_F_LOOPBACK) {
 7184		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
 7185			return;
 7186
 7187		spin_lock_bh(&tp->lock);
 7188		tg3_mac_loopback(tp, true);
 7189		netif_carrier_on(tp->dev);
 7190		spin_unlock_bh(&tp->lock);
 7191		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
 7192	} else {
 7193		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
 7194			return;
 7195
 7196		spin_lock_bh(&tp->lock);
 7197		tg3_mac_loopback(tp, false);
 7198		/* Force link status check */
 7199		tg3_setup_phy(tp, 1);
 7200		spin_unlock_bh(&tp->lock);
 7201		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
 7202	}
 7203}
 7204
 7205static netdev_features_t tg3_fix_features(struct net_device *dev,
 7206	netdev_features_t features)
 7207{
 7208	struct tg3 *tp = netdev_priv(dev);
 7209
 7210	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
 7211		features &= ~NETIF_F_ALL_TSO;
 7212
 7213	return features;
 7214}
 7215
 7216static int tg3_set_features(struct net_device *dev, netdev_features_t features)
 7217{
 7218	netdev_features_t changed = dev->features ^ features;
 7219
 7220	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
 7221		tg3_set_loopback(dev, features);
 7222
 7223	return 0;
 7224}
 7225
 7226static void tg3_rx_prodring_free(struct tg3 *tp,
 7227				 struct tg3_rx_prodring_set *tpr)
 7228{
 7229	int i;
 7230
 7231	if (tpr != &tp->napi[0].prodring) {
 7232		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
 7233		     i = (i + 1) & tp->rx_std_ring_mask)
 7234			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
 7235					tp->rx_pkt_map_sz);
 7236
 7237		if (tg3_flag(tp, JUMBO_CAPABLE)) {
 7238			for (i = tpr->rx_jmb_cons_idx;
 7239			     i != tpr->rx_jmb_prod_idx;
 7240			     i = (i + 1) & tp->rx_jmb_ring_mask) {
 7241				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
 7242						TG3_RX_JMB_MAP_SZ);
 7243			}
 7244		}
 7245
 7246		return;
 7247	}
 7248
 7249	for (i = 0; i <= tp->rx_std_ring_mask; i++)
 7250		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
 7251				tp->rx_pkt_map_sz);
 7252
 7253	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
 7254		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
 7255			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
 7256					TG3_RX_JMB_MAP_SZ);
 7257	}
 7258}
 7259
 7260/* Initialize rx rings for packet processing.
 7261 *
 7262 * The chip has been shut down and the driver detached from
 7263 * the networking, so no interrupts or new tx packets will
 7264 * end up in the driver.  tp->{tx,}lock are held and thus
 7265 * we may not sleep.
 7266 */
 7267static int tg3_rx_prodring_alloc(struct tg3 *tp,
 7268				 struct tg3_rx_prodring_set *tpr)
 7269{
 7270	u32 i, rx_pkt_dma_sz;
 7271
 7272	tpr->rx_std_cons_idx = 0;
 7273	tpr->rx_std_prod_idx = 0;
 7274	tpr->rx_jmb_cons_idx = 0;
 7275	tpr->rx_jmb_prod_idx = 0;
 7276
 7277	if (tpr != &tp->napi[0].prodring) {
 7278		memset(&tpr->rx_std_buffers[0], 0,
 7279		       TG3_RX_STD_BUFF_RING_SIZE(tp));
 7280		if (tpr->rx_jmb_buffers)
 7281			memset(&tpr->rx_jmb_buffers[0], 0,
 7282			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
 7283		goto done;
 7284	}
 7285
 7286	/* Zero out all descriptors. */
 7287	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
 7288
 7289	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
 7290	if (tg3_flag(tp, 5780_CLASS) &&
 7291	    tp->dev->mtu > ETH_DATA_LEN)
 7292		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
 7293	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
 7294
  7295	/* Initialize invariants of the rings; we only set this
 7296	 * stuff once.  This works because the card does not
 7297	 * write into the rx buffer posting rings.
 7298	 */
 7299	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
 7300		struct tg3_rx_buffer_desc *rxd;
 7301
 7302		rxd = &tpr->rx_std[i];
 7303		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
 7304		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
 7305		rxd->opaque = (RXD_OPAQUE_RING_STD |
 7306			       (i << RXD_OPAQUE_INDEX_SHIFT));
 7307	}
 7308
 7309	/* Now allocate fresh SKBs for each rx ring. */
 7310	for (i = 0; i < tp->rx_pending; i++) {
 7311		unsigned int frag_size;
 7312
 7313		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
 7314				      &frag_size) < 0) {
 7315			netdev_warn(tp->dev,
 7316				    "Using a smaller RX standard ring. Only "
 7317				    "%d out of %d buffers were allocated "
 7318				    "successfully\n", i, tp->rx_pending);
 7319			if (i == 0)
 7320				goto initfail;
 7321			tp->rx_pending = i;
 7322			break;
 7323		}
 7324	}
 7325
 7326	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
 7327		goto done;
 7328
 7329	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
 7330
 7331	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
 7332		goto done;
 7333
 7334	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
 7335		struct tg3_rx_buffer_desc *rxd;
 7336
 7337		rxd = &tpr->rx_jmb[i].std;
 7338		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
 7339		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
 7340				  RXD_FLAG_JUMBO;
 7341		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
 7342		       (i << RXD_OPAQUE_INDEX_SHIFT));
 7343	}
 7344
 7345	for (i = 0; i < tp->rx_jumbo_pending; i++) {
 7346		unsigned int frag_size;
 7347
 7348		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
 7349				      &frag_size) < 0) {
 7350			netdev_warn(tp->dev,
 7351				    "Using a smaller RX jumbo ring. Only %d "
 7352				    "out of %d buffers were allocated "
 7353				    "successfully\n", i, tp->rx_jumbo_pending);
 7354			if (i == 0)
 7355				goto initfail;
 7356			tp->rx_jumbo_pending = i;
 7357			break;
 7358		}
 7359	}
 7360
 7361done:
 7362	return 0;
 7363
 7364initfail:
 7365	tg3_rx_prodring_free(tp, tpr);
 7366	return -ENOMEM;
 7367}
 7368
 7369static void tg3_rx_prodring_fini(struct tg3 *tp,
 7370				 struct tg3_rx_prodring_set *tpr)
 7371{
 7372	kfree(tpr->rx_std_buffers);
 7373	tpr->rx_std_buffers = NULL;
 7374	kfree(tpr->rx_jmb_buffers);
 7375	tpr->rx_jmb_buffers = NULL;
 7376	if (tpr->rx_std) {
 7377		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
 7378				  tpr->rx_std, tpr->rx_std_mapping);
 7379		tpr->rx_std = NULL;
 7380	}
 7381	if (tpr->rx_jmb) {
 7382		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
 7383				  tpr->rx_jmb, tpr->rx_jmb_mapping);
 7384		tpr->rx_jmb = NULL;
 7385	}
 7386}
 7387
 7388static int tg3_rx_prodring_init(struct tg3 *tp,
 7389				struct tg3_rx_prodring_set *tpr)
 7390{
 7391	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
 7392				      GFP_KERNEL);
 7393	if (!tpr->rx_std_buffers)
 7394		return -ENOMEM;
 7395
 7396	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
 7397					 TG3_RX_STD_RING_BYTES(tp),
 7398					 &tpr->rx_std_mapping,
 7399					 GFP_KERNEL);
 7400	if (!tpr->rx_std)
 7401		goto err_out;
 7402
 7403	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
 7404		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
 7405					      GFP_KERNEL);
 7406		if (!tpr->rx_jmb_buffers)
 7407			goto err_out;
 7408
 7409		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
 7410						 TG3_RX_JMB_RING_BYTES(tp),
 7411						 &tpr->rx_jmb_mapping,
 7412						 GFP_KERNEL);
 7413		if (!tpr->rx_jmb)
 7414			goto err_out;
 7415	}
 7416
 7417	return 0;
 7418
 7419err_out:
 7420	tg3_rx_prodring_fini(tp, tpr);
 7421	return -ENOMEM;
 7422}
 7423
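/* Note the error handling pattern above: tg3_rx_prodring_fini() frees
 * only what was actually allocated and resets each pointer to NULL, so
 * it is safe to call on a partially constructed set.  That is what
 * lets every failure point in tg3_rx_prodring_init() jump to the one
 * err_out label instead of unwinding allocation by allocation.
 */
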
 7424/* Free up pending packets in all rx/tx rings.
 7425 *
 7426 * The chip has been shut down and the driver detached from
 7427 * the networking stack, so no interrupts or new tx packets
 7428 * will end up in the driver.  tp->{tx,}lock is not held and
 7429 * we are not in an interrupt context, so we may sleep.
 7430 */
 7431static void tg3_free_rings(struct tg3 *tp)
 7432{
 7433	int i, j;
 7434
 7435	for (j = 0; j < tp->irq_cnt; j++) {
 7436		struct tg3_napi *tnapi = &tp->napi[j];
 7437
 7438		tg3_rx_prodring_free(tp, &tnapi->prodring);
 7439
 7440		if (!tnapi->tx_buffers)
 7441			continue;
 7442
 7443		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
 7444			struct sk_buff *skb = tnapi->tx_buffers[i].skb;
 7445
 7446			if (!skb)
 7447				continue;
 7448
 7449			tg3_tx_skb_unmap(tnapi, i,
 7450					 skb_shinfo(skb)->nr_frags - 1);
 7451
 7452			dev_kfree_skb_any(skb);
 7453		}
 7454		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
 7455	}
 7456}
 7457
 7458/* Initialize tx/rx rings for packet processing.
 7459 *
 7460 * The chip has been shut down and the driver detached from
 7461 * the networking stack, so no interrupts or new tx packets
 7462 * will end up in the driver.  tp->{tx,}lock are held and
 7463 * thus we may not sleep.
 7464 */
 7465static int tg3_init_rings(struct tg3 *tp)
 7466{
 7467	int i;
 7468
 7469	/* Free up all the SKBs. */
 7470	tg3_free_rings(tp);
 7471
 7472	for (i = 0; i < tp->irq_cnt; i++) {
 7473		struct tg3_napi *tnapi = &tp->napi[i];
 7474
 7475		tnapi->last_tag = 0;
 7476		tnapi->last_irq_tag = 0;
 7477		tnapi->hw_status->status = 0;
 7478		tnapi->hw_status->status_tag = 0;
 7479		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
 7480
 7481		tnapi->tx_prod = 0;
 7482		tnapi->tx_cons = 0;
 7483		if (tnapi->tx_ring)
 7484			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
 7485
 7486		tnapi->rx_rcb_ptr = 0;
 7487		if (tnapi->rx_rcb)
 7488			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
 7489
 7490		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
 7491			tg3_free_rings(tp);
 7492			return -ENOMEM;
 7493		}
 7494	}
 7495
 7496	return 0;
 7497}
 7498
 7499/*
 7500 * Must not be invoked with interrupt sources disabled and
 7501 * the hardware shut down.
 7502 */
 7503static void tg3_free_consistent(struct tg3 *tp)
 7504{
 7505	int i;
 7506
 7507	for (i = 0; i < tp->irq_cnt; i++) {
 7508		struct tg3_napi *tnapi = &tp->napi[i];
 7509
 7510		if (tnapi->tx_ring) {
 7511			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
 7512				tnapi->tx_ring, tnapi->tx_desc_mapping);
 7513			tnapi->tx_ring = NULL;
 7514		}
 7515
 7516		kfree(tnapi->tx_buffers);
 7517		tnapi->tx_buffers = NULL;
 7518
 7519		if (tnapi->rx_rcb) {
 7520			dma_free_coherent(&tp->pdev->dev,
 7521					  TG3_RX_RCB_RING_BYTES(tp),
 7522					  tnapi->rx_rcb,
 7523					  tnapi->rx_rcb_mapping);
 7524			tnapi->rx_rcb = NULL;
 7525		}
 7526
 7527		tg3_rx_prodring_fini(tp, &tnapi->prodring);
 7528
 7529		if (tnapi->hw_status) {
 7530			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
 7531					  tnapi->hw_status,
 7532					  tnapi->status_mapping);
 7533			tnapi->hw_status = NULL;
 7534		}
 7535	}
 7536
 7537	if (tp->hw_stats) {
 7538		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
 7539				  tp->hw_stats, tp->stats_mapping);
 7540		tp->hw_stats = NULL;
 7541	}
 7542}
 7543
 7544/*
 7545 * Must not be invoked with interrupt sources disabled and
 7546 * the hardware shut down.  Can sleep.
 7547 */
 7548static int tg3_alloc_consistent(struct tg3 *tp)
 7549{
 7550	int i;
 7551
 7552	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
 7553					  sizeof(struct tg3_hw_stats),
 7554					  &tp->stats_mapping,
 7555					  GFP_KERNEL);
 7556	if (!tp->hw_stats)
 7557		goto err_out;
 7558
 7559	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
 7560
 7561	for (i = 0; i < tp->irq_cnt; i++) {
 7562		struct tg3_napi *tnapi = &tp->napi[i];
 7563		struct tg3_hw_status *sblk;
 7564
 7565		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
 7566						      TG3_HW_STATUS_SIZE,
 7567						      &tnapi->status_mapping,
 7568						      GFP_KERNEL);
 7569		if (!tnapi->hw_status)
 7570			goto err_out;
 7571
 7572		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
 7573		sblk = tnapi->hw_status;
 7574
 7575		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
 7576			goto err_out;
 7577
 7578		/* If multivector TSS is enabled, vector 0 does not handle
 7579		 * tx interrupts.  Don't allocate any resources for it.
 7580		 */
 7581		if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
 7582		    (i && tg3_flag(tp, ENABLE_TSS))) {
 7583			tnapi->tx_buffers = kzalloc(
 7584					       sizeof(struct tg3_tx_ring_info) *
 7585					       TG3_TX_RING_SIZE, GFP_KERNEL);
 7586			if (!tnapi->tx_buffers)
 7587				goto err_out;
 7588
 7589			tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
 7590							    TG3_TX_RING_BYTES,
 7591							&tnapi->tx_desc_mapping,
 7592							    GFP_KERNEL);
 7593			if (!tnapi->tx_ring)
 7594				goto err_out;
 7595		}
 7596
 7597		/*
 7598		 * When RSS is enabled, the status block format changes
 7599		 * slightly.  The "rx_jumbo_consumer", "reserved",
 7600		 * and "rx_mini_consumer" members get mapped to the
 7601		 * other three rx return ring producer indexes.
 7602		 */
 7603		switch (i) {
 7604		default:
 7605			if (tg3_flag(tp, ENABLE_RSS)) {
 7606				tnapi->rx_rcb_prod_idx = NULL;
 7607				break;
 7608			}
 7609			/* Fall through */
 7610		case 1:
 7611			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
 7612			break;
 7613		case 2:
 7614			tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
 7615			break;
 7616		case 3:
 7617			tnapi->rx_rcb_prod_idx = &sblk->reserved;
 7618			break;
 7619		case 4:
 7620			tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
 7621			break;
 7622		}
 7623
 7624		/*
 7625		 * If multivector RSS is enabled, vector 0 does not handle
 7626		 * rx or tx interrupts.  Don't allocate any resources for it.
 7627		 */
 7628		if (!i && tg3_flag(tp, ENABLE_RSS))
 7629			continue;
 7630
 7631		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
 7632						   TG3_RX_RCB_RING_BYTES(tp),
 7633						   &tnapi->rx_rcb_mapping,
 7634						   GFP_KERNEL);
 7635		if (!tnapi->rx_rcb)
 7636			goto err_out;
 7637
 7638		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
 7639	}
 7640
 7641	return 0;
 7642
 7643err_out:
 7644	tg3_free_consistent(tp);
 7645	return -ENOMEM;
 7646}
 7647
 7648#define MAX_WAIT_CNT 1000
 7649
 7650/* To stop a block, clear the enable bit and poll until it
 7651 * clears.  tp->lock is held.
 7652 */
 7653static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
 7654{
 7655	unsigned int i;
 7656	u32 val;
 7657
 7658	if (tg3_flag(tp, 5705_PLUS)) {
 7659		switch (ofs) {
 7660		case RCVLSC_MODE:
 7661		case DMAC_MODE:
 7662		case MBFREE_MODE:
 7663		case BUFMGR_MODE:
 7664		case MEMARB_MODE:
 7665			/* We can't enable/disable these bits on the
 7666			 * 5705/5750, so just report success.
 7667			 */
 7668			return 0;
 7669
 7670		default:
 7671			break;
 7672		}
 7673	}
 7674
 7675	val = tr32(ofs);
 7676	val &= ~enable_bit;
 7677	tw32_f(ofs, val);
 7678
 7679	for (i = 0; i < MAX_WAIT_CNT; i++) {
 7680		udelay(100);
 7681		val = tr32(ofs);
 7682		if ((val & enable_bit) == 0)
 7683			break;
 7684	}
 7685
 7686	if (i == MAX_WAIT_CNT && !silent) {
 7687		dev_err(&tp->pdev->dev,
 7688			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
 7689			ofs, enable_bit);
 7690		return -ENODEV;
 7691	}
 7692
 7693	return 0;
 7694}
 7695
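/* Worst case, the poll loop in tg3_stop_block() waits
 * MAX_WAIT_CNT * 100us = 1000 * 100us = 100ms for the enable bit to
 * clear before giving up with -ENODEV.
 */
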
 7696/* tp->lock is held. */
 7697static int tg3_abort_hw(struct tg3 *tp, int silent)
 7698{
 7699	int i, err;
 7700
 7701	tg3_disable_ints(tp);
 7702
 7703	tp->rx_mode &= ~RX_MODE_ENABLE;
 7704	tw32_f(MAC_RX_MODE, tp->rx_mode);
 7705	udelay(10);
 7706
 7707	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
 7708	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
 7709	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
 7710	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
 7711	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
 7712	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
 7713
 7714	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
 7715	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
 7716	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
 7717	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
 7718	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
 7719	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
 7720	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
 7721
 7722	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
 7723	tw32_f(MAC_MODE, tp->mac_mode);
 7724	udelay(40);
 7725
 7726	tp->tx_mode &= ~TX_MODE_ENABLE;
 7727	tw32_f(MAC_TX_MODE, tp->tx_mode);
 7728
 7729	for (i = 0; i < MAX_WAIT_CNT; i++) {
 7730		udelay(100);
 7731		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
 7732			break;
 7733	}
 7734	if (i >= MAX_WAIT_CNT) {
 7735		dev_err(&tp->pdev->dev,
 7736			"%s timed out, TX_MODE_ENABLE will not clear "
 7737			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
 7738		err |= -ENODEV;
 7739	}
 7740
 7741	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
 7742	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
 7743	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
 7744
 7745	tw32(FTQ_RESET, 0xffffffff);
 7746	tw32(FTQ_RESET, 0x00000000);
 7747
 7748	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
 7749	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
 7750
 7751	for (i = 0; i < tp->irq_cnt; i++) {
 7752		struct tg3_napi *tnapi = &tp->napi[i];
 7753		if (tnapi->hw_status)
 7754			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
 7755	}
 7756
 7757	return err;
 7758}
 7759
 7760/* Save PCI command register before chip reset */
 7761static void tg3_save_pci_state(struct tg3 *tp)
 7762{
 7763	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
 7764}
 7765
 7766/* Restore PCI state after chip reset */
 7767static void tg3_restore_pci_state(struct tg3 *tp)
 7768{
 7769	u32 val;
 7770
 7771	/* Re-enable indirect register accesses. */
 7772	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
 7773			       tp->misc_host_ctrl);
 7774
 7775	/* Set MAX PCI retry to zero. */
 7776	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
 7777	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
 7778	    tg3_flag(tp, PCIX_MODE))
 7779		val |= PCISTATE_RETRY_SAME_DMA;
 7780	/* Allow reads and writes to the APE register and memory space. */
 7781	if (tg3_flag(tp, ENABLE_APE))
 7782		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
 7783		       PCISTATE_ALLOW_APE_SHMEM_WR |
 7784		       PCISTATE_ALLOW_APE_PSPACE_WR;
 7785	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
 7786
 7787	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
 7788
 7789	if (!tg3_flag(tp, PCI_EXPRESS)) {
 7790		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
 7791				      tp->pci_cacheline_sz);
 7792		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
 7793				      tp->pci_lat_timer);
 7794	}
 7795
 7796	/* Make sure PCI-X relaxed ordering bit is clear. */
 7797	if (tg3_flag(tp, PCIX_MODE)) {
 7798		u16 pcix_cmd;
 7799
 7800		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
 7801				     &pcix_cmd);
 7802		pcix_cmd &= ~PCI_X_CMD_ERO;
 7803		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
 7804				      pcix_cmd);
 7805	}
 7806
 7807	if (tg3_flag(tp, 5780_CLASS)) {
 7808
 7809		/* Chip reset on the 5780 will clear the MSI enable bit,
 7810		 * so we need to restore it.
 7811		 */
 7812		if (tg3_flag(tp, USING_MSI)) {
 7813			u16 ctrl;
 7814
 7815			pci_read_config_word(tp->pdev,
 7816					     tp->msi_cap + PCI_MSI_FLAGS,
 7817					     &ctrl);
 7818			pci_write_config_word(tp->pdev,
 7819					      tp->msi_cap + PCI_MSI_FLAGS,
 7820					      ctrl | PCI_MSI_FLAGS_ENABLE);
 7821			val = tr32(MSGINT_MODE);
 7822			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
 7823		}
 7824	}
 7825}
 7826
 7827/* tp->lock is held. */
 7828static int tg3_chip_reset(struct tg3 *tp)
 7829{
 7830	u32 val;
 7831	void (*write_op)(struct tg3 *, u32, u32);
 7832	int i, err;
 7833
 7834	tg3_nvram_lock(tp);
 7835
 7836	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
 7837
 7838	/* No matching tg3_nvram_unlock() after this because
 7839	 * the chip reset below will undo the nvram lock.
 7840	 */
 7841	tp->nvram_lock_cnt = 0;
 7842
 7843	/* GRC_MISC_CFG core clock reset will clear the memory
 7844	 * enable bit in PCI register 4 and the MSI enable bit
 7845	 * on some chips, so we save relevant registers here.
 7846	 */
 7847	tg3_save_pci_state(tp);
 7848
 7849	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
 7850	    tg3_flag(tp, 5755_PLUS))
 7851		tw32(GRC_FASTBOOT_PC, 0);
 7852
 7853	/*
 7854	 * We must avoid the readl() that normally takes place.
 7855	 * It can lock up machines, cause machine checks, and other
 7856	 * fun things.  So, temporarily disable the 5701
 7857	 * hardware workaround while we do the reset.
 7858	 */
 7859	write_op = tp->write32;
 7860	if (write_op == tg3_write_flush_reg32)
 7861		tp->write32 = tg3_write32;
 7862
 7863	/* Prevent the irq handler from reading or writing PCI registers
 7864	 * during chip reset when the memory enable bit in the PCI command
 7865	 * register may be cleared.  The chip does not generate interrupts
 7866	 * at this time, but the irq handler may still be called due to irq
 7867	 * sharing or irqpoll.
 7868	 */
 7869	tg3_flag_set(tp, CHIP_RESETTING);
 7870	for (i = 0; i < tp->irq_cnt; i++) {
 7871		struct tg3_napi *tnapi = &tp->napi[i];
 7872		if (tnapi->hw_status) {
 7873			tnapi->hw_status->status = 0;
 7874			tnapi->hw_status->status_tag = 0;
 7875		}
 7876		tnapi->last_tag = 0;
 7877		tnapi->last_irq_tag = 0;
 7878	}
 7879	smp_mb();
 7880
 7881	for (i = 0; i < tp->irq_cnt; i++)
 7882		synchronize_irq(tp->napi[i].irq_vec);
 7883
 7884	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
 7885		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
 7886		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
 7887	}
 7888
 7889	/* do the reset */
 7890	val = GRC_MISC_CFG_CORECLK_RESET;
 7891
 7892	if (tg3_flag(tp, PCI_EXPRESS)) {
 7893		/* Force PCIe 1.0a mode */
 7894		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
 7895		    !tg3_flag(tp, 57765_PLUS) &&
 7896		    tr32(TG3_PCIE_PHY_TSTCTL) ==
 7897		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
 7898			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
 7899
 7900		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
 7901			tw32(GRC_MISC_CFG, (1 << 29));
 7902			val |= (1 << 29);
 7903		}
 7904	}
 7905
 7906	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
 7907		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
 7908		tw32(GRC_VCPU_EXT_CTRL,
 7909		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
 7910	}
 7911
 7912	/* Manage gphy power for all CPMU-absent PCIe devices. */
 7913	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
 7914		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
 7915
 7916	tw32(GRC_MISC_CFG, val);
 7917
 7918	/* restore 5701 hardware bug workaround write method */
 7919	tp->write32 = write_op;
 7920
 7921	/* Unfortunately, we have to delay before the PCI read back.
 7922	 * Some 575X chips will not even respond to a PCI cfg access
 7923	 * when the reset command is given to the chip.
 7924	 *
 7925	 * How do these hardware designers expect things to work
 7926	 * properly if the PCI write is posted for a long period
 7927	 * of time?  It is always necessary to have some method by
 7928	 * which a register read back can occur to push out the
 7929	 * write that does the reset.
 7930	 *
 7931	 * For most tg3 variants the trick below works.
 7932	 * Ho hum...
 7933	 */
 7934	udelay(120);
 7935
 7936	/* Flush PCI posted writes.  The normal MMIO registers
 7937	 * are inaccessible at this time, so this is the only
 7938	 * way to do this reliably (actually, this is no longer
 7939	 * the case, see above).  I tried to use indirect
 7940	 * register read/write but this upset some 5701 variants.
 7941	 */
 7942	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
 7943
 7944	udelay(120);
 7945
 7946	if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
 7947		u16 val16;
 7948
 7949		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
 7950			int i;
 7951			u32 cfg_val;
 7952
 7953			/* Wait for link training to complete.  */
 7954			for (i = 0; i < 5000; i++)
 7955				udelay(100);
 7956
 7957			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
 7958			pci_write_config_dword(tp->pdev, 0xc4,
 7959					       cfg_val | (1 << 15));
 7960		}
 7961
 7962		/* Clear the "no snoop" and "relaxed ordering" bits. */
 7963		pci_read_config_word(tp->pdev,
 7964				     pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
 7965				     &val16);
 7966		val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
 7967			   PCI_EXP_DEVCTL_NOSNOOP_EN);
 7968		/*
 7969		 * Older PCIe devices only support the 128-byte
 7970		 * MPS setting.  Enforce the restriction.
 7971		 */
 7972		if (!tg3_flag(tp, CPMU_PRESENT))
 7973			val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
 7974		pci_write_config_word(tp->pdev,
 7975				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
 7976				      val16);
 7977
 7978		/* Clear error status */
 7979		pci_write_config_word(tp->pdev,
 7980				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
 7981				      PCI_EXP_DEVSTA_CED |
 7982				      PCI_EXP_DEVSTA_NFED |
 7983				      PCI_EXP_DEVSTA_FED |
 7984				      PCI_EXP_DEVSTA_URD);
 7985	}
 7986
 7987	tg3_restore_pci_state(tp);
 7988
 7989	tg3_flag_clear(tp, CHIP_RESETTING);
 7990	tg3_flag_clear(tp, ERROR_PROCESSED);
 7991
 7992	val = 0;
 7993	if (tg3_flag(tp, 5780_CLASS))
 7994		val = tr32(MEMARB_MODE);
 7995	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
 7996
 7997	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
 7998		tg3_stop_fw(tp);
 7999		tw32(0x5000, 0x400);
 8000	}
 8001
 8002	tw32(GRC_MODE, tp->grc_mode);
 8003
 8004	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
 8005		val = tr32(0xc4);
 8006
 8007		tw32(0xc4, val | (1 << 15));
 8008	}
 8009
 8010	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
 8011	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
 8012		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
 8013		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
 8014			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
 8015		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
 8016	}
 8017
 8018	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
 8019		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
 8020		val = tp->mac_mode;
 8021	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
 8022		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
 8023		val = tp->mac_mode;
 8024	} else
 8025		val = 0;
 8026
 8027	tw32_f(MAC_MODE, val);
 8028	udelay(40);
 8029
 8030	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
 8031
 8032	err = tg3_poll_fw(tp);
 8033	if (err)
 8034		return err;
 8035
 8036	tg3_mdio_start(tp);
 8037
 8038	if (tg3_flag(tp, PCI_EXPRESS) &&
 8039	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
 8040	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
 8041	    !tg3_flag(tp, 57765_PLUS)) {
 8042		val = tr32(0x7c00);
 8043
 8044		tw32(0x7c00, val | (1 << 25));
 8045	}
 8046
 8047	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
 8048		val = tr32(TG3_CPMU_CLCK_ORIDE);
 8049		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
 8050	}
 8051
 8052	/* Reprobe ASF enable state.  */
 8053	tg3_flag_clear(tp, ENABLE_ASF);
 8054	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
 8055	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
 8056	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
 8057		u32 nic_cfg;
 8058
 8059		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
 8060		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
 8061			tg3_flag_set(tp, ENABLE_ASF);
 8062			tp->last_event_jiffies = jiffies;
 8063			if (tg3_flag(tp, 5750_PLUS))
 8064				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
 8065		}
 8066	}
 8067
 8068	return 0;
 8069}
 8070
 8071static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
 8072static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
 8073
 8074/* tp->lock is held. */
 8075static int tg3_halt(struct tg3 *tp, int kind, int silent)
 8076{
 8077	int err;
 8078
 8079	tg3_stop_fw(tp);
 8080
 8081	tg3_write_sig_pre_reset(tp, kind);
 8082
 8083	tg3_abort_hw(tp, silent);
 8084	err = tg3_chip_reset(tp);
 8085
 8086	__tg3_set_mac_addr(tp, 0);
 8087
 8088	tg3_write_sig_legacy(tp, kind);
 8089	tg3_write_sig_post_reset(tp, kind);
 8090
 8091	if (tp->hw_stats) {
 8092		/* Save the stats across chip resets... */
 8093		tg3_get_nstats(tp, &tp->net_stats_prev);
 8094		tg3_get_estats(tp, &tp->estats_prev);
 8095
 8096		/* And make sure the next sample is new data */
 8097		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
 8098	}
 8099
 8100	if (err)
 8101		return err;
 8102
 8103	return 0;
 8104}
 8105
 8106static int tg3_set_mac_addr(struct net_device *dev, void *p)
 8107{
 8108	struct tg3 *tp = netdev_priv(dev);
 8109	struct sockaddr *addr = p;
 8110	int err = 0, skip_mac_1 = 0;
 8111
 8112	if (!is_valid_ether_addr(addr->sa_data))
 8113		return -EADDRNOTAVAIL;
 8114
 8115	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
 8116
 8117	if (!netif_running(dev))
 8118		return 0;
 8119
 8120	if (tg3_flag(tp, ENABLE_ASF)) {
 8121		u32 addr0_high, addr0_low, addr1_high, addr1_low;
 8122
 8123		addr0_high = tr32(MAC_ADDR_0_HIGH);
 8124		addr0_low = tr32(MAC_ADDR_0_LOW);
 8125		addr1_high = tr32(MAC_ADDR_1_HIGH);
 8126		addr1_low = tr32(MAC_ADDR_1_LOW);
 8127
 8128		/* Skip MAC addr 1 if ASF is using it. */
 8129		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
 8130		    !(addr1_high == 0 && addr1_low == 0))
 8131			skip_mac_1 = 1;
 8132	}
 8133	spin_lock_bh(&tp->lock);
 8134	__tg3_set_mac_addr(tp, skip_mac_1);
 8135	spin_unlock_bh(&tp->lock);
 8136
 8137	return err;
 8138}
 8139
 8140/* tp->lock is held. */
 8141static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
 8142			   dma_addr_t mapping, u32 maxlen_flags,
 8143			   u32 nic_addr)
 8144{
 8145	tg3_write_mem(tp,
 8146		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
 8147		      ((u64) mapping >> 32));
 8148	tg3_write_mem(tp,
 8149		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
 8150		      ((u64) mapping & 0xffffffff));
 8151	tg3_write_mem(tp,
 8152		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
 8153		       maxlen_flags);
 8154
 8155	if (!tg3_flag(tp, 5705_PLUS))
 8156		tg3_write_mem(tp,
 8157			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
 8158			      nic_addr);
 8159}
 8160
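/* tg3_set_bdinfo() splits the 64-bit DMA address into the two 32-bit
 * halves the chip expects.  For a mapping of 0x0000000123456000, for
 * example:
 *
 *	(u64) mapping >> 32		== 0x00000001 (HOST_ADDR high)
 *	(u64) mapping & 0xffffffff	== 0x23456000 (HOST_ADDR low)
 */
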
 8161static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
 8162{
 8163	int i;
 8164
 8165	if (!tg3_flag(tp, ENABLE_TSS)) {
 8166		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
 8167		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
 8168		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
 8169	} else {
 8170		tw32(HOSTCC_TXCOL_TICKS, 0);
 8171		tw32(HOSTCC_TXMAX_FRAMES, 0);
 8172		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
 8173	}
 8174
 8175	if (!tg3_flag(tp, ENABLE_RSS)) {
 8176		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
 8177		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
 8178		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
 8179	} else {
 8180		tw32(HOSTCC_RXCOL_TICKS, 0);
 8181		tw32(HOSTCC_RXMAX_FRAMES, 0);
 8182		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
 8183	}
 8184
 8185	if (!tg3_flag(tp, 5705_PLUS)) {
 8186		u32 val = ec->stats_block_coalesce_usecs;
 8187
 8188		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
 8189		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
 8190
 8191		if (!netif_carrier_ok(tp->dev))
 8192			val = 0;
 8193
 8194		tw32(HOSTCC_STAT_COAL_TICKS, val);
 8195	}
 8196
 8197	for (i = 0; i < tp->irq_cnt - 1; i++) {
 8198		u32 reg;
 8199
 8200		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
 8201		tw32(reg, ec->rx_coalesce_usecs);
 8202		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
 8203		tw32(reg, ec->rx_max_coalesced_frames);
 8204		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
 8205		tw32(reg, ec->rx_max_coalesced_frames_irq);
 8206
 8207		if (tg3_flag(tp, ENABLE_TSS)) {
 8208			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
 8209			tw32(reg, ec->tx_coalesce_usecs);
 8210			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
 8211			tw32(reg, ec->tx_max_coalesced_frames);
 8212			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
 8213			tw32(reg, ec->tx_max_coalesced_frames_irq);
 8214		}
 8215	}
 8216
 8217	for (; i < tp->irq_max - 1; i++) {
 8218		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
 8219		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
 8220		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
 8221
 8222		if (tg3_flag(tp, ENABLE_TSS)) {
 8223			tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
 8224			tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
 8225			tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
 8226		}
 8227	}
 8228}
 8229
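/* Each MSI-X vector past the first has its own bank of coalescing
 * registers spaced 0x18 bytes apart, which is what the i * 0x18
 * arithmetic above indexes.  For the rx ticks register, for example:
 *
 *	vector 1:  HOSTCC_RXCOL_TICKS_VEC1 + 0 * 0x18
 *	vector 2:  HOSTCC_RXCOL_TICKS_VEC1 + 1 * 0x18
 */
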
 8230/* tp->lock is held. */
 8231static void tg3_rings_reset(struct tg3 *tp)
 8232{
 8233	int i;
 8234	u32 stblk, txrcb, rxrcb, limit;
 8235	struct tg3_napi *tnapi = &tp->napi[0];
 8236
 8237	/* Disable all transmit rings but the first. */
 8238	if (!tg3_flag(tp, 5705_PLUS))
 8239		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
 8240	else if (tg3_flag(tp, 5717_PLUS))
 8241		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
 8242	else if (tg3_flag(tp, 57765_CLASS))
 8243		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
 8244	else
 8245		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
 8246
 8247	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
 8248	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
 8249		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
 8250			      BDINFO_FLAGS_DISABLED);
 8251
 8252
 8253	/* Disable all receive return rings but the first. */
 8254	if (tg3_flag(tp, 5717_PLUS))
 8255		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
 8256	else if (!tg3_flag(tp, 5705_PLUS))
 8257		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
 8258	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
 8259		 tg3_flag(tp, 57765_CLASS))
 8260		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
 8261	else
 8262		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
 8263
 8264	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
 8265	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
 8266		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
 8267			      BDINFO_FLAGS_DISABLED);
 8268
 8269	/* Disable interrupts */
 8270	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
 8271	tp->napi[0].chk_msi_cnt = 0;
 8272	tp->napi[0].last_rx_cons = 0;
 8273	tp->napi[0].last_tx_cons = 0;
 8274
 8275	/* Zero mailbox registers. */
 8276	if (tg3_flag(tp, SUPPORT_MSIX)) {
 8277		for (i = 1; i < tp->irq_max; i++) {
 8278			tp->napi[i].tx_prod = 0;
 8279			tp->napi[i].tx_cons = 0;
 8280			if (tg3_flag(tp, ENABLE_TSS))
 8281				tw32_mailbox(tp->napi[i].prodmbox, 0);
 8282			tw32_rx_mbox(tp->napi[i].consmbox, 0);
 8283			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
 8284			tp->napi[i].chk_msi_cnt = 0;
 8285			tp->napi[i].last_rx_cons = 0;
 8286			tp->napi[i].last_tx_cons = 0;
 8287		}
 8288		if (!tg3_flag(tp, ENABLE_TSS))
 8289			tw32_mailbox(tp->napi[0].prodmbox, 0);
 8290	} else {
 8291		tp->napi[0].tx_prod = 0;
 8292		tp->napi[0].tx_cons = 0;
 8293		tw32_mailbox(tp->napi[0].prodmbox, 0);
 8294		tw32_rx_mbox(tp->napi[0].consmbox, 0);
 8295	}
 8296
 8297	/* Make sure the NIC-based send BD rings are disabled. */
 8298	if (!tg3_flag(tp, 5705_PLUS)) {
 8299		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
 8300		for (i = 0; i < 16; i++)
 8301			tw32_tx_mbox(mbox + i * 8, 0);
 8302	}
 8303
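	/* Each NIC-based send mailbox is a 64-bit register, so the 16
	 * rings above sit 8 bytes apart:
	 *
	 *	ring i:  MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW + i * 8
	 */
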
 8304	txrcb = NIC_SRAM_SEND_RCB;
 8305	rxrcb = NIC_SRAM_RCV_RET_RCB;
 8306
 8307	/* Clear status block in ram. */
 8308	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
 8309
 8310	/* Set status block DMA address */
 8311	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
 8312	     ((u64) tnapi->status_mapping >> 32));
 8313	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
 8314	     ((u64) tnapi->status_mapping & 0xffffffff));
 8315
 8316	if (tnapi->tx_ring) {
 8317		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
 8318			       (TG3_TX_RING_SIZE <<
 8319				BDINFO_FLAGS_MAXLEN_SHIFT),
 8320			       NIC_SRAM_TX_BUFFER_DESC);
 8321		txrcb += TG3_BDINFO_SIZE;
 8322	}
 8323
 8324	if (tnapi->rx_rcb) {
 8325		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
 8326			       (tp->rx_ret_ring_mask + 1) <<
 8327				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
 8328		rxrcb += TG3_BDINFO_SIZE;
 8329	}
 8330
 8331	stblk = HOSTCC_STATBLCK_RING1;
 8332
 8333	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
 8334		u64 mapping = (u64)tnapi->status_mapping;
 8335		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
 8336		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
 8337
 8338		/* Clear status block in ram. */
 8339		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
 8340
 8341		if (tnapi->tx_ring) {
 8342			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
 8343				       (TG3_TX_RING_SIZE <<
 8344					BDINFO_FLAGS_MAXLEN_SHIFT),
 8345				       NIC_SRAM_TX_BUFFER_DESC);
 8346			txrcb += TG3_BDINFO_SIZE;
 8347		}
 8348
 8349		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
 8350			       ((tp->rx_ret_ring_mask + 1) <<
 8351				BDINFO_FLAGS_MAXLEN_SHIFT), 0);
 8352
 8353		stblk += 8;
 8354		rxrcb += TG3_BDINFO_SIZE;
 8355	}
 8356}
 8357
 8358static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
 8359{
 8360	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
 8361
 8362	if (!tg3_flag(tp, 5750_PLUS) ||
 8363	    tg3_flag(tp, 5780_CLASS) ||
 8364	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
 8365	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
 8366	    tg3_flag(tp, 57765_PLUS))
 8367		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
 8368	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
 8369		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
 8370		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
 8371	else
 8372		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
 8373
 8374	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
 8375	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
 8376
 8377	val = min(nic_rep_thresh, host_rep_thresh);
 8378	tw32(RCVBDI_STD_THRESH, val);
 8379
 8380	if (tg3_flag(tp, 57765_PLUS))
 8381		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
 8382
 8383	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
 8384		return;
 8385
 8386	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
 8387
 8388	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
 8389
 8390	val = min(bdcache_maxcnt / 2, host_rep_thresh);
 8391	tw32(RCVBDI_JUMBO_THRESH, val);
 8392
 8393	if (tg3_flag(tp, 57765_PLUS))
 8394		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
 8395}
 8396
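/* The programmed standard-ring threshold is thus the smaller of half
 * the on-chip BD cache (itself capped at rx_std_max_post) and 1/8 of
 * the host ring occupancy, never less than 1.  With
 * tp->rx_pending == 200, for example:
 *
 *	host_rep_thresh = max(200 / 8, 1);	== 25
 */
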
 8397static inline u32 calc_crc(unsigned char *buf, int len)
 8398{
 8399	u32 reg;
 8400	u32 tmp;
 8401	int j, k;
 8402
 8403	reg = 0xffffffff;
 8404
 8405	for (j = 0; j < len; j++) {
 8406		reg ^= buf[j];
 8407
 8408		for (k = 0; k < 8; k++) {
 8409			tmp = reg & 0x01;
 8410
 8411			reg >>= 1;
 8412
 8413			if (tmp)
 8414				reg ^= 0xedb88320;
 8415		}
 8416	}
 8417
 8418	return ~reg;
 8419}
 8420
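/* calc_crc() is the standard bit-reflected (LSB-first) CRC-32 used by
 * Ethernet: polynomial 0xedb88320, register preset to all ones, result
 * inverted.  The multicast hash in __tg3_set_rx_mode() keys off bits
 * of this CRC, so the exact variant matters.
 */
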
 8421static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
 8422{
 8423	/* accept or reject all multicast frames */
 8424	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
 8425	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
 8426	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
 8427	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
 8428}
 8429
 8430static void __tg3_set_rx_mode(struct net_device *dev)
 8431{
 8432	struct tg3 *tp = netdev_priv(dev);
 8433	u32 rx_mode;
 8434
 8435	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
 8436				  RX_MODE_KEEP_VLAN_TAG);
 8437
 8438#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
 8439	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
 8440	 * flag clear.
 8441	 */
 8442	if (!tg3_flag(tp, ENABLE_ASF))
 8443		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
 8444#endif
 8445
 8446	if (dev->flags & IFF_PROMISC) {
 8447		/* Promiscuous mode. */
 8448		rx_mode |= RX_MODE_PROMISC;
 8449	} else if (dev->flags & IFF_ALLMULTI) {
 8450		/* Accept all multicast. */
 8451		tg3_set_multi(tp, 1);
 8452	} else if (netdev_mc_empty(dev)) {
 8453		/* Reject all multicast. */
 8454		tg3_set_multi(tp, 0);
 8455	} else {
 8456		/* Accept one or more multicast(s). */
 8457		struct netdev_hw_addr *ha;
 8458		u32 mc_filter[4] = { 0, };
 8459		u32 regidx;
 8460		u32 bit;
 8461		u32 crc;
 8462
 8463		netdev_for_each_mc_addr(ha, dev) {
 8464			crc = calc_crc(ha->addr, ETH_ALEN);
 8465			bit = ~crc & 0x7f;
 8466			regidx = (bit & 0x60) >> 5;
 8467			bit &= 0x1f;
 8468			mc_filter[regidx] |= (1 << bit);
 8469		}
 8470
 8471		tw32(MAC_HASH_REG_0, mc_filter[0]);
 8472		tw32(MAC_HASH_REG_1, mc_filter[1]);
 8473		tw32(MAC_HASH_REG_2, mc_filter[2]);
 8474		tw32(MAC_HASH_REG_3, mc_filter[3]);
 8475	}
 8476
 8477	if (rx_mode != tp->rx_mode) {
 8478		tp->rx_mode = rx_mode;
 8479		tw32_f(MAC_RX_MODE, rx_mode);
 8480		udelay(10);
 8481	}
 8482}
 8483
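/* The multicast hash above folds the 32-bit CRC of each address down
 * to one of 128 filter bits spread across the four MAC_HASH_REG_*
 * registers:
 *
 *	bit    = ~crc & 0x7f;		selects bit 0..127
 *	regidx = (bit & 0x60) >> 5;	selects one of 4 registers
 *	bit   &= 0x1f;			selects one of its 32 bits
 *
 * e.g. crc == 0xffffff25 yields bit 0x5a: register 2, bit 26.
 */
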
 8484static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
 8485{
 8486	int i;
 8487
 8488	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
 8489		tp->rss_ind_tbl[i] =
 8490			ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
 8491}
 8492
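/* ethtool_rxfh_indir_default(i, n) is just i % n, so the default
 * table spreads flows round-robin: with four rx queues it reads
 * 0, 1, 2, 3, 0, 1, ...  tp->irq_cnt - 1 is used because vector 0
 * carries no rx traffic when RSS is enabled.
 */
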
 8493static void tg3_rss_check_indir_tbl(struct tg3 *tp)
 8494{
 8495	int i;
 8496
 8497	if (!tg3_flag(tp, SUPPORT_MSIX))
 8498		return;
 8499
 8500	if (tp->irq_cnt <= 2) {
 8501		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
 8502		return;
 8503	}
 8504
 8505	/* Validate table against current IRQ count */
 8506	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
 8507		if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
 8508			break;
 8509	}
 8510
 8511	if (i != TG3_RSS_INDIR_TBL_SIZE)
 8512		tg3_rss_init_dflt_indir_tbl(tp);
 8513}
 8514
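/* For example, with tp->irq_cnt == 5 there are four rx-capable
 * vectors, so every indirection entry must lie in 0..3; one
 * out-of-range entry throws the whole table back to the default.
 */
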
 8515static void tg3_rss_write_indir_tbl(struct tg3 *tp)
 8516{
 8517	int i = 0;
 8518	u32 reg = MAC_RSS_INDIR_TBL_0;
 8519
 8520	while (i < TG3_RSS_INDIR_TBL_SIZE) {
 8521		u32 val = tp->rss_ind_tbl[i];
 8522		i++;
 8523		for (; i % 8; i++) {
 8524			val <<= 4;
 8525			val |= tp->rss_ind_tbl[i];
 8526		}
 8527		tw32(reg, val);
 8528		reg += 4;
 8529	}
 8530}
 8531
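/* Each 32-bit MAC_RSS_INDIR_TBL_* register holds eight 4-bit table
 * entries, first entry in the top nibble, which is what the shift/or
 * loop above builds.  Entries {1, 2, 3, 0, 1, 2, 3, 0}, for example,
 * pack to 0x12301230, and a 128-entry table fills 16 consecutive
 * registers.
 */
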
 8532/* tp->lock is held. */
 8533static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 8534{
 8535	u32 val, rdmac_mode;
 8536	int i, err, limit;
 8537	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
 8538
 8539	tg3_disable_ints(tp);
 8540
 8541	tg3_stop_fw(tp);
 8542
 8543	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
 8544
 8545	if (tg3_flag(tp, INIT_COMPLETE))
 8546		tg3_abort_hw(tp, 1);
 8547
 8548	/* Enable MAC control of LPI */
 8549	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
 8550		tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
 8551		       TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
 8552		       TG3_CPMU_EEE_LNKIDL_UART_IDL);
 8553
 8554		tw32_f(TG3_CPMU_EEE_CTRL,
 8555		       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
 8556
 8557		val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
 8558		      TG3_CPMU_EEEMD_LPI_IN_TX |
 8559		      TG3_CPMU_EEEMD_LPI_IN_RX |
 8560		      TG3_CPMU_EEEMD_EEE_ENABLE;
 8561
 8562		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
 8563			val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
 8564
 8565		if (tg3_flag(tp, ENABLE_APE))
 8566			val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
 8567
 8568		tw32_f(TG3_CPMU_EEE_MODE, val);
 8569
 8570		tw32_f(TG3_CPMU_EEE_DBTMR1,
 8571		       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
 8572		       TG3_CPMU_DBTMR1_LNKIDLE_2047US);
 8573
 8574		tw32_f(TG3_CPMU_EEE_DBTMR2,
 8575		       TG3_CPMU_DBTMR2_APE_TX_2047US |
 8576		       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
 8577	}
 8578
 8579	if (reset_phy)
 8580		tg3_phy_reset(tp);
 8581
 8582	err = tg3_chip_reset(tp);
 8583	if (err)
 8584		return err;
 8585
 8586	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
 8587
 8588	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
 8589		val = tr32(TG3_CPMU_CTRL);
 8590		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
 8591		tw32(TG3_CPMU_CTRL, val);
 8592
 8593		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
 8594		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
 8595		val |= CPMU_LSPD_10MB_MACCLK_6_25;
 8596		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
 8597
 8598		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
 8599		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
 8600		val |= CPMU_LNK_AWARE_MACCLK_6_25;
 8601		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
 8602
 8603		val = tr32(TG3_CPMU_HST_ACC);
 8604		val &= ~CPMU_HST_ACC_MACCLK_MASK;
 8605		val |= CPMU_HST_ACC_MACCLK_6_25;
 8606		tw32(TG3_CPMU_HST_ACC, val);
 8607	}
 8608
 8609	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
 8610		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
 8611		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
 8612		       PCIE_PWR_MGMT_L1_THRESH_4MS;
 8613		tw32(PCIE_PWR_MGMT_THRESH, val);
 8614
 8615		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
 8616		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
 8617
 8618		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
 8619
 8620		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
 8621		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
 8622	}
 8623
 8624	if (tg3_flag(tp, L1PLLPD_EN)) {
 8625		u32 grc_mode = tr32(GRC_MODE);
 8626
 8627		/* Access the lower 1K of PL PCIE block registers. */
 8628		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
 8629		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
 8630
 8631		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
 8632		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
 8633		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
 8634
 8635		tw32(GRC_MODE, grc_mode);
 8636	}
 8637
 8638	if (tg3_flag(tp, 57765_CLASS)) {
 8639		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
 8640			u32 grc_mode = tr32(GRC_MODE);
 8641
 8642			/* Access the lower 1K of PL PCIE block registers. */
 8643			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
 8644			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
 8645
 8646			val = tr32(TG3_PCIE_TLDLPL_PORT +
 8647				   TG3_PCIE_PL_LO_PHYCTL5);
 8648			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
 8649			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
 8650
 8651			tw32(GRC_MODE, grc_mode);
 8652		}
 8653
 8654		if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
 8655			u32 grc_mode = tr32(GRC_MODE);
 8656
 8657			/* Access the lower 1K of DL PCIE block registers. */
 8658			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
 8659			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
 8660
 8661			val = tr32(TG3_PCIE_TLDLPL_PORT +
 8662				   TG3_PCIE_DL_LO_FTSMAX);
 8663			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
 8664			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
 8665			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);
 8666
 8667			tw32(GRC_MODE, grc_mode);
 8668		}
 8669
 8670		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
 8671		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
 8672		val |= CPMU_LSPD_10MB_MACCLK_6_25;
 8673		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
 8674	}
 8675
 8676	/* This works around an issue with Athlon chipsets on
 8677	 * B3 tigon3 silicon.  This bit has no effect on any
 8678	 * other revision.  But do not set this on PCI Express
 8679	 * chips and don't even touch the clocks if the CPMU is present.
 8680	 */
 8681	if (!tg3_flag(tp, CPMU_PRESENT)) {
 8682		if (!tg3_flag(tp, PCI_EXPRESS))
 8683			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
 8684		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
 8685	}
 8686
 8687	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
 8688	    tg3_flag(tp, PCIX_MODE)) {
 8689		val = tr32(TG3PCI_PCISTATE);
 8690		val |= PCISTATE_RETRY_SAME_DMA;
 8691		tw32(TG3PCI_PCISTATE, val);
 8692	}
 8693
 8694	if (tg3_flag(tp, ENABLE_APE)) {
 8695		/* Allow reads and writes to the
 8696		 * APE register and memory space.
 8697		 */
 8698		val = tr32(TG3PCI_PCISTATE);
 8699		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
 8700		       PCISTATE_ALLOW_APE_SHMEM_WR |
 8701		       PCISTATE_ALLOW_APE_PSPACE_WR;
 8702		tw32(TG3PCI_PCISTATE, val);
 8703	}
 8704
 8705	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
 8706		/* Enable some hw fixes.  */
 8707		val = tr32(TG3PCI_MSI_DATA);
 8708		val |= (1 << 26) | (1 << 28) | (1 << 29);
 8709		tw32(TG3PCI_MSI_DATA, val);
 8710	}
 8711
 8712	/* Descriptor ring init may make accesses to the
 8713	 * NIC SRAM area to set up the TX descriptors, so we
 8714	 * can only do this after the hardware has been
 8715	 * successfully reset.
 8716	 */
 8717	err = tg3_init_rings(tp);
 8718	if (err)
 8719		return err;
 8720
 8721	if (tg3_flag(tp, 57765_PLUS)) {
 8722		val = tr32(TG3PCI_DMA_RW_CTRL) &
 8723		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
 8724		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
 8725			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
 8726		if (!tg3_flag(tp, 57765_CLASS) &&
 8727		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
 8728			val |= DMA_RWCTRL_TAGGED_STAT_WA;
 8729		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
 8730	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
 8731		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
 8732		/* This value is determined during the probe-time DMA
 8733		 * engine test, tg3_test_dma().
 8734		 */
 8735		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
 8736	}
 8737
 8738	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
 8739			  GRC_MODE_4X_NIC_SEND_RINGS |
 8740			  GRC_MODE_NO_TX_PHDR_CSUM |
 8741			  GRC_MODE_NO_RX_PHDR_CSUM);
 8742	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
 8743
 8744	/* Pseudo-header checksum is done by hardware logic and not
 8745	 * the offload processors, so make the chip do the pseudo-
 8746	 * header checksums on receive.  For transmit it is more
 8747	 * convenient to do the pseudo-header checksum in software
 8748	 * as Linux does that on transmit for us in all cases.
 8749	 */
 8750	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
 8751
 8752	tw32(GRC_MODE,
 8753	     tp->grc_mode |
 8754	     (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
 8755
 8756	/* Set up the timer prescaler register.  The clock is always 66 MHz. */
 8757	val = tr32(GRC_MISC_CFG);
 8758	val &= ~0xff;
 8759	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
 8760	tw32(GRC_MISC_CFG, val);
 8761
 8762	/* Initialize MBUF/DESC pool. */
 8763	if (tg3_flag(tp, 5750_PLUS)) {
 8764		/* Do nothing.  */
 8765	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
 8766		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
 8767		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
 8768			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
 8769		else
 8770			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
 8771		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
 8772		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
 8773	} else if (tg3_flag(tp, TSO_CAPABLE)) {
 8774		int fw_len;
 8775
 8776		fw_len = tp->fw_len;
 8777		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
 8778		tw32(BUFMGR_MB_POOL_ADDR,
 8779		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
 8780		tw32(BUFMGR_MB_POOL_SIZE,
 8781		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
 8782	}
 8783
 8784	if (tp->dev->mtu <= ETH_DATA_LEN) {
 8785		tw32(BUFMGR_MB_RDMA_LOW_WATER,
 8786		     tp->bufmgr_config.mbuf_read_dma_low_water);
 8787		tw32(BUFMGR_MB_MACRX_LOW_WATER,
 8788		     tp->bufmgr_config.mbuf_mac_rx_low_water);
 8789		tw32(BUFMGR_MB_HIGH_WATER,
 8790		     tp->bufmgr_config.mbuf_high_water);
 8791	} else {
 8792		tw32(BUFMGR_MB_RDMA_LOW_WATER,
 8793		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
 8794		tw32(BUFMGR_MB_MACRX_LOW_WATER,
 8795		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
 8796		tw32(BUFMGR_MB_HIGH_WATER,
 8797		     tp->bufmgr_config.mbuf_high_water_jumbo);
 8798	}
 8799	tw32(BUFMGR_DMA_LOW_WATER,
 8800	     tp->bufmgr_config.dma_low_water);
 8801	tw32(BUFMGR_DMA_HIGH_WATER,
 8802	     tp->bufmgr_config.dma_high_water);
 8803
 8804	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
 8805	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
 8806		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
 8807	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
 8808	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
 8809	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
 8810		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
 8811	tw32(BUFMGR_MODE, val);
 8812	for (i = 0; i < 2000; i++) {
 8813		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
 8814			break;
 8815		udelay(10);
 8816	}
 8817	if (i >= 2000) {
 8818		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
 8819		return -ENODEV;
 8820	}
 8821
 8822	if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
 8823		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
 8824
 8825	tg3_setup_rxbd_thresholds(tp);
 8826
 8827	/* Initialize TG3_BDINFO's at:
 8828	 *  RCVDBDI_STD_BD:	standard eth size rx ring
 8829	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
 8830	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
 8831	 *
 8832	 * like so:
 8833	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
 8834	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
 8835	 *                              ring attribute flags
 8836	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
 8837	 *
 8838	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
 8839	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
 8840	 *
 8841	 * The size of each ring is fixed in the firmware, but the location is
 8842	 * configurable.
 8843	 */
 8844	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
 8845	     ((u64) tpr->rx_std_mapping >> 32));
 8846	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
 8847	     ((u64) tpr->rx_std_mapping & 0xffffffff));
 8848	if (!tg3_flag(tp, 5717_PLUS))
 8849		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
 8850		     NIC_SRAM_RX_BUFFER_DESC);
 8851
 8852	/* Disable the mini ring */
 8853	if (!tg3_flag(tp, 5705_PLUS))
 8854		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
 8855		     BDINFO_FLAGS_DISABLED);
 8856
 8857	/* Program the jumbo buffer descriptor ring control
 8858	 * blocks on those devices that have them.
 8859	 */
 8860	if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
 8861	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
 8862
 8863		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
 8864			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
 8865			     ((u64) tpr->rx_jmb_mapping >> 32));
 8866			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
 8867			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
 8868			val = TG3_RX_JMB_RING_SIZE(tp) <<
 8869			      BDINFO_FLAGS_MAXLEN_SHIFT;
 8870			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
 8871			     val | BDINFO_FLAGS_USE_EXT_RECV);
 8872			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
 8873			    tg3_flag(tp, 57765_CLASS))
 8874				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
 8875				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
 8876		} else {
 8877			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
 8878			     BDINFO_FLAGS_DISABLED);
 8879		}
 8880
 8881		if (tg3_flag(tp, 57765_PLUS)) {
 8882			val = TG3_RX_STD_RING_SIZE(tp);
 8883			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
 8884			val |= (TG3_RX_STD_DMA_SZ << 2);
 8885		} else
 8886			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
 8887	} else
 8888		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
 8889
 8890	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
 8891
 8892	tpr->rx_std_prod_idx = tp->rx_pending;
 8893	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
 8894
 8895	tpr->rx_jmb_prod_idx =
 8896		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
 8897	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
 8898
 8899	tg3_rings_reset(tp);
 8900
 8901	/* Initialize MAC address and backoff seed. */
 8902	__tg3_set_mac_addr(tp, 0);
 8903
 8904	/* MTU + Ethernet header + FCS + optional VLAN tag */
 8905	tw32(MAC_RX_MTU_SIZE,
 8906	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
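	/* e.g. the standard 1500-byte MTU gives 1500 + 14 + 4 + 4 = 1522,
	 * the classic maximum size of a VLAN-tagged Ethernet frame.
	 */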
 8907
 8908	/* The slot time is changed by tg3_setup_phy if we
 8909	 * run at gigabit with half duplex.
 8910	 */
 8911	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
 8912	      (6 << TX_LENGTHS_IPG_SHIFT) |
 8913	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
 8914
 8915	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
 8916		val |= tr32(MAC_TX_LENGTHS) &
 8917		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
 8918			TX_LENGTHS_CNT_DWN_VAL_MSK);
 8919
 8920	tw32(MAC_TX_LENGTHS, val);
 8921
 8922	/* Receive rules. */
 8923	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
 8924	tw32(RCVLPC_CONFIG, 0x0181);
 8925
 8926	/* Calculate the RDMAC_MODE setting early; we need it to
 8927	 * determine the RCVLPC_STATS_ENABLE mask.
 8928	 */
 8929	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
 8930		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
 8931		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
 8932		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
 8933		      RDMAC_MODE_LNGREAD_ENAB);
 8934
 8935	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
 8936		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
 8937
 8938	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
 8939	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
 8940	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
 8941		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
 8942			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
 8943			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
 8944
 8945	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
 8946	    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
 8947		if (tg3_flag(tp, TSO_CAPABLE) &&
 8948		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
 8949			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
 8950		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
 8951			   !tg3_flag(tp, IS_5788)) {
 8952			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
 8953		}
 8954	}
 8955
 8956	if (tg3_flag(tp, PCI_EXPRESS))
 8957		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
 8958
 8959	if (tg3_flag(tp, HW_TSO_1) ||
 8960	    tg3_flag(tp, HW_TSO_2) ||
 8961	    tg3_flag(tp, HW_TSO_3))
 8962		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
 8963
 8964	if (tg3_flag(tp, 57765_PLUS) ||
 8965	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
 8966	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
 8967		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
 8968
 8969	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
 8970		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
 8971
 8972	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
 8973	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
 8974	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
 8975	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
 8976	    tg3_flag(tp, 57765_PLUS)) {
 8977		val = tr32(TG3_RDMA_RSRVCTRL_REG);
 8978		if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
 8979			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
 8980				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
 8981				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
 8982			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
 8983			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
 8984			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
 8985		}
 8986		tw32(TG3_RDMA_RSRVCTRL_REG,
 8987		     val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
 8988	}
 8989
 8990	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
 8991	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
 8992		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
 8993		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
 8994		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
 8995		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
 8996	}
 8997
 8998	/* Receive/send statistics. */
 8999	if (tg3_flag(tp, 5750_PLUS)) {
 9000		val = tr32(RCVLPC_STATS_ENABLE);
 9001		val &= ~RCVLPC_STATSENAB_DACK_FIX;
 9002		tw32(RCVLPC_STATS_ENABLE, val);
 9003	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
 9004		   tg3_flag(tp, TSO_CAPABLE)) {
 9005		val = tr32(RCVLPC_STATS_ENABLE);
 9006		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
 9007		tw32(RCVLPC_STATS_ENABLE, val);
 9008	} else {
 9009		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
 9010	}
 9011	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
 9012	tw32(SNDDATAI_STATSENAB, 0xffffff);
 9013	tw32(SNDDATAI_STATSCTRL,
 9014	     (SNDDATAI_SCTRL_ENABLE |
 9015	      SNDDATAI_SCTRL_FASTUPD));
 9016
 9017	/* Set up the host coalescing engine. */
 9018	tw32(HOSTCC_MODE, 0);
 9019	for (i = 0; i < 2000; i++) {
 9020		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
 9021			break;
 9022		udelay(10);
 9023	}
 9024
 9025	__tg3_set_coalesce(tp, &tp->coal);
 9026
 9027	if (!tg3_flag(tp, 5705_PLUS)) {
 9028		/* Status/statistics block address.  See tg3_timer,
 9029		 * the tg3_periodic_fetch_stats call there, and
 9030		 * tg3_get_stats to see how this works for 5705/5750 chips.
 9031		 */
 9032		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
 9033		     ((u64) tp->stats_mapping >> 32));
 9034		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
 9035		     ((u64) tp->stats_mapping & 0xffffffff));
 9036		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
 9037
 9038		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
 9039
 9040		/* Clear statistics and status block memory areas */
 9041		for (i = NIC_SRAM_STATS_BLK;
 9042		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
 9043		     i += sizeof(u32)) {
 9044			tg3_write_mem(tp, i, 0);
 9045			udelay(40);
 9046		}
 9047	}
 9048
 9049	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
 9050
 9051	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
 9052	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
 9053	if (!tg3_flag(tp, 5705_PLUS))
 9054		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
 9055
 9056	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
 9057		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
 9058		/* reset to prevent losing 1st rx packet intermittently */
 9059		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
 9060		udelay(10);
 9061	}
 9062
 9063	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
 9064			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
 9065			MAC_MODE_FHDE_ENABLE;
 9066	if (tg3_flag(tp, ENABLE_APE))
 9067		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
 9068	if (!tg3_flag(tp, 5705_PLUS) &&
 9069	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
 9070	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
 9071		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
 9072	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
 9073	udelay(40);
 9074
 9075	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
 9076	 * If TG3_FLAG_IS_NIC is zero, we should read the
 9077	 * register to preserve the GPIO settings for LOMs. The GPIOs,
 9078	 * whether used as inputs or outputs, are set by boot code after
 9079	 * reset.
 9080	 */
 9081	if (!tg3_flag(tp, IS_NIC)) {
 9082		u32 gpio_mask;
 9083
 9084		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
 9085			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
 9086			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
 9087
 9088		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
 9089			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
 9090				     GRC_LCLCTRL_GPIO_OUTPUT3;
 9091
 9092		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
 9093			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
 9094
 9095		tp->grc_local_ctrl &= ~gpio_mask;
 9096		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
 9097
 9098		/* GPIO1 must be driven high for eeprom write protect */
 9099		if (tg3_flag(tp, EEPROM_WRITE_PROT))
 9100			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
 9101					       GRC_LCLCTRL_GPIO_OUTPUT1);
 9102	}
 9103	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
 9104	udelay(100);
 9105
 9106	if (tg3_flag(tp, USING_MSIX)) {
 9107		val = tr32(MSGINT_MODE);
 9108		val |= MSGINT_MODE_ENABLE;
 9109		if (tp->irq_cnt > 1)
 9110			val |= MSGINT_MODE_MULTIVEC_EN;
 9111		if (!tg3_flag(tp, 1SHOT_MSI))
 9112			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
 9113		tw32(MSGINT_MODE, val);
 9114	}
 9115
 9116	if (!tg3_flag(tp, 5705_PLUS)) {
 9117		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
 9118		udelay(40);
 9119	}
 9120
 9121	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
 9122	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
 9123	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
 9124	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
 9125	       WDMAC_MODE_LNGREAD_ENAB);
 9126
 9127	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
 9128	    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
 9129		if (tg3_flag(tp, TSO_CAPABLE) &&
 9130		    (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
 9131		     tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
 9132			/* nothing */
 9133		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
 9134			   !tg3_flag(tp, IS_5788)) {
 9135			val |= WDMAC_MODE_RX_ACCEL;
 9136		}
 9137	}
 9138
 9139	/* Enable host coalescing bug fix */
 9140	if (tg3_flag(tp, 5755_PLUS))
 9141		val |= WDMAC_MODE_STATUS_TAG_FIX;
 9142
 9143	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
 9144		val |= WDMAC_MODE_BURST_ALL_DATA;
 9145
 9146	tw32_f(WDMAC_MODE, val);
 9147	udelay(40);
 9148
 9149	if (tg3_flag(tp, PCIX_MODE)) {
 9150		u16 pcix_cmd;
 9151
 9152		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
 9153				     &pcix_cmd);
 9154		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
 9155			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
 9156			pcix_cmd |= PCI_X_CMD_READ_2K;
 9157		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
 9158			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
 9159			pcix_cmd |= PCI_X_CMD_READ_2K;
 9160		}
 9161		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
 9162				      pcix_cmd);
 9163	}
 9164
 9165	tw32_f(RDMAC_MODE, rdmac_mode);
 9166	udelay(40);
 9167
 9168	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
 9169	if (!tg3_flag(tp, 5705_PLUS))
 9170		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
 9171
 9172	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
 9173		tw32(SNDDATAC_MODE,
 9174		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
 9175	else
 9176		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
 9177
 9178	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
 9179	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
 9180	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
 9181	if (tg3_flag(tp, LRG_PROD_RING_CAP))
 9182		val |= RCVDBDI_MODE_LRG_RING_SZ;
 9183	tw32(RCVDBDI_MODE, val);
 9184	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
 9185	if (tg3_flag(tp, HW_TSO_1) ||
 9186	    tg3_flag(tp, HW_TSO_2) ||
 9187	    tg3_flag(tp, HW_TSO_3))
 9188		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
 9189	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
 9190	if (tg3_flag(tp, ENABLE_TSS))
 9191		val |= SNDBDI_MODE_MULTI_TXQ_EN;
 9192	tw32(SNDBDI_MODE, val);
 9193	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
 9194
 9195	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
 9196		err = tg3_load_5701_a0_firmware_fix(tp);
 9197		if (err)
 9198			return err;
 9199	}
 9200
 9201	if (tg3_flag(tp, TSO_CAPABLE)) {
 9202		err = tg3_load_tso_firmware(tp);
 9203		if (err)
 9204			return err;
 9205	}
 9206
 9207	tp->tx_mode = TX_MODE_ENABLE;
 9208
 9209	if (tg3_flag(tp, 5755_PLUS) ||
 9210	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
 9211		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
 9212
 9213	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
 9214		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
 9215		tp->tx_mode &= ~val;
 9216		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
 9217	}
 9218
 9219	tw32_f(MAC_TX_MODE, tp->tx_mode);
 9220	udelay(100);
 9221
 9222	if (tg3_flag(tp, ENABLE_RSS)) {
 9223		tg3_rss_write_indir_tbl(tp);
 9224
 9225		/* Setup the "secret" hash key. */
 9226		tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
 9227		tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
 9228		tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
 9229		tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
 9230		tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
 9231		tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
 9232		tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
 9233		tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
 9234		tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
 9235		tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
 9236	}
 9237
 9238	tp->rx_mode = RX_MODE_ENABLE;
 9239	if (tg3_flag(tp, 5755_PLUS))
 9240		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
 9241
 9242	if (tg3_flag(tp, ENABLE_RSS))
 9243		tp->rx_mode |= RX_MODE_RSS_ENABLE |
 9244			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
 9245			       RX_MODE_RSS_IPV6_HASH_EN |
 9246			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
 9247			       RX_MODE_RSS_IPV4_HASH_EN |
 9248			       RX_MODE_RSS_TCP_IPV4_HASH_EN;
 9249
 9250	tw32_f(MAC_RX_MODE, tp->rx_mode);
 9251	udelay(10);
 9252
 9253	tw32(MAC_LED_CTRL, tp->led_ctrl);
 9254
 9255	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
 9256	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
 9257		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
 9258		udelay(10);
 9259	}
 9260	tw32_f(MAC_RX_MODE, tp->rx_mode);
 9261	udelay(10);
 9262
 9263	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
 9264		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
 9265			!(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
 9266			/* Set drive transmission level to 1.2V  */
 9267			/* only if the signal pre-emphasis bit is not set  */
 9268			val = tr32(MAC_SERDES_CFG);
 9269			val &= 0xfffff000;
 9270			val |= 0x880;
 9271			tw32(MAC_SERDES_CFG, val);
 9272		}
 9273		if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
 9274			tw32(MAC_SERDES_CFG, 0x616000);
 9275	}
 9276
 9277	/* Prevent chip from dropping frames when flow control
 9278	 * is enabled.
 9279	 */
 9280	if (tg3_flag(tp, 57765_CLASS))
 9281		val = 1;
 9282	else
 9283		val = 2;
 9284	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
 9285
 9286	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
 9287	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
 9288		/* Use hardware link auto-negotiation */
 9289		tg3_flag_set(tp, HW_AUTONEG);
 9290	}
 9291
 9292	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
 9293	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
 9294		u32 tmp;
 9295
 9296		tmp = tr32(SERDES_RX_CTRL);
 9297		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
 9298		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
 9299		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
 9300		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
 9301	}
 9302
 9303	if (!tg3_flag(tp, USE_PHYLIB)) {
 9304		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
 9305			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
 9306
 9307		err = tg3_setup_phy(tp, 0);
 9308		if (err)
 9309			return err;
 9310
 9311		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
 9312		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
 9313			u32 tmp;
 9314
 9315			/* Clear CRC stats. */
 9316			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
 9317				tg3_writephy(tp, MII_TG3_TEST1,
 9318					     tmp | MII_TG3_TEST1_CRC_EN);
 9319				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
 9320			}
 9321		}
 9322	}
 9323
 9324	__tg3_set_rx_mode(tp->dev);
 9325
 9326	/* Initialize receive rules. */
 9327	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
 9328	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
 9329	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
 9330	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
 9331
 9332	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
 9333		limit = 8;
 9334	else
 9335		limit = 16;
 9336	if (tg3_flag(tp, ENABLE_ASF))
 9337		limit -= 4;
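      	/* The switch below leans on case fall-through: starting at the
      	 * highest unused slot, it clears every rule down through rule 4.
      	 * Rules 0 and 1 were programmed above; the four slots skipped
      	 * when ASF is enabled are, presumably, left for the ASF firmware.
      	 */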
 9338	switch (limit) {
 9339	case 16:
 9340		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
 9341	case 15:
 9342		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
 9343	case 14:
 9344		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
 9345	case 13:
 9346		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
 9347	case 12:
 9348		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
 9349	case 11:
 9350		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
 9351	case 10:
 9352		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
 9353	case 9:
 9354		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
 9355	case 8:
 9356		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
 9357	case 7:
 9358		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
 9359	case 6:
 9360		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
 9361	case 5:
 9362		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
 9363	case 4:
 9364		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
 9365	case 3:
 9366		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
 9367	case 2:
 9368	case 1:
 9369
 9370	default:
 9371		break;
 9372	}
 9373
 9374	if (tg3_flag(tp, ENABLE_APE))
 9375		/* Write our heartbeat update interval to APE. */
 9376		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
 9377				APE_HOST_HEARTBEAT_INT_DISABLE);
 9378
 9379	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
 9380
 9381	return 0;
 9382}
 9383
 9384/* Called at device open time to get the chip ready for
 9385 * packet processing.  Invoked with tp->lock held.
 9386 */
 9387static int tg3_init_hw(struct tg3 *tp, int reset_phy)
 9388{
 9389	tg3_switch_clocks(tp);
 9390
 9391	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
 9392
 9393	return tg3_reset_hw(tp, reset_phy);
 9394}
 9395
 9396#define TG3_STAT_ADD32(PSTAT, REG) \
 9397do {	u32 __val = tr32(REG); \
 9398	(PSTAT)->low += __val; \
 9399	if ((PSTAT)->low < __val) \
 9400		(PSTAT)->high += 1; \
 9401} while (0)
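
      /* A worked example of the carry detection above: if (PSTAT)->low is
       * 0xfffffff0 and the register read returns 0x20, the 32-bit addition
       * wraps to 0x10.  Since 0x10 < 0x20 the wrap is detected, high is
       * incremented, and the 64-bit total 0x1_00000010 spans high/low.
       */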
 9402
 9403static void tg3_periodic_fetch_stats(struct tg3 *tp)
 9404{
 9405	struct tg3_hw_stats *sp = tp->hw_stats;
 9406
 9407	if (!netif_carrier_ok(tp->dev))
 9408		return;
 9409
 9410	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
 9411	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
 9412	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
 9413	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
 9414	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
 9415	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
 9416	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
 9417	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
 9418	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
 9419	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
 9420	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
 9421	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
 9422	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
 9423
 9424	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
 9425	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
 9426	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
 9427	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
 9428	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
 9429	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
 9430	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
 9431	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
 9432	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
 9433	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
 9434	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
 9435	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
 9436	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
 9437	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
 9438
 9439	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
 9440	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
 9441	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
 9442	    tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
 9443		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
 9444	} else {
 9445		u32 val = tr32(HOSTCC_FLOW_ATTN);
 9446		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
 9447		if (val) {
 9448			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
 9449			sp->rx_discards.low += val;
 9450			if (sp->rx_discards.low < val)
 9451				sp->rx_discards.high += 1;
 9452		}
 9453		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
 9454	}
 9455	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
 9456}
 9457
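      /* Workaround for missed MSIs: if a vector still has work pending but
       * its rx/tx consumer indices have not moved since the last timer
       * tick, the interrupt was most likely lost, so the handler is re-run
       * by calling tg3_msi() directly.  chk_msi_cnt grants one extra tick
       * of grace before the workaround fires.
       */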
 9458static void tg3_chk_missed_msi(struct tg3 *tp)
 9459{
 9460	u32 i;
 9461
 9462	for (i = 0; i < tp->irq_cnt; i++) {
 9463		struct tg3_napi *tnapi = &tp->napi[i];
 9464
 9465		if (tg3_has_work(tnapi)) {
 9466			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
 9467			    tnapi->last_tx_cons == tnapi->tx_cons) {
 9468				if (tnapi->chk_msi_cnt < 1) {
 9469					tnapi->chk_msi_cnt++;
 9470					return;
 9471				}
 9472				tg3_msi(0, tnapi);
 9473			}
 9474		}
 9475		tnapi->chk_msi_cnt = 0;
 9476		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
 9477		tnapi->last_tx_cons = tnapi->tx_cons;
 9478	}
 9479}
 9480
 9481static void tg3_timer(unsigned long __opaque)
 9482{
 9483	struct tg3 *tp = (struct tg3 *) __opaque;
 9484
 9485	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
 9486		goto restart_timer;
 9487
 9488	spin_lock(&tp->lock);
 9489
 9490	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
 9491	    tg3_flag(tp, 57765_CLASS))
 9492		tg3_chk_missed_msi(tp);
 9493
 9494	if (!tg3_flag(tp, TAGGED_STATUS)) {
  9495		/* All of this garbage is because, when using non-tagged
  9496		 * IRQ status, the mailbox/status_block protocol the chip
  9497		 * uses with the CPU is race prone.
 9498		 */
 9499		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
 9500			tw32(GRC_LOCAL_CTRL,
 9501			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
 9502		} else {
 9503			tw32(HOSTCC_MODE, tp->coalesce_mode |
 9504			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
 9505		}
 9506
 9507		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
 9508			spin_unlock(&tp->lock);
 9509			tg3_reset_task_schedule(tp);
 9510			goto restart_timer;
 9511		}
 9512	}
 9513
 9514	/* This part only runs once per second. */
 9515	if (!--tp->timer_counter) {
 9516		if (tg3_flag(tp, 5705_PLUS))
 9517			tg3_periodic_fetch_stats(tp);
 9518
 9519		if (tp->setlpicnt && !--tp->setlpicnt)
 9520			tg3_phy_eee_enable(tp);
 9521
 9522		if (tg3_flag(tp, USE_LINKCHG_REG)) {
 9523			u32 mac_stat;
 9524			int phy_event;
 9525
 9526			mac_stat = tr32(MAC_STATUS);
 9527
 9528			phy_event = 0;
 9529			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
 9530				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
 9531					phy_event = 1;
 9532			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
 9533				phy_event = 1;
 9534
 9535			if (phy_event)
 9536				tg3_setup_phy(tp, 0);
 9537		} else if (tg3_flag(tp, POLL_SERDES)) {
 9538			u32 mac_stat = tr32(MAC_STATUS);
 9539			int need_setup = 0;
 9540
 9541			if (netif_carrier_ok(tp->dev) &&
 9542			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
 9543				need_setup = 1;
 9544			}
 9545			if (!netif_carrier_ok(tp->dev) &&
 9546			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
 9547					 MAC_STATUS_SIGNAL_DET))) {
 9548				need_setup = 1;
 9549			}
 9550			if (need_setup) {
 9551				if (!tp->serdes_counter) {
 9552					tw32_f(MAC_MODE,
 9553					     (tp->mac_mode &
 9554					      ~MAC_MODE_PORT_MODE_MASK));
 9555					udelay(40);
 9556					tw32_f(MAC_MODE, tp->mac_mode);
 9557					udelay(40);
 9558				}
 9559				tg3_setup_phy(tp, 0);
 9560			}
 9561		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
 9562			   tg3_flag(tp, 5780_CLASS)) {
 9563			tg3_serdes_parallel_detect(tp);
 9564		}
 9565
 9566		tp->timer_counter = tp->timer_multiplier;
 9567	}
 9568
 9569	/* Heartbeat is only sent once every 2 seconds.
 9570	 *
 9571	 * The heartbeat is to tell the ASF firmware that the host
 9572	 * driver is still alive.  In the event that the OS crashes,
 9573	 * ASF needs to reset the hardware to free up the FIFO space
 9574	 * that may be filled with rx packets destined for the host.
 9575	 * If the FIFO is full, ASF will no longer function properly.
 9576	 *
 9577	 * Unintended resets have been reported on real time kernels
 9578	 * where the timer doesn't run on time.  Netpoll will also have
  9579	 * the same problem.
 9580	 *
 9581	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
 9582	 * to check the ring condition when the heartbeat is expiring
 9583	 * before doing the reset.  This will prevent most unintended
 9584	 * resets.
 9585	 */
 9586	if (!--tp->asf_counter) {
 9587		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
 9588			tg3_wait_for_event_ack(tp);
 9589
 9590			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
 9591				      FWCMD_NICDRV_ALIVE3);
 9592			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
 9593			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
 9594				      TG3_FW_UPDATE_TIMEOUT_SEC);
 9595
 9596			tg3_generate_fw_event(tp);
 9597		}
 9598		tp->asf_counter = tp->asf_multiplier;
 9599	}
 9600
 9601	spin_unlock(&tp->lock);
 9602
 9603restart_timer:
 9604	tp->timer.expires = jiffies + tp->timer_offset;
 9605	add_timer(&tp->timer);
 9606}
 9607
 9608static void __devinit tg3_timer_init(struct tg3 *tp)
 9609{
 9610	if (tg3_flag(tp, TAGGED_STATUS) &&
 9611	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
 9612	    !tg3_flag(tp, 57765_CLASS))
 9613		tp->timer_offset = HZ;
 9614	else
 9615		tp->timer_offset = HZ / 10;
 9616
 9617	BUG_ON(tp->timer_offset > HZ);
 9618
 9619	tp->timer_multiplier = (HZ / tp->timer_offset);
 9620	tp->asf_multiplier = (HZ / tp->timer_offset) *
 9621			     TG3_FW_UPDATE_FREQ_SEC;
 9622
 9623	init_timer(&tp->timer);
 9624	tp->timer.data = (unsigned long) tp;
 9625	tp->timer.function = tg3_timer;
 9626}
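
      /* For example (illustrative, assuming HZ = 1000): a chip without
       * tagged status gets timer_offset = HZ / 10, a 100 ms tick, so
       * timer_multiplier = 10 and the once-per-second block in tg3_timer()
       * runs every tenth tick; asf_multiplier stretches that further by
       * TG3_FW_UPDATE_FREQ_SEC for the ASF heartbeat.
       */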
 9627
 9628static void tg3_timer_start(struct tg3 *tp)
 9629{
 9630	tp->asf_counter   = tp->asf_multiplier;
 9631	tp->timer_counter = tp->timer_multiplier;
 9632
 9633	tp->timer.expires = jiffies + tp->timer_offset;
 9634	add_timer(&tp->timer);
 9635}
 9636
 9637static void tg3_timer_stop(struct tg3 *tp)
 9638{
 9639	del_timer_sync(&tp->timer);
 9640}
 9641
 9642/* Restart hardware after configuration changes, self-test, etc.
 9643 * Invoked with tp->lock held.
 9644 */
 9645static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
 9646	__releases(tp->lock)
 9647	__acquires(tp->lock)
 9648{
 9649	int err;
 9650
 9651	err = tg3_init_hw(tp, reset_phy);
 9652	if (err) {
 9653		netdev_err(tp->dev,
 9654			   "Failed to re-initialize device, aborting\n");
 9655		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 9656		tg3_full_unlock(tp);
 9657		tg3_timer_stop(tp);
 9658		tp->irq_sync = 0;
 9659		tg3_napi_enable(tp);
 9660		dev_close(tp->dev);
 9661		tg3_full_lock(tp, 0);
 9662	}
 9663	return err;
 9664}
 9665
 9666static void tg3_reset_task(struct work_struct *work)
 9667{
 9668	struct tg3 *tp = container_of(work, struct tg3, reset_task);
 9669	int err;
 9670
 9671	tg3_full_lock(tp, 0);
 9672
 9673	if (!netif_running(tp->dev)) {
 9674		tg3_flag_clear(tp, RESET_TASK_PENDING);
 9675		tg3_full_unlock(tp);
 9676		return;
 9677	}
 9678
 9679	tg3_full_unlock(tp);
 9680
 9681	tg3_phy_stop(tp);
 9682
 9683	tg3_netif_stop(tp);
 9684
 9685	tg3_full_lock(tp, 1);
 9686
 9687	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
 9688		tp->write32_tx_mbox = tg3_write32_tx_mbox;
 9689		tp->write32_rx_mbox = tg3_write_flush_reg32;
 9690		tg3_flag_set(tp, MBOX_WRITE_REORDER);
 9691		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
 9692	}
 9693
 9694	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
 9695	err = tg3_init_hw(tp, 1);
 9696	if (err)
 9697		goto out;
 9698
 9699	tg3_netif_start(tp);
 9700
 9701out:
 9702	tg3_full_unlock(tp);
 9703
 9704	if (!err)
 9705		tg3_phy_start(tp);
 9706
 9707	tg3_flag_clear(tp, RESET_TASK_PENDING);
 9708}
 9709
 9710static int tg3_request_irq(struct tg3 *tp, int irq_num)
 9711{
 9712	irq_handler_t fn;
 9713	unsigned long flags;
 9714	char *name;
 9715	struct tg3_napi *tnapi = &tp->napi[irq_num];
 9716
 9717	if (tp->irq_cnt == 1)
 9718		name = tp->dev->name;
 9719	else {
 9720		name = &tnapi->irq_lbl[0];
 9721		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
 9722		name[IFNAMSIZ-1] = 0;
 9723	}
 9724
 9725	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
 9726		fn = tg3_msi;
 9727		if (tg3_flag(tp, 1SHOT_MSI))
 9728			fn = tg3_msi_1shot;
 9729		flags = 0;
 9730	} else {
 9731		fn = tg3_interrupt;
 9732		if (tg3_flag(tp, TAGGED_STATUS))
 9733			fn = tg3_interrupt_tagged;
 9734		flags = IRQF_SHARED;
 9735	}
 9736
 9737	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
 9738}
 9739
 9740static int tg3_test_interrupt(struct tg3 *tp)
 9741{
 9742	struct tg3_napi *tnapi = &tp->napi[0];
 9743	struct net_device *dev = tp->dev;
 9744	int err, i, intr_ok = 0;
 9745	u32 val;
 9746
 9747	if (!netif_running(dev))
 9748		return -ENODEV;
 9749
 9750	tg3_disable_ints(tp);
 9751
 9752	free_irq(tnapi->irq_vec, tnapi);
 9753
 9754	/*
  9755	 * Turn off MSI one shot mode.  Otherwise this test has no
  9756	 * way to observe whether the interrupt was delivered.
 9757	 */
 9758	if (tg3_flag(tp, 57765_PLUS)) {
 9759		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
 9760		tw32(MSGINT_MODE, val);
 9761	}
 9762
 9763	err = request_irq(tnapi->irq_vec, tg3_test_isr,
 9764			  IRQF_SHARED, dev->name, tnapi);
 9765	if (err)
 9766		return err;
 9767
 9768	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
 9769	tg3_enable_ints(tp);
 9770
 9771	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
 9772	       tnapi->coal_now);
 9773
 9774	for (i = 0; i < 5; i++) {
 9775		u32 int_mbox, misc_host_ctrl;
 9776
 9777		int_mbox = tr32_mailbox(tnapi->int_mbox);
 9778		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
 9779
 9780		if ((int_mbox != 0) ||
 9781		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
 9782			intr_ok = 1;
 9783			break;
 9784		}
 9785
 9786		if (tg3_flag(tp, 57765_PLUS) &&
 9787		    tnapi->hw_status->status_tag != tnapi->last_tag)
 9788			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
 9789
 9790		msleep(10);
 9791	}
 9792
 9793	tg3_disable_ints(tp);
 9794
 9795	free_irq(tnapi->irq_vec, tnapi);
 9796
 9797	err = tg3_request_irq(tp, 0);
 9798
 9799	if (err)
 9800		return err;
 9801
 9802	if (intr_ok) {
 9803		/* Reenable MSI one shot mode. */
 9804		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
 9805			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
 9806			tw32(MSGINT_MODE, val);
 9807		}
 9808		return 0;
 9809	}
 9810
 9811	return -EIO;
 9812}
 9813
  9814/* Returns 0 if the MSI test succeeds, or if the MSI test fails but
  9815 * INTx mode is successfully restored.
 9816 */
 9817static int tg3_test_msi(struct tg3 *tp)
 9818{
 9819	int err;
 9820	u16 pci_cmd;
 9821
 9822	if (!tg3_flag(tp, USING_MSI))
 9823		return 0;
 9824
 9825	/* Turn off SERR reporting in case MSI terminates with Master
 9826	 * Abort.
 9827	 */
 9828	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
 9829	pci_write_config_word(tp->pdev, PCI_COMMAND,
 9830			      pci_cmd & ~PCI_COMMAND_SERR);
 9831
 9832	err = tg3_test_interrupt(tp);
 9833
 9834	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
 9835
 9836	if (!err)
 9837		return 0;
 9838
 9839	/* other failures */
 9840	if (err != -EIO)
 9841		return err;
 9842
 9843	/* MSI test failed, go back to INTx mode */
 9844	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
 9845		    "to INTx mode. Please report this failure to the PCI "
 9846		    "maintainer and include system chipset information\n");
 9847
 9848	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
 9849
 9850	pci_disable_msi(tp->pdev);
 9851
 9852	tg3_flag_clear(tp, USING_MSI);
 9853	tp->napi[0].irq_vec = tp->pdev->irq;
 9854
 9855	err = tg3_request_irq(tp, 0);
 9856	if (err)
 9857		return err;
 9858
 9859	/* Need to reset the chip because the MSI cycle may have terminated
 9860	 * with Master Abort.
 9861	 */
 9862	tg3_full_lock(tp, 1);
 9863
 9864	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 9865	err = tg3_init_hw(tp, 1);
 9866
 9867	tg3_full_unlock(tp);
 9868
 9869	if (err)
 9870		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
 9871
 9872	return err;
 9873}
 9874
 9875static int tg3_request_firmware(struct tg3 *tp)
 9876{
 9877	const __be32 *fw_data;
 9878
 9879	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
 9880		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
 9881			   tp->fw_needed);
 9882		return -ENOENT;
 9883	}
 9884
 9885	fw_data = (void *)tp->fw->data;
 9886
 9887	/* Firmware blob starts with version numbers, followed by
 9888	 * start address and _full_ length including BSS sections
  9889	 * (which must be longer than the actual data, of course).
 9890	 */
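      	/* An illustrative view of that header (inferred from the reads in
      	 * this driver; not a structure it actually declares):
      	 *
      	 *	fw_data[0]	version
      	 *	fw_data[1]	start (load) address
      	 *	fw_data[2]	full image length, including BSS
      	 *	fw_data[3]...	text/data payload
      	 */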
 9891
 9892	tp->fw_len = be32_to_cpu(fw_data[2]);	/* includes bss */
 9893	if (tp->fw_len < (tp->fw->size - 12)) {
 9894		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
 9895			   tp->fw_len, tp->fw_needed);
 9896		release_firmware(tp->fw);
 9897		tp->fw = NULL;
 9898		return -EINVAL;
 9899	}
 9900
 9901	/* We no longer need firmware; we have it. */
 9902	tp->fw_needed = NULL;
 9903	return 0;
 9904}
 9905
 9906static bool tg3_enable_msix(struct tg3 *tp)
 9907{
 9908	int i, rc;
 9909	struct msix_entry msix_ent[tp->irq_max];
 9910
 9911	tp->irq_cnt = num_online_cpus();
 9912	if (tp->irq_cnt > 1) {
 9913		/* We want as many rx rings enabled as there are cpus.
 9914		 * In multiqueue MSI-X mode, the first MSI-X vector
 9915		 * only deals with link interrupts, etc, so we add
 9916		 * one to the number of vectors we are requesting.
 9917		 */
 9918		tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
 9919	}
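      	/* e.g. with 4 online CPUs this requests min(4 + 1, tp->irq_max)
      	 * vectors: one for link interrupts, etc., plus up to four rx
      	 * rings.
      	 */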
 9920
 9921	for (i = 0; i < tp->irq_max; i++) {
 9922		msix_ent[i].entry  = i;
 9923		msix_ent[i].vector = 0;
 9924	}
 9925
 9926	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
 9927	if (rc < 0) {
 9928		return false;
 9929	} else if (rc != 0) {
 9930		if (pci_enable_msix(tp->pdev, msix_ent, rc))
 9931			return false;
 9932		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
 9933			      tp->irq_cnt, rc);
 9934		tp->irq_cnt = rc;
 9935	}
 9936
 9937	for (i = 0; i < tp->irq_max; i++)
 9938		tp->napi[i].irq_vec = msix_ent[i].vector;
 9939
 9940	netif_set_real_num_tx_queues(tp->dev, 1);
 9941	rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
 9942	if (netif_set_real_num_rx_queues(tp->dev, rc)) {
 9943		pci_disable_msix(tp->pdev);
 9944		return false;
 9945	}
 9946
 9947	if (tp->irq_cnt > 1) {
 9948		tg3_flag_set(tp, ENABLE_RSS);
 9949
 9950		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
 9951		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
 9952			tg3_flag_set(tp, ENABLE_TSS);
 9953			netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
 9954		}
 9955	}
 9956
 9957	return true;
 9958}
 9959
 9960static void tg3_ints_init(struct tg3 *tp)
 9961{
 9962	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
 9963	    !tg3_flag(tp, TAGGED_STATUS)) {
  9964		/* All MSI-supporting chips should support tagged
  9965		 * status.  Warn and fall back to INTx if not.
 9966		 */
 9967		netdev_warn(tp->dev,
 9968			    "MSI without TAGGED_STATUS? Not using MSI\n");
 9969		goto defcfg;
 9970	}
 9971
 9972	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
 9973		tg3_flag_set(tp, USING_MSIX);
 9974	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
 9975		tg3_flag_set(tp, USING_MSI);
 9976
 9977	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
 9978		u32 msi_mode = tr32(MSGINT_MODE);
 9979		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
 9980			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
 9981		if (!tg3_flag(tp, 1SHOT_MSI))
 9982			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
 9983		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
 9984	}
 9985defcfg:
 9986	if (!tg3_flag(tp, USING_MSIX)) {
 9987		tp->irq_cnt = 1;
 9988		tp->napi[0].irq_vec = tp->pdev->irq;
 9989		netif_set_real_num_tx_queues(tp->dev, 1);
 9990		netif_set_real_num_rx_queues(tp->dev, 1);
 9991	}
 9992}
 9993
 9994static void tg3_ints_fini(struct tg3 *tp)
 9995{
 9996	if (tg3_flag(tp, USING_MSIX))
 9997		pci_disable_msix(tp->pdev);
 9998	else if (tg3_flag(tp, USING_MSI))
 9999		pci_disable_msi(tp->pdev);
10000	tg3_flag_clear(tp, USING_MSI);
10001	tg3_flag_clear(tp, USING_MSIX);
10002	tg3_flag_clear(tp, ENABLE_RSS);
10003	tg3_flag_clear(tp, ENABLE_TSS);
10004}
10005
10006static int tg3_open(struct net_device *dev)
10007{
10008	struct tg3 *tp = netdev_priv(dev);
10009	int i, err;
10010
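      	/* For CHIPREV_ID_5701_A0 the firmware is a mandatory bug fix, so
      	 * a load failure is fatal; on other chips it only provides TSO,
      	 * so failure merely costs the TSO capability (see the branches
      	 * below).
      	 */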
10011	if (tp->fw_needed) {
10012		err = tg3_request_firmware(tp);
10013		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
10014			if (err)
10015				return err;
10016		} else if (err) {
10017			netdev_warn(tp->dev, "TSO capability disabled\n");
10018			tg3_flag_clear(tp, TSO_CAPABLE);
10019		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
10020			netdev_notice(tp->dev, "TSO capability restored\n");
10021			tg3_flag_set(tp, TSO_CAPABLE);
10022		}
10023	}
10024
10025	netif_carrier_off(tp->dev);
10026
10027	err = tg3_power_up(tp);
10028	if (err)
10029		return err;
10030
10031	tg3_full_lock(tp, 0);
10032
10033	tg3_disable_ints(tp);
10034	tg3_flag_clear(tp, INIT_COMPLETE);
10035
10036	tg3_full_unlock(tp);
10037
10038	/*
10039	 * Setup interrupts first so we know how
10040	 * many NAPI resources to allocate
10041	 */
10042	tg3_ints_init(tp);
10043
10044	tg3_rss_check_indir_tbl(tp);
10045
10046	/* The placement of this call is tied
10047	 * to the setup and use of Host TX descriptors.
10048	 */
10049	err = tg3_alloc_consistent(tp);
10050	if (err)
10051		goto err_out1;
10052
10053	tg3_napi_init(tp);
10054
10055	tg3_napi_enable(tp);
10056
10057	for (i = 0; i < tp->irq_cnt; i++) {
10058		struct tg3_napi *tnapi = &tp->napi[i];
10059		err = tg3_request_irq(tp, i);
10060		if (err) {
10061			for (i--; i >= 0; i--) {
10062				tnapi = &tp->napi[i];
10063				free_irq(tnapi->irq_vec, tnapi);
10064			}
10065			goto err_out2;
10066		}
10067	}
10068
10069	tg3_full_lock(tp, 0);
10070
10071	err = tg3_init_hw(tp, 1);
10072	if (err) {
10073		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10074		tg3_free_rings(tp);
10075	}
10076
10077	tg3_full_unlock(tp);
10078
10079	if (err)
10080		goto err_out3;
10081
10082	if (tg3_flag(tp, USING_MSI)) {
10083		err = tg3_test_msi(tp);
10084
10085		if (err) {
10086			tg3_full_lock(tp, 0);
10087			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10088			tg3_free_rings(tp);
10089			tg3_full_unlock(tp);
10090
10091			goto err_out2;
10092		}
10093
10094		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10095			u32 val = tr32(PCIE_TRANSACTION_CFG);
10096
10097			tw32(PCIE_TRANSACTION_CFG,
10098			     val | PCIE_TRANS_CFG_1SHOT_MSI);
10099		}
10100	}
10101
10102	tg3_phy_start(tp);
10103
10104	tg3_full_lock(tp, 0);
10105
10106	tg3_timer_start(tp);
10107	tg3_flag_set(tp, INIT_COMPLETE);
10108	tg3_enable_ints(tp);
10109
10110	tg3_full_unlock(tp);
10111
10112	netif_tx_start_all_queues(dev);
10113
10114	/*
 10115	 * Reset the loopback feature if it was turned on while the device
 10116	 * was down, to make sure that it is installed properly now.
10117	 */
10118	if (dev->features & NETIF_F_LOOPBACK)
10119		tg3_set_loopback(dev, dev->features);
10120
10121	return 0;
10122
10123err_out3:
10124	for (i = tp->irq_cnt - 1; i >= 0; i--) {
10125		struct tg3_napi *tnapi = &tp->napi[i];
10126		free_irq(tnapi->irq_vec, tnapi);
10127	}
10128
10129err_out2:
10130	tg3_napi_disable(tp);
10131	tg3_napi_fini(tp);
10132	tg3_free_consistent(tp);
10133
10134err_out1:
10135	tg3_ints_fini(tp);
10136	tg3_frob_aux_power(tp, false);
10137	pci_set_power_state(tp->pdev, PCI_D3hot);
10138	return err;
10139}
10140
10141static int tg3_close(struct net_device *dev)
10142{
10143	int i;
10144	struct tg3 *tp = netdev_priv(dev);
10145
10146	tg3_napi_disable(tp);
10147	tg3_reset_task_cancel(tp);
10148
10149	netif_tx_stop_all_queues(dev);
10150
10151	tg3_timer_stop(tp);
10152
10153	tg3_phy_stop(tp);
10154
10155	tg3_full_lock(tp, 1);
10156
10157	tg3_disable_ints(tp);
10158
10159	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10160	tg3_free_rings(tp);
10161	tg3_flag_clear(tp, INIT_COMPLETE);
10162
10163	tg3_full_unlock(tp);
10164
10165	for (i = tp->irq_cnt - 1; i >= 0; i--) {
10166		struct tg3_napi *tnapi = &tp->napi[i];
10167		free_irq(tnapi->irq_vec, tnapi);
10168	}
10169
10170	tg3_ints_fini(tp);
10171
10172	/* Clear stats across close / open calls */
10173	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10174	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10175
10176	tg3_napi_fini(tp);
10177
10178	tg3_free_consistent(tp);
10179
10180	tg3_power_down(tp);
10181
10182	netif_carrier_off(tp->dev);
10183
10184	return 0;
10185}
10186
10187static inline u64 get_stat64(tg3_stat64_t *val)
10188{
 10189	return ((u64)val->high << 32) | ((u64)val->low);
10190}
10191
10192static u64 tg3_calc_crc_errors(struct tg3 *tp)
10193{
10194	struct tg3_hw_stats *hw_stats = tp->hw_stats;
10195
10196	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10197	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10198	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10199		u32 val;
10200
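      		/* Reading MII_TG3_RXR_COUNTERS with CRC-test mode enabled
      		 * returns the CRC error count latched since the previous
      		 * read; the same sequence appears under "Clear CRC stats."
      		 * in tg3_reset_hw() above.
      		 */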
10201		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10202			tg3_writephy(tp, MII_TG3_TEST1,
10203				     val | MII_TG3_TEST1_CRC_EN);
10204			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10205		} else
10206			val = 0;
10207
10208		tp->phy_crc_errors += val;
10209
10210		return tp->phy_crc_errors;
10211	}
10212
10213	return get_stat64(&hw_stats->rx_fcs_errors);
10214}
10215
10216#define ESTAT_ADD(member) \
10217	estats->member =	old_estats->member + \
10218				get_stat64(&hw_stats->member)
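
      /* Each accumulated value is the running total saved in estats_prev
       * (snapshotted before chip resets while the device stays up) plus
       * the live 64-bit hardware counter, so ethtool totals stay monotonic
       * across resets; tg3_close() clears the saved totals.
       */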
10219
10220static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10221{
10222	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10223	struct tg3_hw_stats *hw_stats = tp->hw_stats;
10224
10225	ESTAT_ADD(rx_octets);
10226	ESTAT_ADD(rx_fragments);
10227	ESTAT_ADD(rx_ucast_packets);
10228	ESTAT_ADD(rx_mcast_packets);
10229	ESTAT_ADD(rx_bcast_packets);
10230	ESTAT_ADD(rx_fcs_errors);
10231	ESTAT_ADD(rx_align_errors);
10232	ESTAT_ADD(rx_xon_pause_rcvd);
10233	ESTAT_ADD(rx_xoff_pause_rcvd);
10234	ESTAT_ADD(rx_mac_ctrl_rcvd);
10235	ESTAT_ADD(rx_xoff_entered);
10236	ESTAT_ADD(rx_frame_too_long_errors);
10237	ESTAT_ADD(rx_jabbers);
10238	ESTAT_ADD(rx_undersize_packets);
10239	ESTAT_ADD(rx_in_length_errors);
10240	ESTAT_ADD(rx_out_length_errors);
10241	ESTAT_ADD(rx_64_or_less_octet_packets);
10242	ESTAT_ADD(rx_65_to_127_octet_packets);
10243	ESTAT_ADD(rx_128_to_255_octet_packets);
10244	ESTAT_ADD(rx_256_to_511_octet_packets);
10245	ESTAT_ADD(rx_512_to_1023_octet_packets);
10246	ESTAT_ADD(rx_1024_to_1522_octet_packets);
10247	ESTAT_ADD(rx_1523_to_2047_octet_packets);
10248	ESTAT_ADD(rx_2048_to_4095_octet_packets);
10249	ESTAT_ADD(rx_4096_to_8191_octet_packets);
10250	ESTAT_ADD(rx_8192_to_9022_octet_packets);
10251
10252	ESTAT_ADD(tx_octets);
10253	ESTAT_ADD(tx_collisions);
10254	ESTAT_ADD(tx_xon_sent);
10255	ESTAT_ADD(tx_xoff_sent);
10256	ESTAT_ADD(tx_flow_control);
10257	ESTAT_ADD(tx_mac_errors);
10258	ESTAT_ADD(tx_single_collisions);
10259	ESTAT_ADD(tx_mult_collisions);
10260	ESTAT_ADD(tx_deferred);
10261	ESTAT_ADD(tx_excessive_collisions);
10262	ESTAT_ADD(tx_late_collisions);
10263	ESTAT_ADD(tx_collide_2times);
10264	ESTAT_ADD(tx_collide_3times);
10265	ESTAT_ADD(tx_collide_4times);
10266	ESTAT_ADD(tx_collide_5times);
10267	ESTAT_ADD(tx_collide_6times);
10268	ESTAT_ADD(tx_collide_7times);
10269	ESTAT_ADD(tx_collide_8times);
10270	ESTAT_ADD(tx_collide_9times);
10271	ESTAT_ADD(tx_collide_10times);
10272	ESTAT_ADD(tx_collide_11times);
10273	ESTAT_ADD(tx_collide_12times);
10274	ESTAT_ADD(tx_collide_13times);
10275	ESTAT_ADD(tx_collide_14times);
10276	ESTAT_ADD(tx_collide_15times);
10277	ESTAT_ADD(tx_ucast_packets);
10278	ESTAT_ADD(tx_mcast_packets);
10279	ESTAT_ADD(tx_bcast_packets);
10280	ESTAT_ADD(tx_carrier_sense_errors);
10281	ESTAT_ADD(tx_discards);
10282	ESTAT_ADD(tx_errors);
10283
10284	ESTAT_ADD(dma_writeq_full);
10285	ESTAT_ADD(dma_write_prioq_full);
10286	ESTAT_ADD(rxbds_empty);
10287	ESTAT_ADD(rx_discards);
10288	ESTAT_ADD(rx_errors);
10289	ESTAT_ADD(rx_threshold_hit);
10290
10291	ESTAT_ADD(dma_readq_full);
10292	ESTAT_ADD(dma_read_prioq_full);
10293	ESTAT_ADD(tx_comp_queue_full);
10294
10295	ESTAT_ADD(ring_set_send_prod_index);
10296	ESTAT_ADD(ring_status_update);
10297	ESTAT_ADD(nic_irqs);
10298	ESTAT_ADD(nic_avoided_irqs);
10299	ESTAT_ADD(nic_tx_threshold_hit);
10300
10301	ESTAT_ADD(mbuf_lwm_thresh_hit);
10302}
10303
10304static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
10305{
10306	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10307	struct tg3_hw_stats *hw_stats = tp->hw_stats;
10308
10309	stats->rx_packets = old_stats->rx_packets +
10310		get_stat64(&hw_stats->rx_ucast_packets) +
10311		get_stat64(&hw_stats->rx_mcast_packets) +
10312		get_stat64(&hw_stats->rx_bcast_packets);
10313
10314	stats->tx_packets = old_stats->tx_packets +
10315		get_stat64(&hw_stats->tx_ucast_packets) +
10316		get_stat64(&hw_stats->tx_mcast_packets) +
10317		get_stat64(&hw_stats->tx_bcast_packets);
10318
10319	stats->rx_bytes = old_stats->rx_bytes +
10320		get_stat64(&hw_stats->rx_octets);
10321	stats->tx_bytes = old_stats->tx_bytes +
10322		get_stat64(&hw_stats->tx_octets);
10323
10324	stats->rx_errors = old_stats->rx_errors +
10325		get_stat64(&hw_stats->rx_errors);
10326	stats->tx_errors = old_stats->tx_errors +
10327		get_stat64(&hw_stats->tx_errors) +
10328		get_stat64(&hw_stats->tx_mac_errors) +
10329		get_stat64(&hw_stats->tx_carrier_sense_errors) +
10330		get_stat64(&hw_stats->tx_discards);
10331
10332	stats->multicast = old_stats->multicast +
10333		get_stat64(&hw_stats->rx_mcast_packets);
10334	stats->collisions = old_stats->collisions +
10335		get_stat64(&hw_stats->tx_collisions);
10336
10337	stats->rx_length_errors = old_stats->rx_length_errors +
10338		get_stat64(&hw_stats->rx_frame_too_long_errors) +
10339		get_stat64(&hw_stats->rx_undersize_packets);
10340
10341	stats->rx_over_errors = old_stats->rx_over_errors +
10342		get_stat64(&hw_stats->rxbds_empty);
10343	stats->rx_frame_errors = old_stats->rx_frame_errors +
10344		get_stat64(&hw_stats->rx_align_errors);
10345	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10346		get_stat64(&hw_stats->tx_discards);
10347	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10348		get_stat64(&hw_stats->tx_carrier_sense_errors);
10349
10350	stats->rx_crc_errors = old_stats->rx_crc_errors +
10351		tg3_calc_crc_errors(tp);
10352
10353	stats->rx_missed_errors = old_stats->rx_missed_errors +
10354		get_stat64(&hw_stats->rx_discards);
10355
10356	stats->rx_dropped = tp->rx_dropped;
10357	stats->tx_dropped = tp->tx_dropped;
10358}
10359
10360static int tg3_get_regs_len(struct net_device *dev)
10361{
10362	return TG3_REG_BLK_SIZE;
10363}
10364
10365static void tg3_get_regs(struct net_device *dev,
10366		struct ethtool_regs *regs, void *_p)
10367{
10368	struct tg3 *tp = netdev_priv(dev);
10369
10370	regs->version = 0;
10371
10372	memset(_p, 0, TG3_REG_BLK_SIZE);
10373
10374	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10375		return;
10376
10377	tg3_full_lock(tp, 0);
10378
10379	tg3_dump_legacy_regs(tp, (u32 *)_p);
10380
10381	tg3_full_unlock(tp);
10382}
10383
10384static int tg3_get_eeprom_len(struct net_device *dev)
10385{
10386	struct tg3 *tp = netdev_priv(dev);
10387
10388	return tp->nvram_size;
10389}
10390
10391static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10392{
10393	struct tg3 *tp = netdev_priv(dev);
10394	int ret;
10395	u8  *pd;
10396	u32 i, offset, len, b_offset, b_count;
10397	__be32 val;
10398
10399	if (tg3_flag(tp, NO_NVRAM))
10400		return -EINVAL;
10401
10402	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10403		return -EAGAIN;
10404
10405	offset = eeprom->offset;
10406	len = eeprom->len;
10407	eeprom->len = 0;
10408
10409	eeprom->magic = TG3_EEPROM_MAGIC;
10410
10411	if (offset & 3) {
10412		/* adjustments to start on required 4 byte boundary */
10413		b_offset = offset & 3;
10414		b_count = 4 - b_offset;
10415		if (b_count > len) {
10416			/* i.e. offset=1 len=2 */
10417			b_count = len;
10418		}
10419		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10420		if (ret)
10421			return ret;
10422		memcpy(data, ((char *)&val) + b_offset, b_count);
10423		len -= b_count;
10424		offset += b_count;
10425		eeprom->len += b_count;
10426	}
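      	/* A fuller worked example: offset=1 len=10 gives b_offset=1,
      	 * b_count=3; the word at 0 is read and bytes 1..3 copied out,
      	 * leaving offset=4, len=7.  The loop below then copies one
      	 * aligned word (bytes 4..7), and the tail code reads the word
      	 * at 8 for the remaining 3 bytes.
      	 */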
10427
10428	/* read bytes up to the last 4 byte boundary */
10429	pd = &data[eeprom->len];
10430	for (i = 0; i < (len - (len & 3)); i += 4) {
10431		ret = tg3_nvram_read_be32(tp, offset + i, &val);
10432		if (ret) {
10433			eeprom->len += i;
10434			return ret;
10435		}
10436		memcpy(pd + i, &val, 4);
10437	}
10438	eeprom->len += i;
10439
10440	if (len & 3) {
10441		/* read last bytes not ending on 4 byte boundary */
10442		pd = &data[eeprom->len];
10443		b_count = len & 3;
10444		b_offset = offset + len - b_count;
10445		ret = tg3_nvram_read_be32(tp, b_offset, &val);
10446		if (ret)
10447			return ret;
10448		memcpy(pd, &val, b_count);
10449		eeprom->len += b_count;
10450	}
10451	return 0;
10452}
10453
10454static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10455{
10456	struct tg3 *tp = netdev_priv(dev);
10457	int ret;
10458	u32 offset, len, b_offset, odd_len;
10459	u8 *buf;
10460	__be32 start, end;
10461
10462	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10463		return -EAGAIN;
10464
10465	if (tg3_flag(tp, NO_NVRAM) ||
10466	    eeprom->magic != TG3_EEPROM_MAGIC)
10467		return -EINVAL;
10468
10469	offset = eeprom->offset;
10470	len = eeprom->len;
10471
10472	if ((b_offset = (offset & 3))) {
10473		/* adjustments to start on required 4 byte boundary */
10474		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10475		if (ret)
10476			return ret;
10477		len += b_offset;
10478		offset &= ~3;
10479		if (len < 4)
10480			len = 4;
10481	}
10482
10483	odd_len = 0;
10484	if (len & 3) {
10485		/* adjustments to end on required 4 byte boundary */
10486		odd_len = 1;
10487		len = (len + 3) & ~3;
10488		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10489		if (ret)
10490			return ret;
10491	}
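      	/* Read-modify-write example: offset=2 len=5 covers bytes 2..6.
      	 * b_offset=2, so the word at 0 is read into "start"; len grows
      	 * to 7 and rounds up to 8, so the word at 4 is read into "end".
      	 * The bounce buffer below is assembled as start word + caller
      	 * data + end word before the aligned 8-byte NVRAM write.
      	 */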
10492
10493	buf = data;
10494	if (b_offset || odd_len) {
10495		buf = kmalloc(len, GFP_KERNEL);
10496		if (!buf)
10497			return -ENOMEM;
10498		if (b_offset)
10499			memcpy(buf, &start, 4);
10500		if (odd_len)
10501			memcpy(buf+len-4, &end, 4);
10502		memcpy(buf + b_offset, data, eeprom->len);
10503	}
10504
10505	ret = tg3_nvram_write_block(tp, offset, len, buf);
10506
10507	if (buf != data)
10508		kfree(buf);
10509
10510	return ret;
10511}
10512
10513static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10514{
10515	struct tg3 *tp = netdev_priv(dev);
10516
10517	if (tg3_flag(tp, USE_PHYLIB)) {
10518		struct phy_device *phydev;
10519		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10520			return -EAGAIN;
10521		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10522		return phy_ethtool_gset(phydev, cmd);
10523	}
10524
10525	cmd->supported = (SUPPORTED_Autoneg);
10526
10527	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10528		cmd->supported |= (SUPPORTED_1000baseT_Half |
10529				   SUPPORTED_1000baseT_Full);
10530
10531	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10532		cmd->supported |= (SUPPORTED_100baseT_Half |
10533				  SUPPORTED_100baseT_Full |
10534				  SUPPORTED_10baseT_Half |
10535				  SUPPORTED_10baseT_Full |
10536				  SUPPORTED_TP);
10537		cmd->port = PORT_TP;
10538	} else {
10539		cmd->supported |= SUPPORTED_FIBRE;
10540		cmd->port = PORT_FIBRE;
10541	}
10542
10543	cmd->advertising = tp->link_config.advertising;
10544	if (tg3_flag(tp, PAUSE_AUTONEG)) {
10545		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10546			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10547				cmd->advertising |= ADVERTISED_Pause;
10548			} else {
10549				cmd->advertising |= ADVERTISED_Pause |
10550						    ADVERTISED_Asym_Pause;
10551			}
10552		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10553			cmd->advertising |= ADVERTISED_Asym_Pause;
10554		}
10555	}
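      	/* The mapping above is the usual 802.3 Annex 28B pause encoding:
      	 *	RX and TX -> Pause
      	 *	RX only   -> Pause | Asym_Pause
      	 *	TX only   -> Asym_Pause
      	 */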
10556	if (netif_running(dev) && netif_carrier_ok(dev)) {
10557		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10558		cmd->duplex = tp->link_config.active_duplex;
10559		cmd->lp_advertising = tp->link_config.rmt_adv;
10560		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10561			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
10562				cmd->eth_tp_mdix = ETH_TP_MDI_X;
10563			else
10564				cmd->eth_tp_mdix = ETH_TP_MDI;
10565		}
10566	} else {
10567		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
10568		cmd->duplex = DUPLEX_UNKNOWN;
10569		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
10570	}
10571	cmd->phy_address = tp->phy_addr;
10572	cmd->transceiver = XCVR_INTERNAL;
10573	cmd->autoneg = tp->link_config.autoneg;
10574	cmd->maxtxpkt = 0;
10575	cmd->maxrxpkt = 0;
10576	return 0;
10577}
10578
10579static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10580{
10581	struct tg3 *tp = netdev_priv(dev);
10582	u32 speed = ethtool_cmd_speed(cmd);
10583
10584	if (tg3_flag(tp, USE_PHYLIB)) {
10585		struct phy_device *phydev;
10586		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10587			return -EAGAIN;
10588		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10589		return phy_ethtool_sset(phydev, cmd);
10590	}
10591
10592	if (cmd->autoneg != AUTONEG_ENABLE &&
10593	    cmd->autoneg != AUTONEG_DISABLE)
10594		return -EINVAL;
10595
10596	if (cmd->autoneg == AUTONEG_DISABLE &&
10597	    cmd->duplex != DUPLEX_FULL &&
10598	    cmd->duplex != DUPLEX_HALF)
10599		return -EINVAL;
10600
10601	if (cmd->autoneg == AUTONEG_ENABLE) {
10602		u32 mask = ADVERTISED_Autoneg |
10603			   ADVERTISED_Pause |
10604			   ADVERTISED_Asym_Pause;
10605
10606		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10607			mask |= ADVERTISED_1000baseT_Half |
10608				ADVERTISED_1000baseT_Full;
10609
10610		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10611			mask |= ADVERTISED_100baseT_Half |
10612				ADVERTISED_100baseT_Full |
10613				ADVERTISED_10baseT_Half |
10614				ADVERTISED_10baseT_Full |
10615				ADVERTISED_TP;
10616		else
10617			mask |= ADVERTISED_FIBRE;
10618
10619		if (cmd->advertising & ~mask)
10620			return -EINVAL;
10621
10622		mask &= (ADVERTISED_1000baseT_Half |
10623			 ADVERTISED_1000baseT_Full |
10624			 ADVERTISED_100baseT_Half |
10625			 ADVERTISED_100baseT_Full |
10626			 ADVERTISED_10baseT_Half |
10627			 ADVERTISED_10baseT_Full);
10628
10629		cmd->advertising &= mask;
10630	} else {
10631		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10632			if (speed != SPEED_1000)
10633				return -EINVAL;
10634
10635			if (cmd->duplex != DUPLEX_FULL)
10636				return -EINVAL;
10637		} else {
10638			if (speed != SPEED_100 &&
10639			    speed != SPEED_10)
10640				return -EINVAL;
10641		}
10642	}
10643
10644	tg3_full_lock(tp, 0);
10645
10646	tp->link_config.autoneg = cmd->autoneg;
10647	if (cmd->autoneg == AUTONEG_ENABLE) {
10648		tp->link_config.advertising = (cmd->advertising |
10649					      ADVERTISED_Autoneg);
10650		tp->link_config.speed = SPEED_UNKNOWN;
10651		tp->link_config.duplex = DUPLEX_UNKNOWN;
10652	} else {
10653		tp->link_config.advertising = 0;
10654		tp->link_config.speed = speed;
10655		tp->link_config.duplex = cmd->duplex;
10656	}
10657
10658	if (netif_running(dev))
10659		tg3_setup_phy(tp, 1);
10660
10661	tg3_full_unlock(tp);
10662
10663	return 0;
10664}
10665
10666static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10667{
10668	struct tg3 *tp = netdev_priv(dev);
10669
10670	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10671	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10672	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10673	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10674}
10675
10676static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10677{
10678	struct tg3 *tp = netdev_priv(dev);
10679
10680	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10681		wol->supported = WAKE_MAGIC;
10682	else
10683		wol->supported = 0;
10684	wol->wolopts = 0;
10685	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10686		wol->wolopts = WAKE_MAGIC;
10687	memset(&wol->sopass, 0, sizeof(wol->sopass));
10688}
10689
10690static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10691{
10692	struct tg3 *tp = netdev_priv(dev);
10693	struct device *dp = &tp->pdev->dev;
10694
10695	if (wol->wolopts & ~WAKE_MAGIC)
10696		return -EINVAL;
10697	if ((wol->wolopts & WAKE_MAGIC) &&
10698	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10699		return -EINVAL;
10700
10701	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10702
10703	spin_lock_bh(&tp->lock);
10704	if (device_may_wakeup(dp))
10705		tg3_flag_set(tp, WOL_ENABLE);
10706	else
10707		tg3_flag_clear(tp, WOL_ENABLE);
10708	spin_unlock_bh(&tp->lock);
10709
10710	return 0;
10711}
10712
10713static u32 tg3_get_msglevel(struct net_device *dev)
10714{
10715	struct tg3 *tp = netdev_priv(dev);
10716	return tp->msg_enable;
10717}
10718
10719static void tg3_set_msglevel(struct net_device *dev, u32 value)
10720{
10721	struct tg3 *tp = netdev_priv(dev);
10722	tp->msg_enable = value;
10723}
10724
10725static int tg3_nway_reset(struct net_device *dev)
10726{
10727	struct tg3 *tp = netdev_priv(dev);
10728	int r;
10729
10730	if (!netif_running(dev))
10731		return -EAGAIN;
10732
10733	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10734		return -EINVAL;
10735
10736	if (tg3_flag(tp, USE_PHYLIB)) {
10737		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10738			return -EAGAIN;
10739		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10740	} else {
10741		u32 bmcr;
10742
10743		spin_lock_bh(&tp->lock);
10744		r = -EINVAL;
10745		tg3_readphy(tp, MII_BMCR, &bmcr);
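      		/* The first read above is discarded; as with the latched
      		 * status reads elsewhere in this driver, reading twice
      		 * flushes any stale value (an inference; not documented
      		 * here).
      		 */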
10746		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10747		    ((bmcr & BMCR_ANENABLE) ||
10748		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10749			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10750						   BMCR_ANENABLE);
10751			r = 0;
10752		}
10753		spin_unlock_bh(&tp->lock);
10754	}
10755
10756	return r;
10757}
10758
10759static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10760{
10761	struct tg3 *tp = netdev_priv(dev);
10762
10763	ering->rx_max_pending = tp->rx_std_ring_mask;
10764	if (tg3_flag(tp, JUMBO_RING_ENABLE))
10765		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10766	else
10767		ering->rx_jumbo_max_pending = 0;
10768
10769	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10770
10771	ering->rx_pending = tp->rx_pending;
10772	if (tg3_flag(tp, JUMBO_RING_ENABLE))
10773		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10774	else
10775		ering->rx_jumbo_pending = 0;
10776
10777	ering->tx_pending = tp->napi[0].tx_pending;
10778}
10779
10780static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10781{
10782	struct tg3 *tp = netdev_priv(dev);
10783	int i, irq_sync = 0, err = 0;
10784
10785	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10786	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10787	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10788	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
10789	    (tg3_flag(tp, TSO_BUG) &&
10790	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10791		return -EINVAL;
10792
10793	if (netif_running(dev)) {
10794		tg3_phy_stop(tp);
10795		tg3_netif_stop(tp);
10796		irq_sync = 1;
10797	}
10798
10799	tg3_full_lock(tp, irq_sync);
10800
10801	tp->rx_pending = ering->rx_pending;
10802
10803	if (tg3_flag(tp, MAX_RXPEND_64) &&
10804	    tp->rx_pending > 63)
10805		tp->rx_pending = 63;
10806	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10807
10808	for (i = 0; i < tp->irq_max; i++)
10809		tp->napi[i].tx_pending = ering->tx_pending;
10810
10811	if (netif_running(dev)) {
10812		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10813		err = tg3_restart_hw(tp, 1);
10814		if (!err)
10815			tg3_netif_start(tp);
10816	}
10817
10818	tg3_full_unlock(tp);
10819
10820	if (irq_sync && !err)
10821		tg3_phy_start(tp);
10822
10823	return err;
10824}
10825
10826static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10827{
10828	struct tg3 *tp = netdev_priv(dev);
10829
10830	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10831
10832	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
10833		epause->rx_pause = 1;
10834	else
10835		epause->rx_pause = 0;
10836
10837	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
10838		epause->tx_pause = 1;
10839	else
10840		epause->tx_pause = 0;
10841}
10842
10843static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10844{
10845	struct tg3 *tp = netdev_priv(dev);
10846	int err = 0;
10847
10848	if (tg3_flag(tp, USE_PHYLIB)) {
10849		u32 newadv;
10850		struct phy_device *phydev;
10851
10852		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10853
10854		if (!(phydev->supported & SUPPORTED_Pause) ||
10855		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10856		     (epause->rx_pause != epause->tx_pause)))
10857			return -EINVAL;
10858
10859		tp->link_config.flowctrl = 0;
10860		if (epause->rx_pause) {
10861			tp->link_config.flowctrl |= FLOW_CTRL_RX;
10862
10863			if (epause->tx_pause) {
10864				tp->link_config.flowctrl |= FLOW_CTRL_TX;
10865				newadv = ADVERTISED_Pause;
10866			} else
10867				newadv = ADVERTISED_Pause |
10868					 ADVERTISED_Asym_Pause;
10869		} else if (epause->tx_pause) {
10870			tp->link_config.flowctrl |= FLOW_CTRL_TX;
10871			newadv = ADVERTISED_Asym_Pause;
10872		} else
10873			newadv = 0;
10874
10875		if (epause->autoneg)
10876			tg3_flag_set(tp, PAUSE_AUTONEG);
10877		else
10878			tg3_flag_clear(tp, PAUSE_AUTONEG);
10879
10880		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10881			u32 oldadv = phydev->advertising &
10882				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10883			if (oldadv != newadv) {
10884				phydev->advertising &=
10885					~(ADVERTISED_Pause |
10886					  ADVERTISED_Asym_Pause);
10887				phydev->advertising |= newadv;
10888				if (phydev->autoneg) {
10889					/*
10890					 * Always renegotiate the link to
10891					 * inform our link partner of our
10892					 * flow control settings, even if the
10893					 * flow control is forced.  Let
10894					 * tg3_adjust_link() do the final
10895					 * flow control setup.
10896					 */
10897					return phy_start_aneg(phydev);
10898				}
10899			}
10900
10901			if (!epause->autoneg)
10902				tg3_setup_flow_control(tp, 0, 0);
10903		} else {
10904			tp->link_config.advertising &=
10905					~(ADVERTISED_Pause |
10906					  ADVERTISED_Asym_Pause);
10907			tp->link_config.advertising |= newadv;
10908		}
10909	} else {
10910		int irq_sync = 0;
10911
10912		if (netif_running(dev)) {
10913			tg3_netif_stop(tp);
10914			irq_sync = 1;
10915		}
10916
10917		tg3_full_lock(tp, irq_sync);
10918
10919		if (epause->autoneg)
10920			tg3_flag_set(tp, PAUSE_AUTONEG);
10921		else
10922			tg3_flag_clear(tp, PAUSE_AUTONEG);
10923		if (epause->rx_pause)
10924			tp->link_config.flowctrl |= FLOW_CTRL_RX;
10925		else
10926			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10927		if (epause->tx_pause)
10928			tp->link_config.flowctrl |= FLOW_CTRL_TX;
10929		else
10930			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10931
10932		if (netif_running(dev)) {
10933			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10934			err = tg3_restart_hw(tp, 1);
10935			if (!err)
10936				tg3_netif_start(tp);
10937		}
10938
10939		tg3_full_unlock(tp);
10940	}
10941
10942	return err;
10943}
10944
10945static int tg3_get_sset_count(struct net_device *dev, int sset)
10946{
10947	switch (sset) {
10948	case ETH_SS_TEST:
10949		return TG3_NUM_TEST;
10950	case ETH_SS_STATS:
10951		return TG3_NUM_STATS;
10952	default:
10953		return -EOPNOTSUPP;
10954	}
10955}
10956
10957static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
10958			 u32 *rules __always_unused)
10959{
10960	struct tg3 *tp = netdev_priv(dev);
10961
10962	if (!tg3_flag(tp, SUPPORT_MSIX))
10963		return -EOPNOTSUPP;
10964
10965	switch (info->cmd) {
10966	case ETHTOOL_GRXRINGS:
10967		if (netif_running(tp->dev))
10968			info->data = tp->irq_cnt;
10969		else {
10970			info->data = num_online_cpus();
10971			if (info->data > TG3_IRQ_MAX_VECS_RSS)
10972				info->data = TG3_IRQ_MAX_VECS_RSS;
10973		}
10974
10975		/* The first interrupt vector only
10976		 * handles link interrupts.
10977		 */
10978		info->data -= 1;
10979		return 0;
10980
10981	default:
10982		return -EOPNOTSUPP;
10983	}
10984}
10985
10986static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
10987{
10988	u32 size = 0;
10989	struct tg3 *tp = netdev_priv(dev);
10990
10991	if (tg3_flag(tp, SUPPORT_MSIX))
10992		size = TG3_RSS_INDIR_TBL_SIZE;
10993
10994	return size;
10995}
10996
10997static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
10998{
10999	struct tg3 *tp = netdev_priv(dev);
11000	int i;
11001
11002	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11003		indir[i] = tp->rss_ind_tbl[i];
11004
11005	return 0;
11006}
11007
11008static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11009{
11010	struct tg3 *tp = netdev_priv(dev);
11011	size_t i;
11012
11013	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11014		tp->rss_ind_tbl[i] = indir[i];
11015
11016	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11017		return 0;
11018
11019	/* It is legal to write the indirection
11020	 * table while the device is running.
11021	 */
11022	tg3_full_lock(tp, 0);
11023	tg3_rss_write_indir_tbl(tp);
11024	tg3_full_unlock(tp);
11025
11026	return 0;
11027}
11028
11029static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11030{
11031	switch (stringset) {
11032	case ETH_SS_STATS:
11033		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11034		break;
11035	case ETH_SS_TEST:
11036		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11037		break;
11038	default:
11039		WARN_ON(1);	/* we need a WARN() */
11040		break;
11041	}
11042}
11043
11044static int tg3_set_phys_id(struct net_device *dev,
11045			    enum ethtool_phys_id_state state)
11046{
11047	struct tg3 *tp = netdev_priv(dev);
11048
11049	if (!netif_running(tp->dev))
11050		return -EAGAIN;
11051
11052	switch (state) {
11053	case ETHTOOL_ID_ACTIVE:
11054		return 1;	/* cycle on/off once per second */
11055
11056	case ETHTOOL_ID_ON:
11057		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11058		     LED_CTRL_1000MBPS_ON |
11059		     LED_CTRL_100MBPS_ON |
11060		     LED_CTRL_10MBPS_ON |
11061		     LED_CTRL_TRAFFIC_OVERRIDE |
11062		     LED_CTRL_TRAFFIC_BLINK |
11063		     LED_CTRL_TRAFFIC_LED);
11064		break;
11065
11066	case ETHTOOL_ID_OFF:
11067		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11068		     LED_CTRL_TRAFFIC_OVERRIDE);
11069		break;
11070
11071	case ETHTOOL_ID_INACTIVE:
11072		tw32(MAC_LED_CTRL, tp->led_ctrl);
11073		break;
11074	}
11075
11076	return 0;
11077}
11078
11079static void tg3_get_ethtool_stats(struct net_device *dev,
11080				   struct ethtool_stats *estats, u64 *tmp_stats)
11081{
11082	struct tg3 *tp = netdev_priv(dev);
11083
11084	if (tp->hw_stats)
11085		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11086	else
11087		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11088}
11089
11090static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11091{
11092	int i;
11093	__be32 *buf;
11094	u32 offset = 0, len = 0;
11095	u32 magic, val;
11096
11097	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11098		return NULL;
11099
11100	if (magic == TG3_EEPROM_MAGIC) {
11101		for (offset = TG3_NVM_DIR_START;
11102		     offset < TG3_NVM_DIR_END;
11103		     offset += TG3_NVM_DIRENT_SIZE) {
11104			if (tg3_nvram_read(tp, offset, &val))
11105				return NULL;
11106
11107			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11108			    TG3_NVM_DIRTYPE_EXTVPD)
11109				break;
11110		}
11111
11112		if (offset != TG3_NVM_DIR_END) {
11113			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11114			if (tg3_nvram_read(tp, offset + 4, &offset))
11115				return NULL;
11116
11117			offset = tg3_nvram_logical_addr(tp, offset);
11118		}
11119	}
11120
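	/* If no extended-VPD directory entry was found above (or the
	 * image is not EEPROM-format at all), fall back to the fixed
	 * legacy VPD window.
	 */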
11121	if (!offset || !len) {
11122		offset = TG3_NVM_VPD_OFF;
11123		len = TG3_NVM_VPD_LEN;
11124	}
11125
11126	buf = kmalloc(len, GFP_KERNEL);
11127	if (buf == NULL)
11128		return NULL;
11129
11130	if (magic == TG3_EEPROM_MAGIC) {
11131		for (i = 0; i < len; i += 4) {
11132			/* The data is in little-endian format in NVRAM.
11133			 * Use the big-endian read routines to preserve
11134			 * the byte order as it exists in NVRAM.
11135			 */
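			/* e.g. NVRAM bytes 0x12 0x34 0x56 0x78 land in
			 * buf[] in exactly that order on both little- and
			 * big-endian CPUs, so later VPD parsing can treat
			 * buf as a plain byte stream.
			 */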
11136			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11137				goto error;
11138		}
11139	} else {
11140		u8 *ptr;
11141		ssize_t cnt;
11142		unsigned int pos = 0;
11143
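		/* Not EEPROM-format: read the VPD through PCI config
		 * space instead.  -ETIMEDOUT/-EINTR results count as a
		 * zero-byte read and are retried (at most three passes);
		 * any other error is fatal.
		 */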
11144		ptr = (u8 *)&buf[0];
11145		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11146			cnt = pci_read_vpd(tp->pdev, pos,
11147					   len - pos, ptr);
11148			if (cnt == -ETIMEDOUT || cnt == -EINTR)
11149				cnt = 0;
11150			else if (cnt < 0)
11151				goto error;
11152		}
11153		if (pos != len)
11154			goto error;
11155	}
11156
11157	*vpdlen = len;
11158
11159	return buf;
11160
11161error:
11162	kfree(buf);
11163	return NULL;
11164}
11165
11166#define NVRAM_TEST_SIZE 0x100
11167#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
11168#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
11169#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
11170#define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
11171#define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
11172#define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
11173#define NVRAM_SELFBOOT_HW_SIZE 0x20
11174#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11175
11176static int tg3_test_nvram(struct tg3 *tp)
11177{
11178	u32 csum, magic, len;
11179	__be32 *buf;
11180	int i, j, k, err = 0, size;
11181
11182	if (tg3_flag(tp, NO_NVRAM))
11183		return 0;
11184
11185	if (tg3_nvram_read(tp, 0, &magic) != 0)
11186		return -EIO;
11187
11188	if (magic == TG3_EEPROM_MAGIC)
11189		size = NVRAM_TEST_SIZE;
11190	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
11191		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11192		    TG3_EEPROM_SB_FORMAT_1) {
11193			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11194			case TG3_EEPROM_SB_REVISION_0:
11195				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11196				break;
11197			case TG3_EEPROM_SB_REVISION_2:
11198				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11199				break;
11200			case TG3_EEPROM_SB_REVISION_3:
11201				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11202				break;
11203			case TG3_EEPROM_SB_REVISION_4:
11204				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11205				break;
11206			case TG3_EEPROM_SB_REVISION_5:
11207				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11208				break;
11209			case TG3_EEPROM_SB_REVISION_6:
11210				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11211				break;
11212			default:
11213				return -EIO;
11214			}
11215		} else
11216			return 0;
11217	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11218		size = NVRAM_SELFBOOT_HW_SIZE;
11219	else
11220		return -EIO;
11221
11222	buf = kmalloc(size, GFP_KERNEL);
11223	if (buf == NULL)
11224		return -ENOMEM;
11225
11226	err = -EIO;
11227	for (i = 0, j = 0; i < size; i += 4, j++) {
11228		err = tg3_nvram_read_be32(tp, i, &buf[j]);
11229		if (err)
11230			break;
11231	}
11232	if (i < size)
11233		goto out;
11234
11235	/* Selfboot format */
11236	magic = be32_to_cpu(buf[0]);
11237	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11238	    TG3_EEPROM_MAGIC_FW) {
11239		u8 *buf8 = (u8 *) buf, csum8 = 0;
11240
11241		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11242		    TG3_EEPROM_SB_REVISION_2) {
11243			/* For rev 2, the csum doesn't include the MBA. */
11244			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11245				csum8 += buf8[i];
11246			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11247				csum8 += buf8[i];
11248		} else {
11249			for (i = 0; i < size; i++)
11250				csum8 += buf8[i];
11251		}
11252
11253		if (csum8 == 0) {
11254			err = 0;
11255			goto out;
11256		}
11257
11258		err = -EIO;
11259		goto out;
11260	}
11261
11262	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11263	    TG3_EEPROM_MAGIC_HW) {
11264		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11265		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11266		u8 *buf8 = (u8 *) buf;
11267
11268		/* Separate the parity bits and the data bytes.  */
11269		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11270			if ((i == 0) || (i == 8)) {
11271				int l;
11272				u8 msk;
11273
11274				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11275					parity[k++] = buf8[i] & msk;
11276				i++;
11277			} else if (i == 16) {
11278				int l;
11279				u8 msk;
11280
11281				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11282					parity[k++] = buf8[i] & msk;
11283				i++;
11284
11285				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11286					parity[k++] = buf8[i] & msk;
11287				i++;
11288			}
11289			data[j++] = buf8[i];
11290		}
11291
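		/* The stored parity bit must complement the data byte's
		 * population count, so that data plus parity always has an
		 * odd number of set bits; both mismatch cases below fail
		 * the test.
		 */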
11292		err = -EIO;
11293		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11294			u8 hw8 = hweight8(data[i]);
11295
11296			if ((hw8 & 0x1) && parity[i])
11297				goto out;
11298			else if (!(hw8 & 0x1) && !parity[i])
11299				goto out;
11300		}
11301		err = 0;
11302		goto out;
11303	}
11304
11305	err = -EIO;
11306
11307	/* Bootstrap checksum at offset 0x10 */
11308	csum = calc_crc((unsigned char *) buf, 0x10);
11309	if (csum != le32_to_cpu(buf[0x10/4]))
11310		goto out;
11311
11312	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11313	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11314	if (csum != le32_to_cpu(buf[0xfc/4]))
11315		goto out;
11316
11317	kfree(buf);
11318
11319	buf = tg3_vpd_readblock(tp, &len);
11320	if (!buf)
11321		return -ENOMEM;
11322
11323	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11324	if (i > 0) {
11325		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11326		if (j < 0)
11327			goto out;
11328
11329		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11330			goto out;
11331
11332		i += PCI_VPD_LRDT_TAG_SIZE;
11333		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11334					      PCI_VPD_RO_KEYWORD_CHKSUM);
11335		if (j > 0) {
11336			u8 csum8 = 0;
11337
11338			j += PCI_VPD_INFO_FLD_HDR_SIZE;
11339
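			/* Per the PCI spec, the RV keyword's checksum byte
			 * is chosen so that all VPD bytes from offset 0 up
			 * to and including it sum to zero (mod 256), which
			 * is what the loop below verifies.
			 */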
11340			for (i = 0; i <= j; i++)
11341				csum8 += ((u8 *)buf)[i];
11342
11343			if (csum8)
11344				goto out;
11345		}
11346	}
11347
11348	err = 0;
11349
11350out:
11351	kfree(buf);
11352	return err;
11353}
11354
11355#define TG3_SERDES_TIMEOUT_SEC	2
11356#define TG3_COPPER_TIMEOUT_SEC	6
11357
11358static int tg3_test_link(struct tg3 *tp)
11359{
11360	int i, max;
11361
11362	if (!netif_running(tp->dev))
11363		return -ENODEV;
11364
11365	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11366		max = TG3_SERDES_TIMEOUT_SEC;
11367	else
11368		max = TG3_COPPER_TIMEOUT_SEC;
11369
11370	for (i = 0; i < max; i++) {
11371		if (netif_carrier_ok(tp->dev))
11372			return 0;
11373
11374		if (msleep_interruptible(1000))
11375			break;
11376	}
11377
11378	return -EIO;
11379}
11380
11381/* Only test the commonly used registers */
11382static int tg3_test_registers(struct tg3 *tp)
11383{
11384	int i, is_5705, is_5750;
11385	u32 offset, read_mask, write_mask, val, save_val, read_val;
11386	static struct {
11387		u16 offset;
11388		u16 flags;
11389#define TG3_FL_5705	0x1
11390#define TG3_FL_NOT_5705	0x2
11391#define TG3_FL_NOT_5788	0x4
11392#define TG3_FL_NOT_5750	0x8
11393		u32 read_mask;
11394		u32 write_mask;
11395	} reg_tbl[] = {
11396		/* MAC Control Registers */
11397		{ MAC_MODE, TG3_FL_NOT_5705,
11398			0x00000000, 0x00ef6f8c },
11399		{ MAC_MODE, TG3_FL_5705,
11400			0x00000000, 0x01ef6b8c },
11401		{ MAC_STATUS, TG3_FL_NOT_5705,
11402			0x03800107, 0x00000000 },
11403		{ MAC_STATUS, TG3_FL_5705,
11404			0x03800100, 0x00000000 },
11405		{ MAC_ADDR_0_HIGH, 0x0000,
11406			0x00000000, 0x0000ffff },
11407		{ MAC_ADDR_0_LOW, 0x0000,
11408			0x00000000, 0xffffffff },
11409		{ MAC_RX_MTU_SIZE, 0x0000,
11410			0x00000000, 0x0000ffff },
11411		{ MAC_TX_MODE, 0x0000,
11412			0x00000000, 0x00000070 },
11413		{ MAC_TX_LENGTHS, 0x0000,
11414			0x00000000, 0x00003fff },
11415		{ MAC_RX_MODE, TG3_FL_NOT_5705,
11416			0x00000000, 0x000007fc },
11417		{ MAC_RX_MODE, TG3_FL_5705,
11418			0x00000000, 0x000007dc },
11419		{ MAC_HASH_REG_0, 0x0000,
11420			0x00000000, 0xffffffff },
11421		{ MAC_HASH_REG_1, 0x0000,
11422			0x00000000, 0xffffffff },
11423		{ MAC_HASH_REG_2, 0x0000,
11424			0x00000000, 0xffffffff },
11425		{ MAC_HASH_REG_3, 0x0000,
11426			0x00000000, 0xffffffff },
11427
11428		/* Receive Data and Receive BD Initiator Control Registers. */
11429		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11430			0x00000000, 0xffffffff },
11431		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11432			0x00000000, 0xffffffff },
11433		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11434			0x00000000, 0x00000003 },
11435		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11436			0x00000000, 0xffffffff },
11437		{ RCVDBDI_STD_BD+0, 0x0000,
11438			0x00000000, 0xffffffff },
11439		{ RCVDBDI_STD_BD+4, 0x0000,
11440			0x00000000, 0xffffffff },
11441		{ RCVDBDI_STD_BD+8, 0x0000,
11442			0x00000000, 0xffff0002 },
11443		{ RCVDBDI_STD_BD+0xc, 0x0000,
11444			0x00000000, 0xffffffff },
11445
11446		/* Receive BD Initiator Control Registers. */
11447		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11448			0x00000000, 0xffffffff },
11449		{ RCVBDI_STD_THRESH, TG3_FL_5705,
11450			0x00000000, 0x000003ff },
11451		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11452			0x00000000, 0xffffffff },
11453
11454		/* Host Coalescing Control Registers. */
11455		{ HOSTCC_MODE, TG3_FL_NOT_5705,
11456			0x00000000, 0x00000004 },
11457		{ HOSTCC_MODE, TG3_FL_5705,
11458			0x00000000, 0x000000f6 },
11459		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11460			0x00000000, 0xffffffff },
11461		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11462			0x00000000, 0x000003ff },
11463		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11464			0x00000000, 0xffffffff },
11465		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11466			0x00000000, 0x000003ff },
11467		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11468			0x00000000, 0xffffffff },
11469		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11470			0x00000000, 0x000000ff },
11471		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11472			0x00000000, 0xffffffff },
11473		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11474			0x00000000, 0x000000ff },
11475		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11476			0x00000000, 0xffffffff },
11477		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11478			0x00000000, 0xffffffff },
11479		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11480			0x00000000, 0xffffffff },
11481		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11482			0x00000000, 0x000000ff },
11483		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11484			0x00000000, 0xffffffff },
11485		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11486			0x00000000, 0x000000ff },
11487		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11488			0x00000000, 0xffffffff },
11489		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11490			0x00000000, 0xffffffff },
11491		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11492			0x00000000, 0xffffffff },
11493		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11494			0x00000000, 0xffffffff },
11495		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11496			0x00000000, 0xffffffff },
11497		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11498			0xffffffff, 0x00000000 },
11499		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11500			0xffffffff, 0x00000000 },
11501
11502		/* Buffer Manager Control Registers. */
11503		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11504			0x00000000, 0x007fff80 },
11505		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11506			0x00000000, 0x007fffff },
11507		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11508			0x00000000, 0x0000003f },
11509		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11510			0x00000000, 0x000001ff },
11511		{ BUFMGR_MB_HIGH_WATER, 0x0000,
11512			0x00000000, 0x000001ff },
11513		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11514			0xffffffff, 0x00000000 },
11515		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11516			0xffffffff, 0x00000000 },
11517
11518		/* Mailbox Registers */
11519		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11520			0x00000000, 0x000001ff },
11521		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11522			0x00000000, 0x000001ff },
11523		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11524			0x00000000, 0x000007ff },
11525		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11526			0x00000000, 0x000001ff },
11527
11528		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
11529	};
11530
11531	is_5705 = is_5750 = 0;
11532	if (tg3_flag(tp, 5705_PLUS)) {
11533		is_5705 = 1;
11534		if (tg3_flag(tp, 5750_PLUS))
11535			is_5750 = 1;
11536	}
11537
11538	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11539		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11540			continue;
11541
11542		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11543			continue;
11544
11545		if (tg3_flag(tp, IS_5788) &&
11546		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
11547			continue;
11548
11549		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11550			continue;
11551
11552		offset = (u32) reg_tbl[i].offset;
11553		read_mask = reg_tbl[i].read_mask;
11554		write_mask = reg_tbl[i].write_mask;
11555
11556		/* Save the original register content */
11557		save_val = tr32(offset);
11558
11559		/* Determine the read-only value. */
11560		read_val = save_val & read_mask;
11561
11562		/* Write zero to the register, then make sure the read-only bits
11563		 * are not changed and the read/write bits are all zeros.
11564		 */
11565		tw32(offset, 0);
11566
11567		val = tr32(offset);
11568
11569		/* Test the read-only and read/write bits. */
11570		if (((val & read_mask) != read_val) || (val & write_mask))
11571			goto out;
11572
11573		/* Write ones to all the bits defined by read_mask and
11574		 * write_mask, then make sure the read-only bits are not
11575		 * changed and the read/write bits are all ones.
11576		 */
11577		tw32(offset, read_mask | write_mask);
11578
11579		val = tr32(offset);
11580
11581		/* Test the read-only bits. */
11582		if ((val & read_mask) != read_val)
11583			goto out;
11584
11585		/* Test the read/write bits. */
11586		if ((val & write_mask) != write_mask)
11587			goto out;
11588
11589		tw32(offset, save_val);
11590	}
11591
11592	return 0;
11593
11594out:
11595	if (netif_msg_hw(tp))
11596		netdev_err(tp->dev,
11597			   "Register test failed at offset %x\n", offset);
11598	tw32(offset, save_val);
11599	return -EIO;
11600}
11601
11602static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11603{
11604	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11605	int i;
11606	u32 j;
11607
11608	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11609		for (j = 0; j < len; j += 4) {
11610			u32 val;
11611
11612			tg3_write_mem(tp, offset + j, test_pattern[i]);
11613			tg3_read_mem(tp, offset + j, &val);
11614			if (val != test_pattern[i])
11615				return -EIO;
11616		}
11617	}
11618	return 0;
11619}
11620
11621static int tg3_test_memory(struct tg3 *tp)
11622{
11623	static struct mem_entry {
11624		u32 offset;
11625		u32 len;
11626	} mem_tbl_570x[] = {
11627		{ 0x00000000, 0x00b50},
11628		{ 0x00002000, 0x1c000},
11629		{ 0xffffffff, 0x00000}
11630	}, mem_tbl_5705[] = {
11631		{ 0x00000100, 0x0000c},
11632		{ 0x00000200, 0x00008},
11633		{ 0x00004000, 0x00800},
11634		{ 0x00006000, 0x01000},
11635		{ 0x00008000, 0x02000},
11636		{ 0x00010000, 0x0e000},
11637		{ 0xffffffff, 0x00000}
11638	}, mem_tbl_5755[] = {
11639		{ 0x00000200, 0x00008},
11640		{ 0x00004000, 0x00800},
11641		{ 0x00006000, 0x00800},
11642		{ 0x00008000, 0x02000},
11643		{ 0x00010000, 0x0c000},
11644		{ 0xffffffff, 0x00000}
11645	}, mem_tbl_5906[] = {
11646		{ 0x00000200, 0x00008},
11647		{ 0x00004000, 0x00400},
11648		{ 0x00006000, 0x00400},
11649		{ 0x00008000, 0x01000},
11650		{ 0x00010000, 0x01000},
11651		{ 0xffffffff, 0x00000}
11652	}, mem_tbl_5717[] = {
11653		{ 0x00000200, 0x00008},
11654		{ 0x00010000, 0x0a000},
11655		{ 0x00020000, 0x13c00},
11656		{ 0xffffffff, 0x00000}
11657	}, mem_tbl_57765[] = {
11658		{ 0x00000200, 0x00008},
11659		{ 0x00004000, 0x00800},
11660		{ 0x00006000, 0x09800},
11661		{ 0x00010000, 0x0a000},
11662		{ 0xffffffff, 0x00000}
11663	};
11664	struct mem_entry *mem_tbl;
11665	int err = 0;
11666	int i;
11667
11668	if (tg3_flag(tp, 5717_PLUS))
11669		mem_tbl = mem_tbl_5717;
11670	else if (tg3_flag(tp, 57765_CLASS))
11671		mem_tbl = mem_tbl_57765;
11672	else if (tg3_flag(tp, 5755_PLUS))
11673		mem_tbl = mem_tbl_5755;
11674	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11675		mem_tbl = mem_tbl_5906;
11676	else if (tg3_flag(tp, 5705_PLUS))
11677		mem_tbl = mem_tbl_5705;
11678	else
11679		mem_tbl = mem_tbl_570x;
11680
11681	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11682		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11683		if (err)
11684			break;
11685	}
11686
11687	return err;
11688}
11689
11690#define TG3_TSO_MSS		500
11691
11692#define TG3_TSO_IP_HDR_LEN	20
11693#define TG3_TSO_TCP_HDR_LEN	20
11694#define TG3_TSO_TCP_OPT_LEN	12
11695
11696static const u8 tg3_tso_header[] = {
116970x08, 0x00,
116980x45, 0x00, 0x00, 0x00,
116990x00, 0x00, 0x40, 0x00,
117000x40, 0x06, 0x00, 0x00,
117010x0a, 0x00, 0x00, 0x01,
117020x0a, 0x00, 0x00, 0x02,
117030x0d, 0x00, 0xe0, 0x00,
117040x00, 0x00, 0x01, 0x00,
117050x00, 0x00, 0x02, 0x00,
117060x80, 0x10, 0x10, 0x00,
117070x14, 0x09, 0x00, 0x00,
117080x01, 0x01, 0x08, 0x0a,
117090x11, 0x11, 0x11, 0x11,
117100x11, 0x11, 0x11, 0x11,
11711};
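/* A decoded view of tg3_tso_header, for reference: two bytes of
 * Ethernet type (0x0800, IPv4), then a 20-byte IPv4 header
 * (10.0.0.1 -> 10.0.0.2, DF set, protocol TCP), then a TCP header
 * whose data offset of 8 words covers TG3_TSO_TCP_HDR_LEN plus the
 * TG3_TSO_TCP_OPT_LEN bytes of NOP/NOP/timestamp options at the end.
 */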
11712
11713static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11714{
11715	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11716	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11717	u32 budget;
11718	struct sk_buff *skb;
11719	u8 *tx_data, *rx_data;
11720	dma_addr_t map;
11721	int num_pkts, tx_len, rx_len, i, err;
11722	struct tg3_rx_buffer_desc *desc;
11723	struct tg3_napi *tnapi, *rnapi;
11724	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11725
11726	tnapi = &tp->napi[0];
11727	rnapi = &tp->napi[0];
11728	if (tp->irq_cnt > 1) {
11729		if (tg3_flag(tp, ENABLE_RSS))
11730			rnapi = &tp->napi[1];
11731		if (tg3_flag(tp, ENABLE_TSS))
11732			tnapi = &tp->napi[1];
11733	}
11734	coal_now = tnapi->coal_now | rnapi->coal_now;
11735
11736	err = -EIO;
11737
11738	tx_len = pktsz;
11739	skb = netdev_alloc_skb(tp->dev, tx_len);
11740	if (!skb)
11741		return -ENOMEM;
11742
11743	tx_data = skb_put(skb, tx_len);
11744	memcpy(tx_data, tp->dev->dev_addr, 6);
11745	memset(tx_data + 6, 0x0, 8);
11746
11747	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11748
11749	if (tso_loopback) {
11750		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11751
11752		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11753			      TG3_TSO_TCP_OPT_LEN;
11754
11755		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11756		       sizeof(tg3_tso_header));
11757		mss = TG3_TSO_MSS;
11758
11759		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11760		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11761
11762		/* Set the total length field in the IP header */
11763		iph->tot_len = htons((u16)(mss + hdr_len));
11764
11765		base_flags = (TXD_FLAG_CPU_PRE_DMA |
11766			      TXD_FLAG_CPU_POST_DMA);
11767
11768		if (tg3_flag(tp, HW_TSO_1) ||
11769		    tg3_flag(tp, HW_TSO_2) ||
11770		    tg3_flag(tp, HW_TSO_3)) {
11771			struct tcphdr *th;
11772			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11773			th = (struct tcphdr *)&tx_data[val];
11774			th->check = 0;
11775		} else
11776			base_flags |= TXD_FLAG_TCPUDP_CSUM;
11777
11778		if (tg3_flag(tp, HW_TSO_3)) {
11779			mss |= (hdr_len & 0xc) << 12;
11780			if (hdr_len & 0x10)
11781				base_flags |= 0x00000010;
11782			base_flags |= (hdr_len & 0x3e0) << 5;
11783		} else if (tg3_flag(tp, HW_TSO_2))
11784			mss |= hdr_len << 9;
11785		else if (tg3_flag(tp, HW_TSO_1) ||
11786			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11787			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11788		} else {
11789			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11790		}
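		/* Summary of the above: where the header length lands
		 * depends on the TSO generation.  HW_TSO_3 scatters hdr_len
		 * across the mss field and base_flags, HW_TSO_2 packs it
		 * into the upper bits of mss, and the older parts encode
		 * only the TCP option length.
		 */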
11791
11792		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11793	} else {
11794		num_pkts = 1;
11795		data_off = ETH_HLEN;
11796
11797		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
11798		    tx_len > VLAN_ETH_FRAME_LEN)
11799			base_flags |= TXD_FLAG_JMB_PKT;
11800	}
11801
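	/* Fill the payload with a simple incrementing byte pattern; the
	 * receive path below regenerates the same sequence from data_off
	 * and fails the test on the first mismatching byte.
	 */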
11802	for (i = data_off; i < tx_len; i++)
11803		tx_data[i] = (u8) (i & 0xff);
11804
11805	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11806	if (pci_dma_mapping_error(tp->pdev, map)) {
11807		dev_kfree_skb(skb);
11808		return -EIO;
11809	}
11810
11811	val = tnapi->tx_prod;
11812	tnapi->tx_buffers[val].skb = skb;
11813	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11814
11815	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11816	       rnapi->coal_now);
11817
11818	udelay(10);
11819
11820	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11821
11822	budget = tg3_tx_avail(tnapi);
11823	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11824			    base_flags | TXD_FLAG_END, mss, 0)) {
11825		tnapi->tx_buffers[val].skb = NULL;
11826		dev_kfree_skb(skb);
11827		return -EIO;
11828	}
11829
11830	tnapi->tx_prod++;
11831
11832	/* Sync BD data before updating mailbox */
11833	wmb();
11834
11835	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11836	tr32_mailbox(tnapi->prodmbox);
11837
11838	udelay(10);
11839
11840	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
11841	for (i = 0; i < 35; i++) {
11842		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11843		       coal_now);
11844
11845		udelay(10);
11846
11847		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11848		rx_idx = rnapi->hw_status->idx[0].rx_producer;
11849		if ((tx_idx == tnapi->tx_prod) &&
11850		    (rx_idx == (rx_start_idx + num_pkts)))
11851			break;
11852	}
11853
11854	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
11855	dev_kfree_skb(skb);
11856
11857	if (tx_idx != tnapi->tx_prod)
11858		goto out;
11859
11860	if (rx_idx != rx_start_idx + num_pkts)
11861		goto out;
11862
11863	val = data_off;
11864	while (rx_idx != rx_start_idx) {
11865		desc = &rnapi->rx_rcb[rx_start_idx++];
11866		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11867		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11868
11869		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11870		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11871			goto out;
11872
11873		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11874			 - ETH_FCS_LEN;
11875
11876		if (!tso_loopback) {
11877			if (rx_len != tx_len)
11878				goto out;
11879
11880			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11881				if (opaque_key != RXD_OPAQUE_RING_STD)
11882					goto out;
11883			} else {
11884				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11885					goto out;
11886			}
11887		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11888			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11889			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
11890			goto out;
11891		}
11892
11893		if (opaque_key == RXD_OPAQUE_RING_STD) {
11894			rx_data = tpr->rx_std_buffers[desc_idx].data;
11895			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11896					     mapping);
11897		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11898			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
11899			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11900					     mapping);
11901		} else
11902			goto out;
11903
11904		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11905					    PCI_DMA_FROMDEVICE);
11906
11907		rx_data += TG3_RX_OFFSET(tp);
11908		for (i = data_off; i < rx_len; i++, val++) {
11909			if (*(rx_data + i) != (u8) (val & 0xff))
11910				goto out;
11911		}
11912	}
11913
11914	err = 0;
11915
11916	/* tg3_free_rings will unmap and free the rx_data */
11917out:
11918	return err;
11919}
11920
11921#define TG3_STD_LOOPBACK_FAILED		1
11922#define TG3_JMB_LOOPBACK_FAILED		2
11923#define TG3_TSO_LOOPBACK_FAILED		4
11924#define TG3_LOOPBACK_FAILED \
11925	(TG3_STD_LOOPBACK_FAILED | \
11926	 TG3_JMB_LOOPBACK_FAILED | \
11927	 TG3_TSO_LOOPBACK_FAILED)
11928
11929static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
11930{
11931	int err = -EIO;
11932	u32 eee_cap;
11933	u32 jmb_pkt_sz = 9000;
11934
11935	if (tp->dma_limit)
11936		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
11937
11938	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11939	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11940
11941	if (!netif_running(tp->dev)) {
11942		data[0] = TG3_LOOPBACK_FAILED;
11943		data[1] = TG3_LOOPBACK_FAILED;
11944		if (do_extlpbk)
11945			data[2] = TG3_LOOPBACK_FAILED;
11946		goto done;
11947	}
11948
11949	err = tg3_reset_hw(tp, 1);
11950	if (err) {
11951		data[0] = TG3_LOOPBACK_FAILED;
11952		data[1] = TG3_LOOPBACK_FAILED;
11953		if (do_extlpbk)
11954			data[2] = TG3_LOOPBACK_FAILED;
11955		goto done;
11956	}
11957
11958	if (tg3_flag(tp, ENABLE_RSS)) {
11959		int i;
11960
11961		/* Reroute all rx packets to the 1st queue */
11962		for (i = MAC_RSS_INDIR_TBL_0;
11963		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11964			tw32(i, 0x0);
11965	}
11966
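	/* Each of data[0..2] accumulates failures for one loopback mode:
	 * data[0] is MAC loopback, data[1] internal PHY loopback, and
	 * data[2] external loopback; each slot holds a bitmask of the
	 * TG3_*_LOOPBACK_FAILED flags.
	 */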
11967	/* HW errata - mac loopback fails in some cases on 5780.
11968	 * Normal traffic and PHY loopback are not affected by
11969	 * errata.  Also, the MAC loopback test is deprecated for
11970	 * all newer ASIC revisions.
11971	 */
11972	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
11973	    !tg3_flag(tp, CPMU_PRESENT)) {
11974		tg3_mac_loopback(tp, true);
11975
11976		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11977			data[0] |= TG3_STD_LOOPBACK_FAILED;
11978
11979		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11980		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
11981			data[0] |= TG3_JMB_LOOPBACK_FAILED;
11982
11983		tg3_mac_loopback(tp, false);
11984	}
11985
11986	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11987	    !tg3_flag(tp, USE_PHYLIB)) {
11988		int i;
11989
11990		tg3_phy_lpbk_set(tp, 0, false);
11991
11992		/* Wait for link */
11993		for (i = 0; i < 100; i++) {
11994			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11995				break;
11996			mdelay(1);
11997		}
11998
11999		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12000			data[1] |= TG3_STD_LOOPBACK_FAILED;
12001		if (tg3_flag(tp, TSO_CAPABLE) &&
12002		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12003			data[1] |= TG3_TSO_LOOPBACK_FAILED;
12004		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12005		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12006			data[1] |= TG3_JMB_LOOPBACK_FAILED;
12007
12008		if (do_extlpbk) {
12009			tg3_phy_lpbk_set(tp, 0, true);
12010
12011			/* All link indications report up, but the hardware
12012			 * isn't really ready for about 20 msec.  Double it
12013			 * to be sure.
12014			 */
12015			mdelay(40);
12016
12017			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12018				data[2] |= TG3_STD_LOOPBACK_FAILED;
12019			if (tg3_flag(tp, TSO_CAPABLE) &&
12020			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12021				data[2] |= TG3_TSO_LOOPBACK_FAILED;
12022			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12023			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12024				data[2] |= TG3_JMB_LOOPBACK_FAILED;
12025		}
12026
12027		/* Re-enable gphy autopowerdown. */
12028		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12029			tg3_phy_toggle_apd(tp, true);
12030	}
12031
12032	err = (data[0] | data[1] | data[2]) ? -EIO : 0;
12033
12034done:
12035	tp->phy_flags |= eee_cap;
12036
12037	return err;
12038}
12039
12040static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12041			  u64 *data)
12042{
12043	struct tg3 *tp = netdev_priv(dev);
12044	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
12045
12046	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12047	    tg3_power_up(tp)) {
12048		etest->flags |= ETH_TEST_FL_FAILED;
12049		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12050		return;
12051	}
12052
12053	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12054
12055	if (tg3_test_nvram(tp) != 0) {
12056		etest->flags |= ETH_TEST_FL_FAILED;
12057		data[0] = 1;
12058	}
12059	if (!doextlpbk && tg3_test_link(tp)) {
12060		etest->flags |= ETH_TEST_FL_FAILED;
12061		data[1] = 1;
12062	}
12063	if (etest->flags & ETH_TEST_FL_OFFLINE) {
12064		int err, err2 = 0, irq_sync = 0;
12065
12066		if (netif_running(dev)) {
12067			tg3_phy_stop(tp);
12068			tg3_netif_stop(tp);
12069			irq_sync = 1;
12070		}
12071
12072		tg3_full_lock(tp, irq_sync);
12073
12074		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12075		err = tg3_nvram_lock(tp);
12076		tg3_halt_cpu(tp, RX_CPU_BASE);
12077		if (!tg3_flag(tp, 5705_PLUS))
12078			tg3_halt_cpu(tp, TX_CPU_BASE);
12079		if (!err)
12080			tg3_nvram_unlock(tp);
12081
12082		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12083			tg3_phy_reset(tp);
12084
12085		if (tg3_test_registers(tp) != 0) {
12086			etest->flags |= ETH_TEST_FL_FAILED;
12087			data[2] = 1;
12088		}
12089
12090		if (tg3_test_memory(tp) != 0) {
12091			etest->flags |= ETH_TEST_FL_FAILED;
12092			data[3] = 1;
12093		}
12094
12095		if (doextlpbk)
12096			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12097
12098		if (tg3_test_loopback(tp, &data[4], doextlpbk))
12099			etest->flags |= ETH_TEST_FL_FAILED;
12100
12101		tg3_full_unlock(tp);
12102
12103		if (tg3_test_interrupt(tp) != 0) {
12104			etest->flags |= ETH_TEST_FL_FAILED;
12105			data[7] = 1;
12106		}
12107
12108		tg3_full_lock(tp, 0);
12109
12110		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12111		if (netif_running(dev)) {
12112			tg3_flag_set(tp, INIT_COMPLETE);
12113			err2 = tg3_restart_hw(tp, 1);
12114			if (!err2)
12115				tg3_netif_start(tp);
12116		}
12117
12118		tg3_full_unlock(tp);
12119
12120		if (irq_sync && !err2)
12121			tg3_phy_start(tp);
12122	}
12123	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12124		tg3_power_down(tp);
12125
12126}
12127
12128static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12129{
12130	struct mii_ioctl_data *data = if_mii(ifr);
12131	struct tg3 *tp = netdev_priv(dev);
12132	int err;
12133
12134	if (tg3_flag(tp, USE_PHYLIB)) {
12135		struct phy_device *phydev;
12136		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12137			return -EAGAIN;
12138		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12139		return phy_mii_ioctl(phydev, ifr, cmd);
12140	}
12141
12142	switch (cmd) {
12143	case SIOCGMIIPHY:
12144		data->phy_id = tp->phy_addr;
12145
12146		/* fallthru */
12147	case SIOCGMIIREG: {
12148		u32 mii_regval;
12149
12150		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12151			break;			/* We have no PHY */
12152
12153		if (!netif_running(dev))
12154			return -EAGAIN;
12155
12156		spin_lock_bh(&tp->lock);
12157		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12158		spin_unlock_bh(&tp->lock);
12159
12160		data->val_out = mii_regval;
12161
12162		return err;
12163	}
12164
12165	case SIOCSMIIREG:
12166		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12167			break;			/* We have no PHY */
12168
12169		if (!netif_running(dev))
12170			return -EAGAIN;
12171
12172		spin_lock_bh(&tp->lock);
12173		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12174		spin_unlock_bh(&tp->lock);
12175
12176		return err;
12177
12178	default:
12179		/* do nothing */
12180		break;
12181	}
12182	return -EOPNOTSUPP;
12183}
12184
12185static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12186{
12187	struct tg3 *tp = netdev_priv(dev);
12188
12189	memcpy(ec, &tp->coal, sizeof(*ec));
12190	return 0;
12191}
12192
12193static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12194{
12195	struct tg3 *tp = netdev_priv(dev);
12196	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12197	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12198
12199	if (!tg3_flag(tp, 5705_PLUS)) {
12200		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12201		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12202		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12203		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12204	}
12205
12206	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12207	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12208	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12209	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12210	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12211	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12212	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12213	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12214	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12215	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12216		return -EINVAL;
12217
12218	/* No rx interrupts will be generated if both are zero */
12219	if ((ec->rx_coalesce_usecs == 0) &&
12220	    (ec->rx_max_coalesced_frames == 0))
12221		return -EINVAL;
12222
12223	/* No tx interrupts will be generated if both are zero */
12224	if ((ec->tx_coalesce_usecs == 0) &&
12225	    (ec->tx_max_coalesced_frames == 0))
12226		return -EINVAL;
12227
12228	/* Only copy relevant parameters, ignore all others. */
12229	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12230	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12231	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12232	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12233	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12234	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12235	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12236	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12237	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12238
12239	if (netif_running(dev)) {
12240		tg3_full_lock(tp, 0);
12241		__tg3_set_coalesce(tp, &tp->coal);
12242		tg3_full_unlock(tp);
12243	}
12244	return 0;
12245}
12246
12247static const struct ethtool_ops tg3_ethtool_ops = {
12248	.get_settings		= tg3_get_settings,
12249	.set_settings		= tg3_set_settings,
12250	.get_drvinfo		= tg3_get_drvinfo,
12251	.get_regs_len		= tg3_get_regs_len,
12252	.get_regs		= tg3_get_regs,
12253	.get_wol		= tg3_get_wol,
12254	.set_wol		= tg3_set_wol,
12255	.get_msglevel		= tg3_get_msglevel,
12256	.set_msglevel		= tg3_set_msglevel,
12257	.nway_reset		= tg3_nway_reset,
12258	.get_link		= ethtool_op_get_link,
12259	.get_eeprom_len		= tg3_get_eeprom_len,
12260	.get_eeprom		= tg3_get_eeprom,
12261	.set_eeprom		= tg3_set_eeprom,
12262	.get_ringparam		= tg3_get_ringparam,
12263	.set_ringparam		= tg3_set_ringparam,
12264	.get_pauseparam		= tg3_get_pauseparam,
12265	.set_pauseparam		= tg3_set_pauseparam,
12266	.self_test		= tg3_self_test,
12267	.get_strings		= tg3_get_strings,
12268	.set_phys_id		= tg3_set_phys_id,
12269	.get_ethtool_stats	= tg3_get_ethtool_stats,
12270	.get_coalesce		= tg3_get_coalesce,
12271	.set_coalesce		= tg3_set_coalesce,
12272	.get_sset_count		= tg3_get_sset_count,
12273	.get_rxnfc		= tg3_get_rxnfc,
12274	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
12275	.get_rxfh_indir		= tg3_get_rxfh_indir,
12276	.set_rxfh_indir		= tg3_set_rxfh_indir,
12277	.get_ts_info		= ethtool_op_get_ts_info,
12278};
12279
12280static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
12281						struct rtnl_link_stats64 *stats)
12282{
12283	struct tg3 *tp = netdev_priv(dev);
12284
12285	spin_lock_bh(&tp->lock);
12286	if (!tp->hw_stats) {
12287		spin_unlock_bh(&tp->lock);
12288		return &tp->net_stats_prev;
12289	}
12290
12291	tg3_get_nstats(tp, stats);
12292	spin_unlock_bh(&tp->lock);
12293
12294	return stats;
12295}
12296
12297static void tg3_set_rx_mode(struct net_device *dev)
12298{
12299	struct tg3 *tp = netdev_priv(dev);
12300
12301	if (!netif_running(dev))
12302		return;
12303
12304	tg3_full_lock(tp, 0);
12305	__tg3_set_rx_mode(dev);
12306	tg3_full_unlock(tp);
12307}
12308
12309static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12310			       int new_mtu)
12311{
12312	dev->mtu = new_mtu;
12313
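	/* On 5780-class parts TSO and jumbo frames are mutually
	 * exclusive, so the MTU crossing ETH_DATA_LEN toggles TSO
	 * support: netdev_update_features() re-evaluates the feature
	 * flags through tg3_fix_features() on either side of the change.
	 */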
12314	if (new_mtu > ETH_DATA_LEN) {
12315		if (tg3_flag(tp, 5780_CLASS)) {
12316			netdev_update_features(dev);
12317			tg3_flag_clear(tp, TSO_CAPABLE);
12318		} else {
12319			tg3_flag_set(tp, JUMBO_RING_ENABLE);
12320		}
12321	} else {
12322		if (tg3_flag(tp, 5780_CLASS)) {
12323			tg3_flag_set(tp, TSO_CAPABLE);
12324			netdev_update_features(dev);
12325		}
12326		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
12327	}
12328}
12329
12330static int tg3_change_mtu(struct net_device *dev, int new_mtu)
12331{
12332	struct tg3 *tp = netdev_priv(dev);
12333	int err, reset_phy = 0;
12334
12335	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
12336		return -EINVAL;
12337
12338	if (!netif_running(dev)) {
12339		/* We'll just catch it later when the
12340		 * device is brought up.
12341		 */
12342		tg3_set_mtu(dev, tp, new_mtu);
12343		return 0;
12344	}
12345
12346	tg3_phy_stop(tp);
12347
12348	tg3_netif_stop(tp);
12349
12350	tg3_full_lock(tp, 1);
12351
12352	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12353
12354	tg3_set_mtu(dev, tp, new_mtu);
12355
12356	/* Reset PHY, otherwise the read DMA engine will be in a mode that
12357	 * breaks all requests to 256 bytes.
12358	 */
12359	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
12360		reset_phy = 1;
12361
12362	err = tg3_restart_hw(tp, reset_phy);
12363
12364	if (!err)
12365		tg3_netif_start(tp);
12366
12367	tg3_full_unlock(tp);
12368
12369	if (!err)
12370		tg3_phy_start(tp);
12371
12372	return err;
12373}
12374
12375static const struct net_device_ops tg3_netdev_ops = {
12376	.ndo_open		= tg3_open,
12377	.ndo_stop		= tg3_close,
12378	.ndo_start_xmit		= tg3_start_xmit,
12379	.ndo_get_stats64	= tg3_get_stats64,
12380	.ndo_validate_addr	= eth_validate_addr,
12381	.ndo_set_rx_mode	= tg3_set_rx_mode,
12382	.ndo_set_mac_address	= tg3_set_mac_addr,
12383	.ndo_do_ioctl		= tg3_ioctl,
12384	.ndo_tx_timeout		= tg3_tx_timeout,
12385	.ndo_change_mtu		= tg3_change_mtu,
12386	.ndo_fix_features	= tg3_fix_features,
12387	.ndo_set_features	= tg3_set_features,
12388#ifdef CONFIG_NET_POLL_CONTROLLER
12389	.ndo_poll_controller	= tg3_poll_controller,
12390#endif
12391};
12392
12393static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12394{
12395	u32 cursize, val, magic;
12396
12397	tp->nvram_size = EEPROM_CHIP_SIZE;
12398
12399	if (tg3_nvram_read(tp, 0, &magic) != 0)
12400		return;
12401
12402	if ((magic != TG3_EEPROM_MAGIC) &&
12403	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12404	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12405		return;
12406
12407	/*
12408	 * Size the chip by reading offsets at increasing powers of two.
12409	 * When we encounter our validation signature, we know the addressing
12410	 * has wrapped around, and thus have our chip size.
12411	 */
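	/*
	 * For example, on a hypothetical 4KB part the reads at 0x10
	 * through 0x800 return ordinary data, while the read at 0x1000
	 * aliases offset 0 and returns the magic value, so cursize
	 * settles at 0x1000.
	 */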
12412	cursize = 0x10;
12413
12414	while (cursize < tp->nvram_size) {
12415		if (tg3_nvram_read(tp, cursize, &val) != 0)
12416			return;
12417
12418		if (val == magic)
12419			break;
12420
12421		cursize <<= 1;
12422	}
12423
12424	tp->nvram_size = cursize;
12425}
12426
12427static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12428{
12429	u32 val;
12430
12431	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12432		return;
12433
12434	/* Selfboot format */
12435	if (val != TG3_EEPROM_MAGIC) {
12436		tg3_get_eeprom_size(tp);
12437		return;
12438	}
12439
12440	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12441		if (val != 0) {
12442			/* This is confusing.  We want to operate on the
12443			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
12444			 * call will read from NVRAM and byteswap the data
12445			 * according to the byteswapping settings for all
12446			 * other register accesses.  This ensures the data we
12447			 * want will always reside in the lower 16-bits.
12448			 * However, the data in NVRAM is in LE format, which
12449			 * means the data from the NVRAM read will always be
12450			 * opposite the endianness of the CPU.  The 16-bit
12451			 * byteswap then brings the data to CPU endianness.
12452			 */
12453			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12454			return;
12455		}
12456	}
12457	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12458}
12459
12460static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12461{
12462	u32 nvcfg1;
12463
12464	nvcfg1 = tr32(NVRAM_CFG1);
12465	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12466		tg3_flag_set(tp, FLASH);
12467	} else {
12468		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12469		tw32(NVRAM_CFG1, nvcfg1);
12470	}
12471
12472	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12473	    tg3_flag(tp, 5780_CLASS)) {
12474		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12475		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12476			tp->nvram_jedecnum = JEDEC_ATMEL;
12477			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12478			tg3_flag_set(tp, NVRAM_BUFFERED);
12479			break;
12480		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12481			tp->nvram_jedecnum = JEDEC_ATMEL;
12482			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12483			break;
12484		case FLASH_VENDOR_ATMEL_EEPROM:
12485			tp->nvram_jedecnum = JEDEC_ATMEL;
12486			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12487			tg3_flag_set(tp, NVRAM_BUFFERED);
12488			break;
12489		case FLASH_VENDOR_ST:
12490			tp->nvram_jedecnum = JEDEC_ST;
12491			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12492			tg3_flag_set(tp, NVRAM_BUFFERED);
12493			break;
12494		case FLASH_VENDOR_SAIFUN:
12495			tp->nvram_jedecnum = JEDEC_SAIFUN;
12496			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12497			break;
12498		case FLASH_VENDOR_SST_SMALL:
12499		case FLASH_VENDOR_SST_LARGE:
12500			tp->nvram_jedecnum = JEDEC_SST;
12501			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12502			break;
12503		}
12504	} else {
12505		tp->nvram_jedecnum = JEDEC_ATMEL;
12506		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12507		tg3_flag_set(tp, NVRAM_BUFFERED);
12508	}
12509}
12510
12511static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12512{
12513	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12514	case FLASH_5752PAGE_SIZE_256:
12515		tp->nvram_pagesize = 256;
12516		break;
12517	case FLASH_5752PAGE_SIZE_512:
12518		tp->nvram_pagesize = 512;
12519		break;
12520	case FLASH_5752PAGE_SIZE_1K:
12521		tp->nvram_pagesize = 1024;
12522		break;
12523	case FLASH_5752PAGE_SIZE_2K:
12524		tp->nvram_pagesize = 2048;
12525		break;
12526	case FLASH_5752PAGE_SIZE_4K:
12527		tp->nvram_pagesize = 4096;
12528		break;
12529	case FLASH_5752PAGE_SIZE_264:
12530		tp->nvram_pagesize = 264;
12531		break;
12532	case FLASH_5752PAGE_SIZE_528:
12533		tp->nvram_pagesize = 528;
12534		break;
12535	}
12536}
12537
12538static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12539{
12540	u32 nvcfg1;
12541
12542	nvcfg1 = tr32(NVRAM_CFG1);
12543
12544	/* NVRAM protection for TPM */
12545	if (nvcfg1 & (1 << 27))
12546		tg3_flag_set(tp, PROTECTED_NVRAM);
12547
12548	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12549	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12550	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12551		tp->nvram_jedecnum = JEDEC_ATMEL;
12552		tg3_flag_set(tp, NVRAM_BUFFERED);
12553		break;
12554	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12555		tp->nvram_jedecnum = JEDEC_ATMEL;
12556		tg3_flag_set(tp, NVRAM_BUFFERED);
12557		tg3_flag_set(tp, FLASH);
12558		break;
12559	case FLASH_5752VENDOR_ST_M45PE10:
12560	case FLASH_5752VENDOR_ST_M45PE20:
12561	case FLASH_5752VENDOR_ST_M45PE40:
12562		tp->nvram_jedecnum = JEDEC_ST;
12563		tg3_flag_set(tp, NVRAM_BUFFERED);
12564		tg3_flag_set(tp, FLASH);
12565		break;
12566	}
12567
12568	if (tg3_flag(tp, FLASH)) {
12569		tg3_nvram_get_pagesize(tp, nvcfg1);
12570	} else {
12571		/* For eeprom, set pagesize to maximum eeprom size */
12572		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12573
12574		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12575		tw32(NVRAM_CFG1, nvcfg1);
12576	}
12577}
12578
12579static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12580{
12581	u32 nvcfg1, protect = 0;
12582
12583	nvcfg1 = tr32(NVRAM_CFG1);
12584
12585	/* NVRAM protection for TPM */
12586	if (nvcfg1 & (1 << 27)) {
12587		tg3_flag_set(tp, PROTECTED_NVRAM);
12588		protect = 1;
12589	}
12590
12591	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12592	switch (nvcfg1) {
12593	case FLASH_5755VENDOR_ATMEL_FLASH_1:
12594	case FLASH_5755VENDOR_ATMEL_FLASH_2:
12595	case FLASH_5755VENDOR_ATMEL_FLASH_3:
12596	case FLASH_5755VENDOR_ATMEL_FLASH_5:
12597		tp->nvram_jedecnum = JEDEC_ATMEL;
12598		tg3_flag_set(tp, NVRAM_BUFFERED);
12599		tg3_flag_set(tp, FLASH);
12600		tp->nvram_pagesize = 264;
12601		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12602		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12603			tp->nvram_size = (protect ? 0x3e200 :
12604					  TG3_NVRAM_SIZE_512KB);
12605		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12606			tp->nvram_size = (protect ? 0x1f200 :
12607					  TG3_NVRAM_SIZE_256KB);
12608		else
12609			tp->nvram_size = (protect ? 0x1f200 :
12610					  TG3_NVRAM_SIZE_128KB);
12611		break;
12612	case FLASH_5752VENDOR_ST_M45PE10:
12613	case FLASH_5752VENDOR_ST_M45PE20:
12614	case FLASH_5752VENDOR_ST_M45PE40:
12615		tp->nvram_jedecnum = JEDEC_ST;
12616		tg3_flag_set(tp, NVRAM_BUFFERED);
12617		tg3_flag_set(tp, FLASH);
12618		tp->nvram_pagesize = 256;
12619		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12620			tp->nvram_size = (protect ?
12621					  TG3_NVRAM_SIZE_64KB :
12622					  TG3_NVRAM_SIZE_128KB);
12623		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12624			tp->nvram_size = (protect ?
12625					  TG3_NVRAM_SIZE_64KB :
12626					  TG3_NVRAM_SIZE_256KB);
12627		else
12628			tp->nvram_size = (protect ?
12629					  TG3_NVRAM_SIZE_128KB :
12630					  TG3_NVRAM_SIZE_512KB);
12631		break;
12632	}
12633}
12634
12635static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12636{
12637	u32 nvcfg1;
12638
12639	nvcfg1 = tr32(NVRAM_CFG1);
12640
12641	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12642	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12643	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12644	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12645	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12646		tp->nvram_jedecnum = JEDEC_ATMEL;
12647		tg3_flag_set(tp, NVRAM_BUFFERED);
12648		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12649
12650		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12651		tw32(NVRAM_CFG1, nvcfg1);
12652		break;
12653	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12654	case FLASH_5755VENDOR_ATMEL_FLASH_1:
12655	case FLASH_5755VENDOR_ATMEL_FLASH_2:
12656	case FLASH_5755VENDOR_ATMEL_FLASH_3:
12657		tp->nvram_jedecnum = JEDEC_ATMEL;
12658		tg3_flag_set(tp, NVRAM_BUFFERED);
12659		tg3_flag_set(tp, FLASH);
12660		tp->nvram_pagesize = 264;
12661		break;
12662	case FLASH_5752VENDOR_ST_M45PE10:
12663	case FLASH_5752VENDOR_ST_M45PE20:
12664	case FLASH_5752VENDOR_ST_M45PE40:
12665		tp->nvram_jedecnum = JEDEC_ST;
12666		tg3_flag_set(tp, NVRAM_BUFFERED);
12667		tg3_flag_set(tp, FLASH);
12668		tp->nvram_pagesize = 256;
12669		break;
12670	}
12671}
12672
12673static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12674{
12675	u32 nvcfg1, protect = 0;
12676
12677	nvcfg1 = tr32(NVRAM_CFG1);
12678
12679	/* NVRAM protection for TPM */
12680	if (nvcfg1 & (1 << 27)) {
12681		tg3_flag_set(tp, PROTECTED_NVRAM);
12682		protect = 1;
12683	}
12684
12685	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12686	switch (nvcfg1) {
12687	case FLASH_5761VENDOR_ATMEL_ADB021D:
12688	case FLASH_5761VENDOR_ATMEL_ADB041D:
12689	case FLASH_5761VENDOR_ATMEL_ADB081D:
12690	case FLASH_5761VENDOR_ATMEL_ADB161D:
12691	case FLASH_5761VENDOR_ATMEL_MDB021D:
12692	case FLASH_5761VENDOR_ATMEL_MDB041D:
12693	case FLASH_5761VENDOR_ATMEL_MDB081D:
12694	case FLASH_5761VENDOR_ATMEL_MDB161D:
12695		tp->nvram_jedecnum = JEDEC_ATMEL;
12696		tg3_flag_set(tp, NVRAM_BUFFERED);
12697		tg3_flag_set(tp, FLASH);
12698		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12699		tp->nvram_pagesize = 256;
12700		break;
12701	case FLASH_5761VENDOR_ST_A_M45PE20:
12702	case FLASH_5761VENDOR_ST_A_M45PE40:
12703	case FLASH_5761VENDOR_ST_A_M45PE80:
12704	case FLASH_5761VENDOR_ST_A_M45PE16:
12705	case FLASH_5761VENDOR_ST_M_M45PE20:
12706	case FLASH_5761VENDOR_ST_M_M45PE40:
12707	case FLASH_5761VENDOR_ST_M_M45PE80:
12708	case FLASH_5761VENDOR_ST_M_M45PE16:
12709		tp->nvram_jedecnum = JEDEC_ST;
12710		tg3_flag_set(tp, NVRAM_BUFFERED);
12711		tg3_flag_set(tp, FLASH);
12712		tp->nvram_pagesize = 256;
12713		break;
12714	}
12715
12716	if (protect) {
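		/* With TPM protection enabled only the region below the
		 * lockout address is usable, so report that boundary as
		 * the NVRAM size.
		 */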
12717		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12718	} else {
12719		switch (nvcfg1) {
12720		case FLASH_5761VENDOR_ATMEL_ADB161D:
12721		case FLASH_5761VENDOR_ATMEL_MDB161D:
12722		case FLASH_5761VENDOR_ST_A_M45PE16:
12723		case FLASH_5761VENDOR_ST_M_M45PE16:
12724			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12725			break;
12726		case FLASH_5761VENDOR_ATMEL_ADB081D:
12727		case FLASH_5761VENDOR_ATMEL_MDB081D:
12728		case FLASH_5761VENDOR_ST_A_M45PE80:
12729		case FLASH_5761VENDOR_ST_M_M45PE80:
12730			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12731			break;
12732		case FLASH_5761VENDOR_ATMEL_ADB041D:
12733		case FLASH_5761VENDOR_ATMEL_MDB041D:
12734		case FLASH_5761VENDOR_ST_A_M45PE40:
12735		case FLASH_5761VENDOR_ST_M_M45PE40:
12736			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12737			break;
12738		case FLASH_5761VENDOR_ATMEL_ADB021D:
12739		case FLASH_5761VENDOR_ATMEL_MDB021D:
12740		case FLASH_5761VENDOR_ST_A_M45PE20:
12741		case FLASH_5761VENDOR_ST_M_M45PE20:
12742			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12743			break;
12744		}
12745	}
12746}
12747
12748static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12749{
12750	tp->nvram_jedecnum = JEDEC_ATMEL;
12751	tg3_flag_set(tp, NVRAM_BUFFERED);
12752	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12753}
12754
12755static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12756{
12757	u32 nvcfg1;
12758
12759	nvcfg1 = tr32(NVRAM_CFG1);
12760
12761	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12762	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12763	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12764		tp->nvram_jedecnum = JEDEC_ATMEL;
12765		tg3_flag_set(tp, NVRAM_BUFFERED);
12766		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12767
12768		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12769		tw32(NVRAM_CFG1, nvcfg1);
12770		return;
12771	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12772	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12773	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12774	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12775	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12776	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12777	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12778		tp->nvram_jedecnum = JEDEC_ATMEL;
12779		tg3_flag_set(tp, NVRAM_BUFFERED);
12780		tg3_flag_set(tp, FLASH);
12781
12782		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12783		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12784		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12785		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12786			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12787			break;
12788		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12789		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12790			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12791			break;
12792		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12793		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12794			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12795			break;
12796		}
12797		break;
12798	case FLASH_5752VENDOR_ST_M45PE10:
12799	case FLASH_5752VENDOR_ST_M45PE20:
12800	case FLASH_5752VENDOR_ST_M45PE40:
12801		tp->nvram_jedecnum = JEDEC_ST;
12802		tg3_flag_set(tp, NVRAM_BUFFERED);
12803		tg3_flag_set(tp, FLASH);
12804
12805		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12806		case FLASH_5752VENDOR_ST_M45PE10:
12807			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12808			break;
12809		case FLASH_5752VENDOR_ST_M45PE20:
12810			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12811			break;
12812		case FLASH_5752VENDOR_ST_M45PE40:
12813			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12814			break;
12815		}
12816		break;
12817	default:
12818		tg3_flag_set(tp, NO_NVRAM);
12819		return;
12820	}
12821
12822	tg3_nvram_get_pagesize(tp, nvcfg1);
12823	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12824		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12825}
12826
12827
12828static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12829{
12830	u32 nvcfg1;
12831
12832	nvcfg1 = tr32(NVRAM_CFG1);
12833
12834	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12835	case FLASH_5717VENDOR_ATMEL_EEPROM:
12836	case FLASH_5717VENDOR_MICRO_EEPROM:
12837		tp->nvram_jedecnum = JEDEC_ATMEL;
12838		tg3_flag_set(tp, NVRAM_BUFFERED);
12839		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12840
12841		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12842		tw32(NVRAM_CFG1, nvcfg1);
12843		return;
12844	case FLASH_5717VENDOR_ATMEL_MDB011D:
12845	case FLASH_5717VENDOR_ATMEL_ADB011B:
12846	case FLASH_5717VENDOR_ATMEL_ADB011D:
12847	case FLASH_5717VENDOR_ATMEL_MDB021D:
12848	case FLASH_5717VENDOR_ATMEL_ADB021B:
12849	case FLASH_5717VENDOR_ATMEL_ADB021D:
12850	case FLASH_5717VENDOR_ATMEL_45USPT:
12851		tp->nvram_jedecnum = JEDEC_ATMEL;
12852		tg3_flag_set(tp, NVRAM_BUFFERED);
12853		tg3_flag_set(tp, FLASH);
12854
12855		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12856		case FLASH_5717VENDOR_ATMEL_MDB021D:
12857			/* Detect size with tg3_nvram_get_size() */
12858			break;
12859		case FLASH_5717VENDOR_ATMEL_ADB021B:
12860		case FLASH_5717VENDOR_ATMEL_ADB021D:
12861			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12862			break;
12863		default:
12864			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12865			break;
12866		}
12867		break;
12868	case FLASH_5717VENDOR_ST_M_M25PE10:
12869	case FLASH_5717VENDOR_ST_A_M25PE10:
12870	case FLASH_5717VENDOR_ST_M_M45PE10:
12871	case FLASH_5717VENDOR_ST_A_M45PE10:
12872	case FLASH_5717VENDOR_ST_M_M25PE20:
12873	case FLASH_5717VENDOR_ST_A_M25PE20:
12874	case FLASH_5717VENDOR_ST_M_M45PE20:
12875	case FLASH_5717VENDOR_ST_A_M45PE20:
12876	case FLASH_5717VENDOR_ST_25USPT:
12877	case FLASH_5717VENDOR_ST_45USPT:
12878		tp->nvram_jedecnum = JEDEC_ST;
12879		tg3_flag_set(tp, NVRAM_BUFFERED);
12880		tg3_flag_set(tp, FLASH);
12881
12882		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12883		case FLASH_5717VENDOR_ST_M_M25PE20:
12884		case FLASH_5717VENDOR_ST_M_M45PE20:
12885			/* Detect size with tg3_nvram_get_size() */
12886			break;
12887		case FLASH_5717VENDOR_ST_A_M25PE20:
12888		case FLASH_5717VENDOR_ST_A_M45PE20:
12889			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12890			break;
12891		default:
12892			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12893			break;
12894		}
12895		break;
12896	default:
12897		tg3_flag_set(tp, NO_NVRAM);
12898		return;
12899	}
12900
12901	tg3_nvram_get_pagesize(tp, nvcfg1);
12902	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12903		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12904}
12905
12906static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12907{
12908	u32 nvcfg1, nvmpinstrp;
12909
12910	nvcfg1 = tr32(NVRAM_CFG1);
12911	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12912
12913	switch (nvmpinstrp) {
12914	case FLASH_5720_EEPROM_HD:
12915	case FLASH_5720_EEPROM_LD:
12916		tp->nvram_jedecnum = JEDEC_ATMEL;
12917		tg3_flag_set(tp, NVRAM_BUFFERED);
12918
12919		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12920		tw32(NVRAM_CFG1, nvcfg1);
12921		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12922			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12923		else
12924			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12925		return;
12926	case FLASH_5720VENDOR_M_ATMEL_DB011D:
12927	case FLASH_5720VENDOR_A_ATMEL_DB011B:
12928	case FLASH_5720VENDOR_A_ATMEL_DB011D:
12929	case FLASH_5720VENDOR_M_ATMEL_DB021D:
12930	case FLASH_5720VENDOR_A_ATMEL_DB021B:
12931	case FLASH_5720VENDOR_A_ATMEL_DB021D:
12932	case FLASH_5720VENDOR_M_ATMEL_DB041D:
12933	case FLASH_5720VENDOR_A_ATMEL_DB041B:
12934	case FLASH_5720VENDOR_A_ATMEL_DB041D:
12935	case FLASH_5720VENDOR_M_ATMEL_DB081D:
12936	case FLASH_5720VENDOR_A_ATMEL_DB081D:
12937	case FLASH_5720VENDOR_ATMEL_45USPT:
12938		tp->nvram_jedecnum = JEDEC_ATMEL;
12939		tg3_flag_set(tp, NVRAM_BUFFERED);
12940		tg3_flag_set(tp, FLASH);
12941
12942		switch (nvmpinstrp) {
12943		case FLASH_5720VENDOR_M_ATMEL_DB021D:
12944		case FLASH_5720VENDOR_A_ATMEL_DB021B:
12945		case FLASH_5720VENDOR_A_ATMEL_DB021D:
12946			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12947			break;
12948		case FLASH_5720VENDOR_M_ATMEL_DB041D:
12949		case FLASH_5720VENDOR_A_ATMEL_DB041B:
12950		case FLASH_5720VENDOR_A_ATMEL_DB041D:
12951			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12952			break;
12953		case FLASH_5720VENDOR_M_ATMEL_DB081D:
12954		case FLASH_5720VENDOR_A_ATMEL_DB081D:
12955			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12956			break;
12957		default:
12958			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12959			break;
12960		}
12961		break;
12962	case FLASH_5720VENDOR_M_ST_M25PE10:
12963	case FLASH_5720VENDOR_M_ST_M45PE10:
12964	case FLASH_5720VENDOR_A_ST_M25PE10:
12965	case FLASH_5720VENDOR_A_ST_M45PE10:
12966	case FLASH_5720VENDOR_M_ST_M25PE20:
12967	case FLASH_5720VENDOR_M_ST_M45PE20:
12968	case FLASH_5720VENDOR_A_ST_M25PE20:
12969	case FLASH_5720VENDOR_A_ST_M45PE20:
12970	case FLASH_5720VENDOR_M_ST_M25PE40:
12971	case FLASH_5720VENDOR_M_ST_M45PE40:
12972	case FLASH_5720VENDOR_A_ST_M25PE40:
12973	case FLASH_5720VENDOR_A_ST_M45PE40:
12974	case FLASH_5720VENDOR_M_ST_M25PE80:
12975	case FLASH_5720VENDOR_M_ST_M45PE80:
12976	case FLASH_5720VENDOR_A_ST_M25PE80:
12977	case FLASH_5720VENDOR_A_ST_M45PE80:
12978	case FLASH_5720VENDOR_ST_25USPT:
12979	case FLASH_5720VENDOR_ST_45USPT:
12980		tp->nvram_jedecnum = JEDEC_ST;
12981		tg3_flag_set(tp, NVRAM_BUFFERED);
12982		tg3_flag_set(tp, FLASH);
12983
12984		switch (nvmpinstrp) {
12985		case FLASH_5720VENDOR_M_ST_M25PE20:
12986		case FLASH_5720VENDOR_M_ST_M45PE20:
12987		case FLASH_5720VENDOR_A_ST_M25PE20:
12988		case FLASH_5720VENDOR_A_ST_M45PE20:
12989			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12990			break;
12991		case FLASH_5720VENDOR_M_ST_M25PE40:
12992		case FLASH_5720VENDOR_M_ST_M45PE40:
12993		case FLASH_5720VENDOR_A_ST_M25PE40:
12994		case FLASH_5720VENDOR_A_ST_M45PE40:
12995			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12996			break;
12997		case FLASH_5720VENDOR_M_ST_M25PE80:
12998		case FLASH_5720VENDOR_M_ST_M45PE80:
12999		case FLASH_5720VENDOR_A_ST_M25PE80:
13000		case FLASH_5720VENDOR_A_ST_M45PE80:
13001			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13002			break;
13003		default:
13004			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13005			break;
13006		}
13007		break;
13008	default:
13009		tg3_flag_set(tp, NO_NVRAM);
13010		return;
13011	}
13012
13013	tg3_nvram_get_pagesize(tp, nvcfg1);
13014	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13015		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13016}
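
/* The repeated "pagesize != 264 && pagesize != 528" test above is
 * about Atmel AT45DB DataFlash parts, whose native pages are a power
 * of two plus spare bytes and which are addressed as
 * (page << page_shift) | byte_offset rather than by flat byte
 * address.  Any other page size uses linear addressing, hence
 * NO_NVRAM_ADDR_TRANS.  A hedged sketch of the translation (the
 * driver's real version is tg3_nvram_phys_addr(); the page_shift
 * values here are assumptions for illustration):
 */
static inline u32 tg3_dataflash_addr_sketch(u32 byte_addr, u32 pagesize)
{
	u32 page_shift = (pagesize == 264) ? 9 : 10;	/* assumed */

	return ((byte_addr / pagesize) << page_shift) +
	       (byte_addr % pagesize);
}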
13017
13018/* Chips other than 5700/5701 use the NVRAM for fetching info. */
13019static void __devinit tg3_nvram_init(struct tg3 *tp)
13020{
13021	tw32_f(GRC_EEPROM_ADDR,
13022	     (EEPROM_ADDR_FSM_RESET |
13023	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
13024	       EEPROM_ADDR_CLKPERD_SHIFT)));
13025
13026	msleep(1);
13027
13028	/* Enable seeprom accesses. */
13029	tw32_f(GRC_LOCAL_CTRL,
13030	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
13031	udelay(100);
13032
13033	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13034	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
13035		tg3_flag_set(tp, NVRAM);
13036
13037		if (tg3_nvram_lock(tp)) {
13038			netdev_warn(tp->dev,
13039				    "Cannot get nvram lock, %s failed\n",
13040				    __func__);
13041			return;
13042		}
13043		tg3_enable_nvram_access(tp);
13044
13045		tp->nvram_size = 0;
13046
13047		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13048			tg3_get_5752_nvram_info(tp);
13049		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13050			tg3_get_5755_nvram_info(tp);
13051		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13052			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13053			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13054			tg3_get_5787_nvram_info(tp);
13055		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
13056			tg3_get_5761_nvram_info(tp);
13057		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13058			tg3_get_5906_nvram_info(tp);
13059		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13060			 tg3_flag(tp, 57765_CLASS))
13061			tg3_get_57780_nvram_info(tp);
13062		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13063			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13064			tg3_get_5717_nvram_info(tp);
13065		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13066			tg3_get_5720_nvram_info(tp);
13067		else
13068			tg3_get_nvram_info(tp);
13069
13070		if (tp->nvram_size == 0)
13071			tg3_get_nvram_size(tp);
13072
13073		tg3_disable_nvram_access(tp);
13074		tg3_nvram_unlock(tp);
13075
13076	} else {
13077		tg3_flag_clear(tp, NVRAM);
13078		tg3_flag_clear(tp, NVRAM_BUFFERED);
13079
13080		tg3_get_eeprom_size(tp);
13081	}
13082}
13083
13084struct subsys_tbl_ent {
13085	u16 subsys_vendor, subsys_devid;
13086	u32 phy_id;
13087};
13088
13089static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
13090	/* Broadcom boards. */
13091	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13092	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
13093	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13094	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
13095	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13096	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13097	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13098	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13099	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13100	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13101	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13102	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13103	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13104	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13105	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13106	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13107	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13108	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13109	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13110	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13111	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
13112	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13113
13114	/* 3com boards. */
13115	{ TG3PCI_SUBVENDOR_ID_3COM,
13116	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13117	{ TG3PCI_SUBVENDOR_ID_3COM,
13118	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13119	{ TG3PCI_SUBVENDOR_ID_3COM,
13120	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13121	{ TG3PCI_SUBVENDOR_ID_3COM,
13122	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13123	{ TG3PCI_SUBVENDOR_ID_3COM,
13124	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13125
13126	/* DELL boards. */
13127	{ TG3PCI_SUBVENDOR_ID_DELL,
13128	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13129	{ TG3PCI_SUBVENDOR_ID_DELL,
13130	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13131	{ TG3PCI_SUBVENDOR_ID_DELL,
13132	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13133	{ TG3PCI_SUBVENDOR_ID_DELL,
13134	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13135
13136	/* Compaq boards. */
13137	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
13138	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13139	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
13140	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13141	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
13142	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13143	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
13144	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13145	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
13146	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13147
13148	/* IBM boards. */
13149	{ TG3PCI_SUBVENDOR_ID_IBM,
13150	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13151};
13152
13153static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13154{
13155	int i;
13156
13157	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13158		if ((subsys_id_to_phy_id[i].subsys_vendor ==
13159		     tp->pdev->subsystem_vendor) &&
13160		    (subsys_id_to_phy_id[i].subsys_devid ==
13161		     tp->pdev->subsystem_device))
13162			return &subsys_id_to_phy_id[i];
13163	}
13164	return NULL;
13165}
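
/* tp->pdev->subsystem_vendor and ->subsystem_device used above are
 * cached by the PCI core from config space offsets 0x2c and 0x2e;
 * reading them by hand would look like this (illustrative only):
 */
static inline void tg3_read_subsys_ids_sketch(struct pci_dev *pdev,
					      u16 *sv, u16 *sd)
{
	pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, sv);
	pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, sd);
}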
13166
13167static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13168{
13169	u32 val;
13170
13171	tp->phy_id = TG3_PHY_ID_INVALID;
13172	tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13173
13174	/* Assume an onboard, WOL-capable device by default. */
13175	tg3_flag_set(tp, EEPROM_WRITE_PROT);
13176	tg3_flag_set(tp, WOL_CAP);
13177
13178	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13179		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13180			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13181			tg3_flag_set(tp, IS_NIC);
13182		}
13183		val = tr32(VCPU_CFGSHDW);
13184		if (val & VCPU_CFGSHDW_ASPM_DBNC)
13185			tg3_flag_set(tp, ASPM_WORKAROUND);
13186		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13187		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13188			tg3_flag_set(tp, WOL_ENABLE);
13189			device_set_wakeup_enable(&tp->pdev->dev, true);
13190		}
13191		goto done;
13192	}
13193
13194	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13195	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13196		u32 nic_cfg, led_cfg;
13197		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13198		int eeprom_phy_serdes = 0;
13199
13200		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13201		tp->nic_sram_data_cfg = nic_cfg;
13202
13203		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13204		ver >>= NIC_SRAM_DATA_VER_SHIFT;
13205		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13206		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13207		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13208		    (ver > 0) && (ver < 0x100))
13209			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13210
13211		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13212			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13213
13214		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13215		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13216			eeprom_phy_serdes = 1;
13217
13218		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13219		if (nic_phy_id != 0) {
13220			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13221			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13222
13223			eeprom_phy_id  = (id1 >> 16) << 10;
13224			eeprom_phy_id |= (id2 & 0xfc00) << 16;
13225			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
13226		} else
13227			eeprom_phy_id = 0;
13228
13229		tp->phy_id = eeprom_phy_id;
13230		if (eeprom_phy_serdes) {
13231			if (!tg3_flag(tp, 5705_PLUS))
13232				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13233			else
13234				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13235		}
13236
13237		if (tg3_flag(tp, 5750_PLUS))
13238			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13239				    SHASTA_EXT_LED_MODE_MASK);
13240		else
13241			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13242
13243		switch (led_cfg) {
13244		default:
13245		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13246			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13247			break;
13248
13249		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13250			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13251			break;
13252
13253		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13254			tp->led_ctrl = LED_CTRL_MODE_MAC;
13255
13256			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
13257			 * read on some older 5700/5701 bootcode.
13258			 */
13259			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13260			    ASIC_REV_5700 ||
13261			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
13262			    ASIC_REV_5701)
13263				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13264
13265			break;
13266
13267		case SHASTA_EXT_LED_SHARED:
13268			tp->led_ctrl = LED_CTRL_MODE_SHARED;
13269			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13270			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13271				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13272						 LED_CTRL_MODE_PHY_2);
13273			break;
13274
13275		case SHASTA_EXT_LED_MAC:
13276			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13277			break;
13278
13279		case SHASTA_EXT_LED_COMBO:
13280			tp->led_ctrl = LED_CTRL_MODE_COMBO;
13281			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13282				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13283						 LED_CTRL_MODE_PHY_2);
13284			break;
13285
13286		}
13287
13288		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13289		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13290		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13291			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13292
13293		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13294			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13295
13296		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13297			tg3_flag_set(tp, EEPROM_WRITE_PROT);
13298			if ((tp->pdev->subsystem_vendor ==
13299			     PCI_VENDOR_ID_ARIMA) &&
13300			    (tp->pdev->subsystem_device == 0x205a ||
13301			     tp->pdev->subsystem_device == 0x2063))
13302				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13303		} else {
13304			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13305			tg3_flag_set(tp, IS_NIC);
13306		}
13307
13308		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13309			tg3_flag_set(tp, ENABLE_ASF);
13310			if (tg3_flag(tp, 5750_PLUS))
13311				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13312		}
13313
13314		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13315		    tg3_flag(tp, 5750_PLUS))
13316			tg3_flag_set(tp, ENABLE_APE);
13317
13318		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13319		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13320			tg3_flag_clear(tp, WOL_CAP);
13321
13322		if (tg3_flag(tp, WOL_CAP) &&
13323		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13324			tg3_flag_set(tp, WOL_ENABLE);
13325			device_set_wakeup_enable(&tp->pdev->dev, true);
13326		}
13327
13328		if (cfg2 & (1 << 17))
13329			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13330
13331		/* SerDes signal pre-emphasis in register 0x590 is set by
13332		 * the bootcode if bit 18 is set. */
13333		if (cfg2 & (1 << 18))
13334			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13335
13336		if ((tg3_flag(tp, 57765_PLUS) ||
13337		     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13338		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13339		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13340			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13341
13342		if (tg3_flag(tp, PCI_EXPRESS) &&
13343		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13344		    !tg3_flag(tp, 57765_PLUS)) {
13345			u32 cfg3;
13346
13347			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13348			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13349				tg3_flag_set(tp, ASPM_WORKAROUND);
13350		}
13351
13352		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13353			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13354		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13355			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13356		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13357			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13358	}
13359done:
13360	if (tg3_flag(tp, WOL_CAP))
13361		device_set_wakeup_enable(&tp->pdev->dev,
13362					 tg3_flag(tp, WOL_ENABLE));
13363	else
13364		device_set_wakeup_capable(&tp->pdev->dev, false);
13365}
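
/* tg3_get_eeprom_hw_cfg() above and tg3_phy_probe() below assemble
 * the driver-internal phy_id from two 16-bit MII ID words with the
 * same shift/mask math.  A sketch of that packing (illustrative
 * helper, not called by the driver):
 */
static inline u32 tg3_pack_phy_id_sketch(u32 physid1, u32 physid2)
{
	u32 id;

	id  = (physid1 & 0xffff) << 10;
	id |= (physid2 & 0xfc00) << 16;
	id |= (physid2 & 0x03ff);

	return id;
}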
13366
13367static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13368{
13369	int i;
13370	u32 val;
13371
13372	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13373	tw32(OTP_CTRL, cmd);
13374
13375	/* Wait for up to 1 ms for command to execute. */
13376	for (i = 0; i < 100; i++) {
13377		val = tr32(OTP_STATUS);
13378		if (val & OTP_STATUS_CMD_DONE)
13379			break;
13380		udelay(10);
13381	}
13382
13383	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13384}
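
/* tg3_issue_otp_command() busy-polls OTP_STATUS every 10 us for at
 * most 100 iterations, which is the "up to 1 ms" in its comment
 * (100 * 10 us).  The same pattern generalized, as a sketch only
 * (not used by the driver):
 */
static inline int tg3_poll_bit_sketch(struct tg3 *tp, u32 reg, u32 bit,
				      unsigned int delay_us,
				      unsigned int max_polls)
{
	unsigned int i;

	for (i = 0; i < max_polls; i++) {
		if (tr32(reg) & bit)
			return 0;
		udelay(delay_us);
	}

	return -EBUSY;
}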
13385
13386/* Read the gphy configuration from the OTP region of the chip.  The gphy
13387 * configuration is a 32-bit value that straddles the alignment boundary.
13388 * We do two 32-bit reads and then shift and merge the results.
13389 */
13390static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13391{
13392	u32 bhalf_otp, thalf_otp;
13393
13394	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13395
13396	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13397		return 0;
13398
13399	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13400
13401	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13402		return 0;
13403
13404	thalf_otp = tr32(OTP_READ_DATA);
13405
13406	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13407
13408	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13409		return 0;
13410
13411	bhalf_otp = tr32(OTP_READ_DATA);
13412
13413	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13414}
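
/* Worked example of the merge in tg3_read_otp_phycfg(): if the word
 * at OTP_ADDRESS_MAGIC1 reads 0xAAAA1234 and the word at
 * OTP_ADDRESS_MAGIC2 reads 0x5678BBBB (made-up values), the merged
 * gphy config is
 * ((0xAAAA1234 & 0xffff) << 16) | (0x5678BBBB >> 16) = 0x12345678.
 */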
13415
13416static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13417{
13418	u32 adv = ADVERTISED_Autoneg;
13419
13420	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13421		adv |= ADVERTISED_1000baseT_Half |
13422		       ADVERTISED_1000baseT_Full;
13423
13424	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13425		adv |= ADVERTISED_100baseT_Half |
13426		       ADVERTISED_100baseT_Full |
13427		       ADVERTISED_10baseT_Half |
13428		       ADVERTISED_10baseT_Full |
13429		       ADVERTISED_TP;
13430	else
13431		adv |= ADVERTISED_FIBRE;
13432
13433	tp->link_config.advertising = adv;
13434	tp->link_config.speed = SPEED_UNKNOWN;
13435	tp->link_config.duplex = DUPLEX_UNKNOWN;
13436	tp->link_config.autoneg = AUTONEG_ENABLE;
13437	tp->link_config.active_speed = SPEED_UNKNOWN;
13438	tp->link_config.active_duplex = DUPLEX_UNKNOWN;
13439
13440	tp->old_link = -1;
13441}
13442
13443static int __devinit tg3_phy_probe(struct tg3 *tp)
13444{
13445	u32 hw_phy_id_1, hw_phy_id_2;
13446	u32 hw_phy_id, hw_phy_id_masked;
13447	int err;
13448
13449	/* flow control autonegotiation is default behavior */
13450	tg3_flag_set(tp, PAUSE_AUTONEG);
13451	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13452
13453	if (tg3_flag(tp, USE_PHYLIB))
13454		return tg3_phy_init(tp);
13455
13456	/* Reading the PHY ID register can conflict with ASF
13457	 * firmware access to the PHY hardware.
13458	 */
13459	err = 0;
13460	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13461		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13462	} else {
13463		/* Now read the physical PHY_ID from the chip and verify
13464		 * that it is sane.  If it doesn't look good, we fall back
13465		 * to the value found in the eeprom area or, failing that,
13466		 * the hard-coded subsystem device table.
13467		 */
13468		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13469		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13470
13471		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
13472		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13473		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
13474
13475		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13476	}
13477
13478	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13479		tp->phy_id = hw_phy_id;
13480		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13481			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13482		else
13483			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13484	} else {
13485		if (tp->phy_id != TG3_PHY_ID_INVALID) {
13486			/* Do nothing, phy ID already set up in
13487			 * tg3_get_eeprom_hw_cfg().
13488			 */
13489		} else {
13490			struct subsys_tbl_ent *p;
13491
13492			/* No eeprom signature?  Try the hardcoded
13493			 * subsys device table.
13494			 */
13495			p = tg3_lookup_by_subsys(tp);
13496			if (!p)
13497				return -ENODEV;
13498
13499			tp->phy_id = p->phy_id;
13500			if (!tp->phy_id ||
13501			    tp->phy_id == TG3_PHY_ID_BCM8002)
13502				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13503		}
13504	}
13505
13506	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13507	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13508	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13509	     (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13510	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13511	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13512	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13513		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13514
13515	tg3_phy_init_link_config(tp);
13516
13517	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13518	    !tg3_flag(tp, ENABLE_APE) &&
13519	    !tg3_flag(tp, ENABLE_ASF)) {
13520		u32 bmsr, dummy;
13521
13522		tg3_readphy(tp, MII_BMSR, &bmsr);
13523		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13524		    (bmsr & BMSR_LSTATUS))
13525			goto skip_phy_reset;
13526
13527		err = tg3_phy_reset(tp);
13528		if (err)
13529			return err;
13530
13531		tg3_phy_set_wirespeed(tp);
13532
13533		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
13534			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13535					    tp->link_config.flowctrl);
13536
13537			tg3_writephy(tp, MII_BMCR,
13538				     BMCR_ANENABLE | BMCR_ANRESTART);
13539		}
13540	}
13541
13542skip_phy_reset:
13543	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13544		err = tg3_init_5401phy_dsp(tp);
13545		if (err)
13546			return err;
13547
13548		err = tg3_init_5401phy_dsp(tp);
13549	}
13550
13551	return err;
13552}
13553
13554static void __devinit tg3_read_vpd(struct tg3 *tp)
13555{
13556	u8 *vpd_data;
13557	unsigned int block_end, rosize, len;
13558	u32 vpdlen;
13559	int j, i = 0;
13560
13561	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13562	if (!vpd_data)
13563		goto out_no_vpd;
13564
13565	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13566	if (i < 0)
13567		goto out_not_found;
13568
13569	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13570	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13571	i += PCI_VPD_LRDT_TAG_SIZE;
13572
13573	if (block_end > vpdlen)
13574		goto out_not_found;
13575
13576	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13577				      PCI_VPD_RO_KEYWORD_MFR_ID);
13578	if (j > 0) {
13579		len = pci_vpd_info_field_size(&vpd_data[j]);
13580
13581		j += PCI_VPD_INFO_FLD_HDR_SIZE;
13582		if (j + len > block_end || len != 4 ||
13583		    memcmp(&vpd_data[j], "1028", 4))
13584			goto partno;
13585
13586		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13587					      PCI_VPD_RO_KEYWORD_VENDOR0);
13588		if (j < 0)
13589			goto partno;
13590
13591		len = pci_vpd_info_field_size(&vpd_data[j]);
13592
13593		j += PCI_VPD_INFO_FLD_HDR_SIZE;
13594		if (j + len > block_end)
13595			goto partno;
13596
13597		memcpy(tp->fw_ver, &vpd_data[j], len);
13598		strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13599	}
13600
13601partno:
13602	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13603				      PCI_VPD_RO_KEYWORD_PARTNO);
13604	if (i < 0)
13605		goto out_not_found;
13606
13607	len = pci_vpd_info_field_size(&vpd_data[i]);
13608
13609	i += PCI_VPD_INFO_FLD_HDR_SIZE;
13610	if (len > TG3_BPN_SIZE ||
13611	    (len + i) > vpdlen)
13612		goto out_not_found;
13613
13614	memcpy(tp->board_part_number, &vpd_data[i], len);
13615
13616out_not_found:
13617	kfree(vpd_data);
13618	if (tp->board_part_number[0])
13619		return;
13620
13621out_no_vpd:
13622	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13623		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13624			strcpy(tp->board_part_number, "BCM5717");
13625		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13626			strcpy(tp->board_part_number, "BCM5718");
13627		else
13628			goto nomatch;
13629	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13630		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13631			strcpy(tp->board_part_number, "BCM57780");
13632		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13633			strcpy(tp->board_part_number, "BCM57760");
13634		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13635			strcpy(tp->board_part_number, "BCM57790");
13636		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13637			strcpy(tp->board_part_number, "BCM57788");
13638		else
13639			goto nomatch;
13640	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13641		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13642			strcpy(tp->board_part_number, "BCM57761");
13643		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13644			strcpy(tp->board_part_number, "BCM57765");
13645		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13646			strcpy(tp->board_part_number, "BCM57781");
13647		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13648			strcpy(tp->board_part_number, "BCM57785");
13649		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13650			strcpy(tp->board_part_number, "BCM57791");
13651		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13652			strcpy(tp->board_part_number, "BCM57795");
13653		else
13654			goto nomatch;
13655	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
13656		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
13657			strcpy(tp->board_part_number, "BCM57762");
13658		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
13659			strcpy(tp->board_part_number, "BCM57766");
13660		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
13661			strcpy(tp->board_part_number, "BCM57782");
13662		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13663			strcpy(tp->board_part_number, "BCM57786");
13664		else
13665			goto nomatch;
13666	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13667		strcpy(tp->board_part_number, "BCM95906");
13668	} else {
13669nomatch:
13670		strcpy(tp->board_part_number, "none");
13671	}
13672}
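
/* Sketch of the VPD layout that tg3_read_vpd() walks (byte values
 * are illustrative, not from a real part):
 *
 *   0x90 len_lo len_hi           VPD-R large-resource header (3 bytes)
 *   'M' 'N' 0x04 "1028"          MFR_ID field; "1028" is Dell's PCI
 *                                vendor ID in ASCII
 *   'V' '0' len  <bootcode ver>  VENDOR0 field, appended to fw_ver
 *   'P' 'N' len  <part number>   PARTNO field -> board_part_number
 *
 * Each field carries a 3-byte header (two keyword characters plus a
 * length byte), i.e. PCI_VPD_INFO_FLD_HDR_SIZE above.
 */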
13673
13674static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13675{
13676	u32 val;
13677
13678	if (tg3_nvram_read(tp, offset, &val) ||
13679	    (val & 0xfc000000) != 0x0c000000 ||
13680	    tg3_nvram_read(tp, offset + 4, &val) ||
13681	    val != 0)
13682		return 0;
13683
13684	return 1;
13685}
13686
13687static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13688{
13689	u32 val, offset, start, ver_offset;
13690	int i, dst_off;
13691	bool newver = false;
13692
13693	if (tg3_nvram_read(tp, 0xc, &offset) ||
13694	    tg3_nvram_read(tp, 0x4, &start))
13695		return;
13696
13697	offset = tg3_nvram_logical_addr(tp, offset);
13698
13699	if (tg3_nvram_read(tp, offset, &val))
13700		return;
13701
13702	if ((val & 0xfc000000) == 0x0c000000) {
13703		if (tg3_nvram_read(tp, offset + 4, &val))
13704			return;
13705
13706		if (val == 0)
13707			newver = true;
13708	}
13709
13710	dst_off = strlen(tp->fw_ver);
13711
13712	if (newver) {
13713		if (TG3_VER_SIZE - dst_off < 16 ||
13714		    tg3_nvram_read(tp, offset + 8, &ver_offset))
13715			return;
13716
13717		offset = offset + ver_offset - start;
13718		for (i = 0; i < 16; i += 4) {
13719			__be32 v;
13720			if (tg3_nvram_read_be32(tp, offset + i, &v))
13721				return;
13722
13723			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13724		}
13725	} else {
13726		u32 major, minor;
13727
13728		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13729			return;
13730
13731		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13732			TG3_NVM_BCVER_MAJSFT;
13733		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13734		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13735			 "v%d.%02d", major, minor);
13736	}
13737}
13738
13739static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13740{
13741	u32 val, major, minor;
13742
13743	/* Use native endian representation */
13744	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13745		return;
13746
13747	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13748		TG3_NVM_HWSB_CFG1_MAJSFT;
13749	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13750		TG3_NVM_HWSB_CFG1_MINSFT;
13751
13752	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13753}
13754
13755static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13756{
13757	u32 offset, major, minor, build;
13758
13759	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13760
13761	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13762		return;
13763
13764	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13765	case TG3_EEPROM_SB_REVISION_0:
13766		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13767		break;
13768	case TG3_EEPROM_SB_REVISION_2:
13769		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13770		break;
13771	case TG3_EEPROM_SB_REVISION_3:
13772		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13773		break;
13774	case TG3_EEPROM_SB_REVISION_4:
13775		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13776		break;
13777	case TG3_EEPROM_SB_REVISION_5:
13778		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13779		break;
13780	case TG3_EEPROM_SB_REVISION_6:
13781		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13782		break;
13783	default:
13784		return;
13785	}
13786
13787	if (tg3_nvram_read(tp, offset, &val))
13788		return;
13789
13790	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13791		TG3_EEPROM_SB_EDH_BLD_SHFT;
13792	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13793		TG3_EEPROM_SB_EDH_MAJ_SHFT;
13794	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
13795
13796	if (minor > 99 || build > 26)
13797		return;
13798
13799	offset = strlen(tp->fw_ver);
13800	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13801		 " v%d.%02d", major, minor);
13802
13803	if (build > 0) {
13804		offset = strlen(tp->fw_ver);
13805		if (offset < TG3_VER_SIZE - 1)
13806			tp->fw_ver[offset] = 'a' + build - 1;
13807	}
13808}
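
/* The "build > 26" guard in tg3_read_sb_ver() keeps the appended
 * build suffix 'a' + build - 1 inside 'a'..'z': build 1 renders as
 * "a", build 26 as "z", and build 0 adds no suffix.
 */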
13809
13810static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13811{
13812	u32 val, offset, start;
13813	int i, vlen;
13814
13815	for (offset = TG3_NVM_DIR_START;
13816	     offset < TG3_NVM_DIR_END;
13817	     offset += TG3_NVM_DIRENT_SIZE) {
13818		if (tg3_nvram_read(tp, offset, &val))
13819			return;
13820
13821		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13822			break;
13823	}
13824
13825	if (offset == TG3_NVM_DIR_END)
13826		return;
13827
13828	if (!tg3_flag(tp, 5705_PLUS))
13829		start = 0x08000000;
13830	else if (tg3_nvram_read(tp, offset - 4, &start))
13831		return;
13832
13833	if (tg3_nvram_read(tp, offset + 4, &offset) ||
13834	    !tg3_fw_img_is_valid(tp, offset) ||
13835	    tg3_nvram_read(tp, offset + 8, &val))
13836		return;
13837
13838	offset += val - start;
13839
13840	vlen = strlen(tp->fw_ver);
13841
13842	tp->fw_ver[vlen++] = ',';
13843	tp->fw_ver[vlen++] = ' ';
13844
13845	for (i = 0; i < 4; i++) {
13846		__be32 v;
13847		if (tg3_nvram_read_be32(tp, offset, &v))
13848			return;
13849
13850		offset += sizeof(v);
13851
13852		if (vlen > TG3_VER_SIZE - sizeof(v)) {
13853			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13854			break;
13855		}
13856
13857		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13858		vlen += sizeof(v);
13859	}
13860}
13861
13862static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13863{
13864	int vlen;
13865	u32 apedata;
13866	char *fwtype;
13867
13868	if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13869		return;
13870
13871	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13872	if (apedata != APE_SEG_SIG_MAGIC)
13873		return;
13874
13875	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13876	if (!(apedata & APE_FW_STATUS_READY))
13877		return;
13878
13879	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13880
13881	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13882		tg3_flag_set(tp, APE_HAS_NCSI);
13883		fwtype = "NCSI";
13884	} else {
13885		fwtype = "DASH";
13886	}
13887
13888	vlen = strlen(tp->fw_ver);
13889
13890	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13891		 fwtype,
13892		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13893		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13894		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13895		 (apedata & APE_FW_VERSION_BLDMSK));
13896}
13897
13898static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13899{
13900	u32 val;
13901	bool vpd_vers = false;
13902
13903	if (tp->fw_ver[0] != 0)
13904		vpd_vers = true;
13905
13906	if (tg3_flag(tp, NO_NVRAM)) {
13907		strcat(tp->fw_ver, "sb");
13908		return;
13909	}
13910
13911	if (tg3_nvram_read(tp, 0, &val))
13912		return;
13913
13914	if (val == TG3_EEPROM_MAGIC)
13915		tg3_read_bc_ver(tp);
13916	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13917		tg3_read_sb_ver(tp, val);
13918	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13919		tg3_read_hwsb_ver(tp);
13920	else
13921		return;
13922
13923	if (vpd_vers)
13924		goto done;
13925
13926	if (tg3_flag(tp, ENABLE_APE)) {
13927		if (tg3_flag(tp, ENABLE_ASF))
13928			tg3_read_dash_ver(tp);
13929	} else if (tg3_flag(tp, ENABLE_ASF)) {
13930		tg3_read_mgmtfw_ver(tp);
13931	}
13932
13933done:
13934	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13935}
13936
13937static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13938{
13939	if (tg3_flag(tp, LRG_PROD_RING_CAP))
13940		return TG3_RX_RET_MAX_SIZE_5717;
13941	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13942		return TG3_RX_RET_MAX_SIZE_5700;
13943	else
13944		return TG3_RX_RET_MAX_SIZE_5705;
13945}
13946
13947static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13948	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13949	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13950	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13951	{ },
13952};
13953
13954static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
13955{
13956	struct pci_dev *peer;
13957	unsigned int func, devnr = tp->pdev->devfn & ~7;
13958
13959	for (func = 0; func < 8; func++) {
13960		peer = pci_get_slot(tp->pdev->bus, devnr | func);
13961		if (peer && peer != tp->pdev)
13962			break;
13963		pci_dev_put(peer);
13964	}
13965	/* 5704 can be configured in single-port mode; set peer to
13966	 * tp->pdev in that case.
13967	 */
13968	if (!peer) {
13969		peer = tp->pdev;
13970		return peer;
13971	}
13972
13973	/*
13974	 * We don't need to keep the refcount elevated; there's no way
13975	 * to remove one half of this device without removing the other.
13976	 */
13977	pci_dev_put(peer);
13978
13979	return peer;
13980}
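
/* PCI devfn packs slot and function as (slot << 3) | func, so the
 * "devfn & ~7" in tg3_find_peer() is function 0 of this device's
 * slot and the loop probes all eight sibling functions.  An
 * equivalent spelling with the standard macros (illustrative only):
 */
static inline struct pci_dev *tg3_peer_fn_sketch(struct tg3 *tp,
						 unsigned int func)
{
	return pci_get_slot(tp->pdev->bus,
			    PCI_DEVFN(PCI_SLOT(tp->pdev->devfn), func));
}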
13981
13982static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
13983{
13984	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
13985	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13986		u32 reg;
13987
13988		/* All devices that use the alternate
13989		 * ASIC REV location have a CPMU.
13990		 */
13991		tg3_flag_set(tp, CPMU_PRESENT);
13992
13993		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13994		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13995		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13996		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13997			reg = TG3PCI_GEN2_PRODID_ASICREV;
13998		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13999			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
14000			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
14001			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
14002			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14003			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14004			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
14005			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
14006			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
14007			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14008			reg = TG3PCI_GEN15_PRODID_ASICREV;
14009		else
14010			reg = TG3PCI_PRODID_ASICREV;
14011
14012		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
14013	}
14014
14015	/* Wrong chip ID in 5752 A0. This code can be removed later
14016	 * as A0 is not in production.
14017	 */
14018	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
14019		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
14020
14021	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14022	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14023	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14024		tg3_flag_set(tp, 5717_PLUS);
14025
14026	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14027	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14028		tg3_flag_set(tp, 57765_CLASS);
14029
14030	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
14031		tg3_flag_set(tp, 57765_PLUS);
14032
14033	/* Intentionally exclude ASIC_REV_5906 */
14034	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14035	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14036	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14037	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14038	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14039	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14040	    tg3_flag(tp, 57765_PLUS))
14041		tg3_flag_set(tp, 5755_PLUS);
14042
14043	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14044	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14045		tg3_flag_set(tp, 5780_CLASS);
14046
14047	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14048	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14049	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14050	    tg3_flag(tp, 5755_PLUS) ||
14051	    tg3_flag(tp, 5780_CLASS))
14052		tg3_flag_set(tp, 5750_PLUS);
14053
14054	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14055	    tg3_flag(tp, 5750_PLUS))
14056		tg3_flag_set(tp, 5705_PLUS);
14057}
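
/* pci_chip_rev_id is the 16-bit chip revision word taken from the
 * top of MISC_HOST_CTRL (or from a PRODID register on newer parts,
 * as above).  The GET_ASIC_REV()/GET_CHIP_REV() macros used
 * throughout slice it roughly as follows (our reading of tg3.h;
 * treat the exact bit positions as an assumption):
 *
 *   GET_ASIC_REV(id)  ->  id >> 12   ASIC family
 *   GET_CHIP_REV(id)  ->  id >> 8    family plus revision nibble
 *   metal/stepping    ->  id & 0xff
 */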
14058
14059static int __devinit tg3_get_invariants(struct tg3 *tp)
14060{
14061	u32 misc_ctrl_reg;
14062	u32 pci_state_reg, grc_misc_cfg;
14063	u32 val;
14064	u16 pci_cmd;
14065	int err;
14066
14067	/* Force memory write invalidate off.  If we leave it on,
14068	 * then on 5700_BX chips we have to enable a workaround.
14069	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
14070	 * to match the cacheline size.  The Broadcom driver has this
14071	 * workaround but turns MWI off all the time, so it is never
14072	 * used.  This suggests that the workaround is insufficient.
14073	 */
14074	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14075	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
14076	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14077
14078	/* Important! -- Make sure register accesses are byteswapped
14079	 * correctly.  Also, for those chips that require it, make
14080	 * sure that indirect register accesses are enabled before
14081	 * the first operation.
14082	 */
14083	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14084			      &misc_ctrl_reg);
14085	tp->misc_host_ctrl |= (misc_ctrl_reg &
14086			       MISC_HOST_CTRL_CHIPREV);
14087	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14088			       tp->misc_host_ctrl);
14089
14090	tg3_detect_asic_rev(tp, misc_ctrl_reg);
14091
14092	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
14093	 * we need to disable memory and use config. cycles
14094	 * only to access all registers. The 5702/03 chips
14095	 * can mistakenly decode the special cycles from the
14096	 * ICH chipsets as memory write cycles, causing corruption
14097	 * of register and memory space. Only certain ICH bridges
14098	 * will drive special cycles with non-zero data during the
14099	 * address phase which can fall within the 5703's address
14100	 * range. This is not an ICH bug as the PCI spec allows
14101	 * non-zero address during special cycles. However, only
14102	 * these ICH bridges are known to drive non-zero addresses
14103	 * during special cycles.
14104	 *
14105	 * Since special cycles do not cross PCI bridges, we only
14106	 * enable this workaround if the 5703 is on the secondary
14107	 * bus of these ICH bridges.
14108	 */
14109	if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14110	    (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14111		static struct tg3_dev_id {
14112			u32	vendor;
14113			u32	device;
14114			u32	rev;
14115		} ich_chipsets[] = {
14116			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14117			  PCI_ANY_ID },
14118			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14119			  PCI_ANY_ID },
14120			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14121			  0xa },
14122			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14123			  PCI_ANY_ID },
14124			{ },
14125		};
14126		struct tg3_dev_id *pci_id = &ich_chipsets[0];
14127		struct pci_dev *bridge = NULL;
14128
14129		while (pci_id->vendor != 0) {
14130			bridge = pci_get_device(pci_id->vendor, pci_id->device,
14131						bridge);
14132			if (!bridge) {
14133				pci_id++;
14134				continue;
14135			}
14136			if (pci_id->rev != PCI_ANY_ID) {
14137				if (bridge->revision > pci_id->rev)
14138					continue;
14139			}
14140			if (bridge->subordinate &&
14141			    (bridge->subordinate->number ==
14142			     tp->pdev->bus->number)) {
14143				tg3_flag_set(tp, ICH_WORKAROUND);
14144				pci_dev_put(bridge);
14145				break;
14146			}
14147		}
14148	}
14149
14150	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14151		static struct tg3_dev_id {
14152			u32	vendor;
14153			u32	device;
14154		} bridge_chipsets[] = {
14155			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14156			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14157			{ },
14158		};
14159		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14160		struct pci_dev *bridge = NULL;
14161
14162		while (pci_id->vendor != 0) {
14163			bridge = pci_get_device(pci_id->vendor,
14164						pci_id->device,
14165						bridge);
14166			if (!bridge) {
14167				pci_id++;
14168				continue;
14169			}
14170			if (bridge->subordinate &&
14171			    (bridge->subordinate->number <=
14172			     tp->pdev->bus->number) &&
14173			    (bridge->subordinate->subordinate >=
14174			     tp->pdev->bus->number)) {
14175				tg3_flag_set(tp, 5701_DMA_BUG);
14176				pci_dev_put(bridge);
14177				break;
14178			}
14179		}
14180	}
14181
14182	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
14183	 * DMA addresses > 40-bit. This bridge may have additional 57xx
14184	 * devices behind it in some 4-port NIC designs, for example.
14185	 * Any tg3 device found behind the bridge will also need the 40-bit
14186	 * DMA workaround.
14187	 */
14188	if (tg3_flag(tp, 5780_CLASS)) {
14189		tg3_flag_set(tp, 40BIT_DMA_BUG);
14190		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14191	} else {
14192		struct pci_dev *bridge = NULL;
14193
14194		do {
14195			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14196						PCI_DEVICE_ID_SERVERWORKS_EPB,
14197						bridge);
14198			if (bridge && bridge->subordinate &&
14199			    (bridge->subordinate->number <=
14200			     tp->pdev->bus->number) &&
14201			    (bridge->subordinate->subordinate >=
14202			     tp->pdev->bus->number)) {
14203				tg3_flag_set(tp, 40BIT_DMA_BUG);
14204				pci_dev_put(bridge);
14205				break;
14206			}
14207		} while (bridge);
14208	}
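
	/* In both bridge scans above, a device sits "behind" a bridge
	 * when its bus number falls inside the bridge's
	 * [secondary, subordinate] bus number range, which is the
	 * containment test applied to bridge->subordinate.
	 */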
14209
14210	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14211	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14212		tp->pdev_peer = tg3_find_peer(tp);
14213
14214	/* Determine TSO capabilities */
14215	if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14216		; /* Do nothing. HW bug. */
14217	else if (tg3_flag(tp, 57765_PLUS))
14218		tg3_flag_set(tp, HW_TSO_3);
14219	else if (tg3_flag(tp, 5755_PLUS) ||
14220		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14221		tg3_flag_set(tp, HW_TSO_2);
14222	else if (tg3_flag(tp, 5750_PLUS)) {
14223		tg3_flag_set(tp, HW_TSO_1);
14224		tg3_flag_set(tp, TSO_BUG);
14225		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14226		    tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14227			tg3_flag_clear(tp, TSO_BUG);
14228	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14229		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14230		   tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14231		tg3_flag_set(tp, TSO_BUG);
14232		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14233			tp->fw_needed = FIRMWARE_TG3TSO5;
14234		else
14235			tp->fw_needed = FIRMWARE_TG3TSO;
14236	}
14237
14238	/* Selectively allow TSO based on operating conditions */
14239	if (tg3_flag(tp, HW_TSO_1) ||
14240	    tg3_flag(tp, HW_TSO_2) ||
14241	    tg3_flag(tp, HW_TSO_3) ||
14242	    tp->fw_needed) {
14243		/* For firmware TSO, assume ASF is disabled.
14244		 * We'll disable TSO later if we discover ASF
14245		 * is enabled in tg3_get_eeprom_hw_cfg().
14246		 */
14247		tg3_flag_set(tp, TSO_CAPABLE);
14248	} else {
14249		tg3_flag_clear(tp, TSO_CAPABLE);
14250		tg3_flag_clear(tp, TSO_BUG);
14251		tp->fw_needed = NULL;
14252	}
14253
14254	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14255		tp->fw_needed = FIRMWARE_TG3;
14256
14257	tp->irq_max = 1;
14258
14259	if (tg3_flag(tp, 5750_PLUS)) {
14260		tg3_flag_set(tp, SUPPORT_MSI);
14261		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14262		    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14263		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14264		     tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14265		     tp->pdev_peer == tp->pdev))
14266			tg3_flag_clear(tp, SUPPORT_MSI);
14267
14268		if (tg3_flag(tp, 5755_PLUS) ||
14269		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14270			tg3_flag_set(tp, 1SHOT_MSI);
14271		}
14272
14273		if (tg3_flag(tp, 57765_PLUS)) {
14274			tg3_flag_set(tp, SUPPORT_MSIX);
14275			tp->irq_max = TG3_IRQ_MAX_VECS;
14276			tg3_rss_init_dflt_indir_tbl(tp);
14277		}
14278	}
14279
14280	if (tg3_flag(tp, 5755_PLUS) ||
14281	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14282		tg3_flag_set(tp, SHORT_DMA_BUG);
14283
14284	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14285		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14286
14287	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14288	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14289	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14290		tg3_flag_set(tp, LRG_PROD_RING_CAP);
14291
14292	if (tg3_flag(tp, 57765_PLUS) &&
14293	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14294		tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14295
14296	if (!tg3_flag(tp, 5705_PLUS) ||
14297	    tg3_flag(tp, 5780_CLASS) ||
14298	    tg3_flag(tp, USE_JUMBO_BDFLAG))
14299		tg3_flag_set(tp, JUMBO_CAPABLE);
14300
14301	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14302			      &pci_state_reg);
14303
14304	if (pci_is_pcie(tp->pdev)) {
14305		u16 lnkctl;
14306
14307		tg3_flag_set(tp, PCI_EXPRESS);
14308
14309		pci_read_config_word(tp->pdev,
14310				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14311				     &lnkctl);
14312		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14313			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14314			    ASIC_REV_5906) {
14315				tg3_flag_clear(tp, HW_TSO_2);
14316				tg3_flag_clear(tp, TSO_CAPABLE);
14317			}
14318			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14319			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14320			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14321			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14322				tg3_flag_set(tp, CLKREQ_BUG);
14323		} else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14324			tg3_flag_set(tp, L1PLLPD_EN);
14325		}
14326	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14327		/* BCM5785 devices are effectively PCIe devices, and should
14328		 * follow PCIe codepaths, but do not have a PCIe capabilities
14329		 * section.
14330		 */
14331		tg3_flag_set(tp, PCI_EXPRESS);
14332	} else if (!tg3_flag(tp, 5705_PLUS) ||
14333		   tg3_flag(tp, 5780_CLASS)) {
14334		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14335		if (!tp->pcix_cap) {
14336			dev_err(&tp->pdev->dev,
14337				"Cannot find PCI-X capability, aborting\n");
14338			return -EIO;
14339		}
14340
14341		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14342			tg3_flag_set(tp, PCIX_MODE);
14343	}
14344
14345	/* If we have an AMD 762 or VIA K8T800 chipset, write
14346	 * reordering to the mailbox registers done by the host
14347	 * controller can cause major troubles.  We read back from
14348	 * every mailbox register write to force the writes to be
14349	 * posted to the chip in order.
14350	 */
14351	if (pci_dev_present(tg3_write_reorder_chipsets) &&
14352	    !tg3_flag(tp, PCI_EXPRESS))
14353		tg3_flag_set(tp, MBOX_WRITE_REORDER);
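
	/* tg3_write_flush_reg32() achieves this by reading the register
	 * back immediately after writing it; the read cannot complete
	 * until the posted write has reached the chip, so mailbox
	 * writes arrive in order despite chipset reordering.
	 */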
14354
14355	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14356			     &tp->pci_cacheline_sz);
14357	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14358			     &tp->pci_lat_timer);
14359	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14360	    tp->pci_lat_timer < 64) {
14361		tp->pci_lat_timer = 64;
14362		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14363				      tp->pci_lat_timer);
14364	}
14365
14366	/* Important! -- It is critical that the PCI-X hw workaround
14367	 * situation is decided before the first MMIO register access.
14368	 */
14369	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14370		/* 5700 BX chips need to have their TX producer index
14371		 * mailboxes written twice to workaround a bug.
14372		 */
14373		tg3_flag_set(tp, TXD_MBOX_HWBUG);
14374
14375		/* If we are in PCI-X mode, enable register write workaround.
14376		 *
14377		 * The workaround is to use indirect register accesses
14378		 * for all chip writes not to mailbox registers.
14379		 */
14380		if (tg3_flag(tp, PCIX_MODE)) {
14381			u32 pm_reg;
14382
14383			tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14384
14385			/* The chip can have its power management PCI config
14386			 * space registers clobbered due to this bug.
14387			 * So explicitly force the chip into D0 here.
14388			 */
14389			pci_read_config_dword(tp->pdev,
14390					      tp->pm_cap + PCI_PM_CTRL,
14391					      &pm_reg);
14392			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14393			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14394			pci_write_config_dword(tp->pdev,
14395					       tp->pm_cap + PCI_PM_CTRL,
14396					       pm_reg);
14397
14398			/* Also, force SERR#/PERR# in PCI command. */
14399			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14400			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14401			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14402		}
14403	}
14404
14405	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14406		tg3_flag_set(tp, PCI_HIGH_SPEED);
14407	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14408		tg3_flag_set(tp, PCI_32BIT);
14409
14410	/* Chip-specific fixup from Broadcom driver */
14411	if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14412	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14413		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14414		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14415	}
14416
14417	/* Default fast path register access methods */
14418	tp->read32 = tg3_read32;
14419	tp->write32 = tg3_write32;
14420	tp->read32_mbox = tg3_read32;
14421	tp->write32_mbox = tg3_write32;
14422	tp->write32_tx_mbox = tg3_write32;
14423	tp->write32_rx_mbox = tg3_write32;
14424
14425	/* Various workaround register access methods */
14426	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14427		tp->write32 = tg3_write_indirect_reg32;
14428	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14429		 (tg3_flag(tp, PCI_EXPRESS) &&
14430		  tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14431		/*
14432		 * Back to back register writes can cause problems on these
14433		 * chips, the workaround is to read back all reg writes
14434		 * except those to mailbox regs.
14435		 *
14436		 * See tg3_write_indirect_reg32().
14437		 */
14438		tp->write32 = tg3_write_flush_reg32;
14439	}
14440
14441	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14442		tp->write32_tx_mbox = tg3_write32_tx_mbox;
14443		if (tg3_flag(tp, MBOX_WRITE_REORDER))
14444			tp->write32_rx_mbox = tg3_write_flush_reg32;
14445	}
14446
14447	if (tg3_flag(tp, ICH_WORKAROUND)) {
14448		tp->read32 = tg3_read_indirect_reg32;
14449		tp->write32 = tg3_write_indirect_reg32;
14450		tp->read32_mbox = tg3_read_indirect_mbox;
14451		tp->write32_mbox = tg3_write_indirect_mbox;
14452		tp->write32_tx_mbox = tg3_write_indirect_mbox;
14453		tp->write32_rx_mbox = tg3_write_indirect_mbox;
14454
14455		iounmap(tp->regs);
14456		tp->regs = NULL;
14457
14458		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14459		pci_cmd &= ~PCI_COMMAND_MEMORY;
14460		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14461	}
14462	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14463		tp->read32_mbox = tg3_read32_mbox_5906;
14464		tp->write32_mbox = tg3_write32_mbox_5906;
14465		tp->write32_tx_mbox = tg3_write32_mbox_5906;
14466		tp->write32_rx_mbox = tg3_write32_mbox_5906;
14467	}
14468
14469	if (tp->write32 == tg3_write_indirect_reg32 ||
14470	    (tg3_flag(tp, PCIX_MODE) &&
14471	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14472	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14473		tg3_flag_set(tp, SRAM_USE_CONFIG);
14474
14475	/* The memory arbiter has to be enabled in order for SRAM accesses
14476	 * to succeed.  Normally on powerup the tg3 chip firmware will make
14477	 * sure it is enabled, but other entities such as system netboot
14478	 * code might disable it.
14479	 */
14480	val = tr32(MEMARB_MODE);
14481	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14482
14483	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14484	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14485	    tg3_flag(tp, 5780_CLASS)) {
14486		if (tg3_flag(tp, PCIX_MODE)) {
14487			pci_read_config_dword(tp->pdev,
14488					      tp->pcix_cap + PCI_X_STATUS,
14489					      &val);
14490			tp->pci_fn = val & 0x7;
14491		}
14492	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14493		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14494		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14495		    NIC_SRAM_CPMUSTAT_SIG) {
14496			tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14497			tp->pci_fn = tp->pci_fn ? 1 : 0;
14498		}
14499	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14500		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14501		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14502		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14503		    NIC_SRAM_CPMUSTAT_SIG) {
14504			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14505				     TG3_CPMU_STATUS_FSHFT_5719;
14506		}
14507	}
14508
14509	/* Get eeprom hw config before calling tg3_set_power_state().
14510	 * In particular, the TG3_FLAG_IS_NIC flag must be
14511	 * determined before calling tg3_set_power_state() so that
14512	 * we know whether or not to switch out of Vaux power.
14513	 * When the flag is set, it means that GPIO1 is used for eeprom
14514	 * write protect and also implies that it is a LOM where GPIOs
14515	 * are not used to switch power.
14516	 */
14517	tg3_get_eeprom_hw_cfg(tp);
14518
14519	if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14520		tg3_flag_clear(tp, TSO_CAPABLE);
14521		tg3_flag_clear(tp, TSO_BUG);
14522		tp->fw_needed = NULL;
14523	}
14524
14525	if (tg3_flag(tp, ENABLE_APE)) {
14526		/* Allow reads and writes to the
14527		 * APE register and memory space.
14528		 */
14529		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14530				 PCISTATE_ALLOW_APE_SHMEM_WR |
14531				 PCISTATE_ALLOW_APE_PSPACE_WR;
14532		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14533				       pci_state_reg);
14534
14535		tg3_ape_lock_init(tp);
14536	}
14537
14538	/* Set up tp->grc_local_ctrl before calling
14539	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14540	 * will bring 5700's external PHY out of reset.
14541	 * It is also used as eeprom write protect on LOMs.
14542	 */
14543	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14544	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14545	    tg3_flag(tp, EEPROM_WRITE_PROT))
14546		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14547				       GRC_LCLCTRL_GPIO_OUTPUT1);
14548	/* Unused GPIO3 must be driven as output on 5752 because there
14549	 * are no pull-up resistors on unused GPIO pins.
14550	 */
14551	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14552		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14553
14554	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14555	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14556	    tg3_flag(tp, 57765_CLASS))
14557		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14558
14559	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14560	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14561		/* Turn off the debug UART. */
14562		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14563		if (tg3_flag(tp, IS_NIC))
14564			/* Keep VMain power. */
14565			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14566					      GRC_LCLCTRL_GPIO_OUTPUT0;
14567	}
14568
14569	/* Switch out of Vaux if it is a NIC */
14570	tg3_pwrsrc_switch_to_vmain(tp);
14571
14572	/* Derive initial jumbo mode from MTU assigned in
14573	 * ether_setup() via the alloc_etherdev() call
14574	 */
14575	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14576		tg3_flag_set(tp, JUMBO_RING_ENABLE);
14577
14578	/* Determine WakeOnLan speed to use. */
14579	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14580	    tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14581	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14582	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14583		tg3_flag_clear(tp, WOL_SPEED_100MB);
14584	} else {
14585		tg3_flag_set(tp, WOL_SPEED_100MB);
14586	}
14587
14588	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14589		tp->phy_flags |= TG3_PHYFLG_IS_FET;
14590
14591	/* A few boards don't want the Ethernet@WireSpeed PHY feature */
14592	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14593	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14594	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14595	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14596	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14597	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14598		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14599
14600	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14601	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14602		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14603	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14604		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14605
14606	if (tg3_flag(tp, 5705_PLUS) &&
14607	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14608	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14609	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14610	    !tg3_flag(tp, 57765_PLUS)) {
14611		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14612		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14613		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14614		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14615			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14616			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14617				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14618			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14619				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14620		} else
14621			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14622	}
14623
14624	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14625	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14626		tp->phy_otp = tg3_read_otp_phycfg(tp);
14627		if (tp->phy_otp == 0)
14628			tp->phy_otp = TG3_OTP_DEFAULT;
14629	}
14630
14631	if (tg3_flag(tp, CPMU_PRESENT))
14632		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14633	else
14634		tp->mi_mode = MAC_MI_MODE_BASE;
14635
14636	tp->coalesce_mode = 0;
14637	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14638	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14639		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14640
14641	/* Set these bits to enable statistics workaround. */
14642	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14643	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14644	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14645		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14646		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14647	}
14648
14649	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14650	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14651		tg3_flag_set(tp, USE_PHYLIB);
14652
14653	err = tg3_mdio_init(tp);
14654	if (err)
14655		return err;
14656
14657	/* Initialize data/descriptor byte/word swapping. */
14658	val = tr32(GRC_MODE);
14659	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14660		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14661			GRC_MODE_WORD_SWAP_B2HRX_DATA |
14662			GRC_MODE_B2HRX_ENABLE |
14663			GRC_MODE_HTX2B_ENABLE |
14664			GRC_MODE_HOST_STACKUP);
14665	else
14666		val &= GRC_MODE_HOST_STACKUP;
14667
14668	tw32(GRC_MODE, val | tp->grc_mode);
14669
14670	tg3_switch_clocks(tp);
14671
14672	/* Clear this out for sanity. */
14673	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14674
14675	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14676			      &pci_state_reg);
14677	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14678	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14679		u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14680
14681		if (chiprevid == CHIPREV_ID_5701_A0 ||
14682		    chiprevid == CHIPREV_ID_5701_B0 ||
14683		    chiprevid == CHIPREV_ID_5701_B2 ||
14684		    chiprevid == CHIPREV_ID_5701_B5) {
14685			void __iomem *sram_base;
14686
14687			/* Write some dummy words into the SRAM status block
14688			 * area and see if they read back correctly.  If the return
14689			 * value is bad, force-enable the PCI-X workaround.
14690			 */
14691			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14692
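			/* If the write to the second word corrupts the
			 * first, the chip is mishandling these accesses
			 * as a PCI-X target.
			 */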
14693			writel(0x00000000, sram_base);
14694			writel(0x00000000, sram_base + 4);
14695			writel(0xffffffff, sram_base + 4);
14696			if (readl(sram_base) != 0x00000000)
14697				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14698		}
14699	}
14700
14701	udelay(50);
14702	tg3_nvram_init(tp);
14703
14704	grc_misc_cfg = tr32(GRC_MISC_CFG);
14705	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14706
14707	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14708	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14709	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14710		tg3_flag_set(tp, IS_5788);
14711
14712	if (!tg3_flag(tp, IS_5788) &&
14713	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14714		tg3_flag_set(tp, TAGGED_STATUS);
14715	if (tg3_flag(tp, TAGGED_STATUS)) {
14716		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14717				      HOSTCC_MODE_CLRTICK_TXBD);
14718
14719		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14720		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14721				       tp->misc_host_ctrl);
14722	}
14723
14724	/* Preserve the APE MAC_MODE bits */
14725	if (tg3_flag(tp, ENABLE_APE))
14726		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14727	else
14728		tp->mac_mode = 0;
14729
14730	/* These devices are limited to 10/100 only */
14731	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14732	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14733	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14734	     tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14735	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14736	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14737	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14738	    (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14739	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14740	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14741	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14742	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14743	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14744	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14745	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
14746		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14747
14748	err = tg3_phy_probe(tp);
14749	if (err) {
14750		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14751		/* ... but do not return immediately ... */
14752		tg3_mdio_fini(tp);
14753	}
14754
14755	tg3_read_vpd(tp);
14756	tg3_read_fw_ver(tp);
14757
14758	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14759		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14760	} else {
14761		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14762			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14763		else
14764			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14765	}
14766
14767	/* 5700 {AX,BX} chips have a broken status block link
14768	 * change bit implementation, so we must use the
14769	 * status register in those cases.
14770	 */
14771	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14772		tg3_flag_set(tp, USE_LINKCHG_REG);
14773	else
14774		tg3_flag_clear(tp, USE_LINKCHG_REG);
14775
14776	/* The led_ctrl is set during tg3_phy_probe; here we might
14777	 * have to force the link status polling mechanism based
14778	 * upon subsystem IDs.
14779	 */
14780	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14781	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14782	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14783		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14784		tg3_flag_set(tp, USE_LINKCHG_REG);
14785	}
14786
14787	/* For all SERDES we poll the MAC status register. */
14788	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14789		tg3_flag_set(tp, POLL_SERDES);
14790	else
14791		tg3_flag_clear(tp, POLL_SERDES);
14792
14793	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
14794	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14795	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14796	    tg3_flag(tp, PCIX_MODE)) {
14797		tp->rx_offset = NET_SKB_PAD;
14798#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
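		/* With no IP header alignment pad, force every received
		 * packet to be copied into an aligned buffer.
		 */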
14799		tp->rx_copy_thresh = ~(u16)0;
14800#endif
14801	}
14802
14803	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14804	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14805	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14806
14807	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14808
14809	/* Increment the rx prod index on the rx std ring by at most
14810	 * 8 for these chips to work around hw errata.
14811	 */
14812	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14813	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14814	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14815		tp->rx_std_max_post = 8;
14816
14817	if (tg3_flag(tp, ASPM_WORKAROUND))
14818		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14819				     PCIE_PWR_MGMT_L1_THRESH_MSK;
14820
14821	return err;
14822}
14823
14824#ifdef CONFIG_SPARC
14825static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14826{
14827	struct net_device *dev = tp->dev;
14828	struct pci_dev *pdev = tp->pdev;
14829	struct device_node *dp = pci_device_to_OF_node(pdev);
14830	const unsigned char *addr;
14831	int len;
14832
14833	addr = of_get_property(dp, "local-mac-address", &len);
14834	if (addr && len == 6) {
14835		memcpy(dev->dev_addr, addr, 6);
14836		memcpy(dev->perm_addr, dev->dev_addr, 6);
14837		return 0;
14838	}
14839	return -ENODEV;
14840}
14841
14842static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14843{
14844	struct net_device *dev = tp->dev;
14845
14846	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14847	memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14848	return 0;
14849}
14850#endif
14851
14852static int __devinit tg3_get_device_address(struct tg3 *tp)
14853{
14854	struct net_device *dev = tp->dev;
14855	u32 hi, lo, mac_offset;
14856	int addr_ok = 0;
14857
14858#ifdef CONFIG_SPARC
14859	if (!tg3_get_macaddr_sparc(tp))
14860		return 0;
14861#endif
14862
14863	mac_offset = 0x7c;
14864	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14865	    tg3_flag(tp, 5780_CLASS)) {
14866		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14867			mac_offset = 0xcc;
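		/* Presumably this puts NVRAM arbitration into a known
		 * state: reset the NVRAM interface if the lock cannot
		 * be taken, otherwise drop the lock again immediately.
		 */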
14868		if (tg3_nvram_lock(tp))
14869			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14870		else
14871			tg3_nvram_unlock(tp);
14872	} else if (tg3_flag(tp, 5717_PLUS)) {
14873		if (tp->pci_fn & 1)
14874			mac_offset = 0xcc;
14875		if (tp->pci_fn > 1)
14876			mac_offset += 0x18c;
14877	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14878		mac_offset = 0x10;
14879
14880	/* First try to get it from MAC address mailbox. */
14881	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
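	/* 0x484b is ASCII "HK", the signature indicating that the
	 * bootcode has stored a MAC address in the mailbox.
	 */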
14882	if ((hi >> 16) == 0x484b) {
14883		dev->dev_addr[0] = (hi >>  8) & 0xff;
14884		dev->dev_addr[1] = (hi >>  0) & 0xff;
14885
14886		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14887		dev->dev_addr[2] = (lo >> 24) & 0xff;
14888		dev->dev_addr[3] = (lo >> 16) & 0xff;
14889		dev->dev_addr[4] = (lo >>  8) & 0xff;
14890		dev->dev_addr[5] = (lo >>  0) & 0xff;
14891
14892		/* Some old bootcode may report a 0 MAC address in SRAM */
14893		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14894	}
14895	if (!addr_ok) {
14896		/* Next, try NVRAM. */
14897		if (!tg3_flag(tp, NO_NVRAM) &&
14898		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14899		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14900			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14901			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14902		}
14903		/* Finally just fetch it out of the MAC control regs. */
14904		else {
14905			hi = tr32(MAC_ADDR_0_HIGH);
14906			lo = tr32(MAC_ADDR_0_LOW);
14907
14908			dev->dev_addr[5] = lo & 0xff;
14909			dev->dev_addr[4] = (lo >> 8) & 0xff;
14910			dev->dev_addr[3] = (lo >> 16) & 0xff;
14911			dev->dev_addr[2] = (lo >> 24) & 0xff;
14912			dev->dev_addr[1] = hi & 0xff;
14913			dev->dev_addr[0] = (hi >> 8) & 0xff;
14914		}
14915	}
14916
14917	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14918#ifdef CONFIG_SPARC
14919		if (!tg3_get_default_macaddr_sparc(tp))
14920			return 0;
14921#endif
14922		return -EINVAL;
14923	}
14924	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14925	return 0;
14926}
14927
14928#define BOUNDARY_SINGLE_CACHELINE	1
14929#define BOUNDARY_MULTI_CACHELINE	2
14930
14931static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14932{
14933	int cacheline_size;
14934	u8 byte;
14935	int goal;
14936
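	/* PCI_CACHE_LINE_SIZE is in units of 32-bit words; zero means
	 * it was never programmed, so assume the largest size.
	 */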
14937	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14938	if (byte == 0)
14939		cacheline_size = 1024;
14940	else
14941		cacheline_size = (int) byte * 4;
14942
14943	/* On 5703 and later chips, the boundary bits have no
14944	 * effect.
14945	 */
14946	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14947	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14948	    !tg3_flag(tp, PCI_EXPRESS))
14949		goto out;
14950
14951#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14952	goal = BOUNDARY_MULTI_CACHELINE;
14953#else
14954#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14955	goal = BOUNDARY_SINGLE_CACHELINE;
14956#else
14957	goal = 0;
14958#endif
14959#endif
14960
14961	if (tg3_flag(tp, 57765_PLUS)) {
14962		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14963		goto out;
14964	}
14965
14966	if (!goal)
14967		goto out;
14968
14969	/* PCI controllers on most RISC systems tend to disconnect
14970	 * when a device tries to burst across a cache-line boundary.
14971	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14972	 *
14973	 * Unfortunately, for PCI-E there are only limited
14974	 * write-side controls for this, and thus for reads
14975	 * we will still get the disconnects.  We'll also waste
14976	 * these PCI cycles for both read and write for chips
14977	 * other than 5700 and 5701 which do not implement the
14978	 * boundary bits.
14979	 */
14980	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14981		switch (cacheline_size) {
14982		case 16:
14983		case 32:
14984		case 64:
14985		case 128:
14986			if (goal == BOUNDARY_SINGLE_CACHELINE) {
14987				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14988					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14989			} else {
14990				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14991					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14992			}
14993			break;
14994
14995		case 256:
14996			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14997				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14998			break;
14999
15000		default:
15001			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15002				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15003			break;
15004		}
15005	} else if (tg3_flag(tp, PCI_EXPRESS)) {
15006		switch (cacheline_size) {
15007		case 16:
15008		case 32:
15009		case 64:
15010			if (goal == BOUNDARY_SINGLE_CACHELINE) {
15011				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15012				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
15013				break;
15014			}
15015			/* fallthrough */
15016		case 128:
15017		default:
15018			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15019			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
15020			break;
15021		}
15022	} else {
15023		switch (cacheline_size) {
15024		case 16:
15025			if (goal == BOUNDARY_SINGLE_CACHELINE) {
15026				val |= (DMA_RWCTRL_READ_BNDRY_16 |
15027					DMA_RWCTRL_WRITE_BNDRY_16);
15028				break;
15029			}
15030			/* fallthrough */
15031		case 32:
15032			if (goal == BOUNDARY_SINGLE_CACHELINE) {
15033				val |= (DMA_RWCTRL_READ_BNDRY_32 |
15034					DMA_RWCTRL_WRITE_BNDRY_32);
15035				break;
15036			}
15037			/* fallthrough */
15038		case 64:
15039			if (goal == BOUNDARY_SINGLE_CACHELINE) {
15040				val |= (DMA_RWCTRL_READ_BNDRY_64 |
15041					DMA_RWCTRL_WRITE_BNDRY_64);
15042				break;
15043			}
15044			/* fallthrough */
15045		case 128:
15046			if (goal == BOUNDARY_SINGLE_CACHELINE) {
15047				val |= (DMA_RWCTRL_READ_BNDRY_128 |
15048					DMA_RWCTRL_WRITE_BNDRY_128);
15049				break;
15050			}
15051			/* fallthrough */
15052		case 256:
15053			val |= (DMA_RWCTRL_READ_BNDRY_256 |
15054				DMA_RWCTRL_WRITE_BNDRY_256);
15055			break;
15056		case 512:
15057			val |= (DMA_RWCTRL_READ_BNDRY_512 |
15058				DMA_RWCTRL_WRITE_BNDRY_512);
15059			break;
15060		case 1024:
15061		default:
15062			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
15063				DMA_RWCTRL_WRITE_BNDRY_1024);
15064			break;
15065		}
15066	}
15067
15068out:
15069	return val;
15070}
15071
15072static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
15073{
15074	struct tg3_internal_buffer_desc test_desc;
15075	u32 sram_dma_descs;
15076	int i, ret;
15077
15078	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
15079
15080	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
15081	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
15082	tw32(RDMAC_STATUS, 0);
15083	tw32(WDMAC_STATUS, 0);
15084
15085	tw32(BUFMGR_MODE, 0);
15086	tw32(FTQ_RESET, 0);
15087
15088	test_desc.addr_hi = ((u64) buf_dma) >> 32;
15089	test_desc.addr_lo = buf_dma & 0xffffffff;
15090	test_desc.nic_mbuf = 0x00002100;
15091	test_desc.len = size;
15092
15093	/*
15094	 * The HP ZX1 was seeing test failures for 5701 cards running at 33MHz
15095	 * the *second* time the tg3 driver was loaded after an
15096	 * initial scan.
15097	 *
15098	 * Broadcom tells me:
15099	 *   ...the DMA engine is connected to the GRC block and a DMA
15100	 *   reset may affect the GRC block in some unpredictable way...
15101	 *   The behavior of resets to individual blocks has not been tested.
15102	 *
15103	 * Broadcom noted the GRC reset will also reset all sub-components.
15104	 */
15105	if (to_device) {
15106		test_desc.cqid_sqid = (13 << 8) | 2;
15107
15108		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
15109		udelay(40);
15110	} else {
15111		test_desc.cqid_sqid = (16 << 8) | 7;
15112
15113		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
15114		udelay(40);
15115	}
15116	test_desc.flags = 0x00000005;
15117
15118	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15119		u32 val;
15120
15121		val = *(((u32 *)&test_desc) + i);
15122		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15123				       sram_dma_descs + (i * sizeof(u32)));
15124		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15125	}
15126	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
15127
15128	if (to_device)
15129		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
15130	else
15131		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
15132
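	/* Poll for up to 4ms (40 * 100us) for the descriptor address
	 * to appear in the completion FIFO.
	 */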
15133	ret = -ENODEV;
15134	for (i = 0; i < 40; i++) {
15135		u32 val;
15136
15137		if (to_device)
15138			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15139		else
15140			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15141		if ((val & 0xffff) == sram_dma_descs) {
15142			ret = 0;
15143			break;
15144		}
15145
15146		udelay(100);
15147	}
15148
15149	return ret;
15150}
15151
15152#define TEST_BUFFER_SIZE	0x2000
15153
15154static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15155	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15156	{ },
15157};
15158
15159static int __devinit tg3_test_dma(struct tg3 *tp)
15160{
15161	dma_addr_t buf_dma;
15162	u32 *buf, saved_dma_rwctrl;
15163	int ret = 0;
15164
15165	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15166				 &buf_dma, GFP_KERNEL);
15167	if (!buf) {
15168		ret = -ENOMEM;
15169		goto out_nofree;
15170	}
15171
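	/* 0x7 and 0x6 are the PCI bus command codes for Memory Write
	 * and Memory Read respectively.
	 */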
15172	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15173			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
15174
15175	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
15176
15177	if (tg3_flag(tp, 57765_PLUS))
15178		goto out;
15179
15180	if (tg3_flag(tp, PCI_EXPRESS)) {
15181		/* DMA read watermark not used on PCIE */
15182		tp->dma_rwctrl |= 0x00180000;
15183	} else if (!tg3_flag(tp, PCIX_MODE)) {
15184		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15185		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
15186			tp->dma_rwctrl |= 0x003f0000;
15187		else
15188			tp->dma_rwctrl |= 0x003f000f;
15189	} else {
15190		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15191		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
15192			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
15193			u32 read_water = 0x7;
15194
15195			/* If the 5704 is behind the EPB bridge, we can
15196			 * do the less restrictive ONE_DMA workaround for
15197			 * better performance.
15198			 */
15199			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
15200			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15201				tp->dma_rwctrl |= 0x8000;
15202			else if (ccval == 0x6 || ccval == 0x7)
15203				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
15204
15205			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
15206				read_water = 4;
15207			/* Set bit 23 to enable PCIX hw bug fix */
15208			tp->dma_rwctrl |=
15209				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
15210				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
15211				(1 << 23);
15212		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
15213			/* 5780 always in PCIX mode */
15214			tp->dma_rwctrl |= 0x00144000;
15215		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
15216			/* 5714 always in PCIX mode */
15217			tp->dma_rwctrl |= 0x00148000;
15218		} else {
15219			tp->dma_rwctrl |= 0x001b000f;
15220		}
15221	}
15222
15223	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15224	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15225		tp->dma_rwctrl &= 0xfffffff0;
15226
15227	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15228	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15229		/* Remove this if it causes problems for some boards. */
15230		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
15231
15232		/* On 5700/5701 chips, we need to set this bit.
15233		 * Otherwise the chip will issue cacheline transactions
15234		 * to streamable DMA memory with not all the byte
15235		 * enables turned on.  This is an error on several
15236		 * RISC PCI controllers, in particular sparc64.
15237		 *
15238		 * On 5703/5704 chips, this bit has been reassigned
15239		 * a different meaning.  In particular, it is used
15240		 * on those chips to enable a PCI-X workaround.
15241		 */
15242		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15243	}
15244
15245	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15246
15247#if 0
15248	/* Unneeded, already done by tg3_get_invariants.  */
15249	tg3_switch_clocks(tp);
15250#endif
15251
15252	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15253	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15254		goto out;
15255
15256	/* It is best to perform DMA test with maximum write burst size
15257	 * to expose the 5700/5701 write DMA bug.
15258	 */
15259	saved_dma_rwctrl = tp->dma_rwctrl;
15260	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15261	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15262
15263	while (1) {
15264		u32 *p = buf, i;
15265
15266		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15267			p[i] = i;
15268
15269		/* Send the buffer to the chip. */
15270		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15271		if (ret) {
15272			dev_err(&tp->pdev->dev,
15273				"%s: Buffer write failed. err = %d\n",
15274				__func__, ret);
15275			break;
15276		}
15277
15278#if 0
15279		/* validate data reached card RAM correctly. */
15280		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15281			u32 val;
15282			tg3_read_mem(tp, 0x2100 + (i*4), &val);
15283			if (le32_to_cpu(val) != p[i]) {
15284				dev_err(&tp->pdev->dev,
15285					"%s: Buffer corrupted on device! "
15286					"(%d != %d)\n", __func__, val, i);
15287				/* ret = -ENODEV here? */
15288			}
15289			p[i] = 0;
15290		}
15291#endif
15292		/* Now read it back. */
15293		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15294		if (ret) {
15295			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15296				"err = %d\n", __func__, ret);
15297			break;
15298		}
15299
15300		/* Verify it. */
15301		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15302			if (p[i] == i)
15303				continue;
15304
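			/* On the first mismatch, retry once with the
			 * conservative 16-byte write boundary before
			 * giving up on the DMA engine.
			 */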
15305			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15306			    DMA_RWCTRL_WRITE_BNDRY_16) {
15307				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15308				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15309				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15310				break;
15311			} else {
15312				dev_err(&tp->pdev->dev,
15313					"%s: Buffer corrupted on read back! "
15314					"(%d != %d)\n", __func__, p[i], i);
15315				ret = -ENODEV;
15316				goto out;
15317			}
15318		}
15319
15320		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15321			/* Success. */
15322			ret = 0;
15323			break;
15324		}
15325	}
15326	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15327	    DMA_RWCTRL_WRITE_BNDRY_16) {
15328		/* DMA test passed without adjusting the DMA boundary;
15329		 * now look for chipsets that are known to expose the
15330		 * DMA bug without failing the test.
15331		 */
15332		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15333			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15334			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15335		} else {
15336			/* Safe to use the calculated DMA boundary. */
15337			tp->dma_rwctrl = saved_dma_rwctrl;
15338		}
15339
15340		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15341	}
15342
15343out:
15344	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15345out_nofree:
15346	return ret;
15347}
15348
15349static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15350{
15351	if (tg3_flag(tp, 57765_PLUS)) {
15352		tp->bufmgr_config.mbuf_read_dma_low_water =
15353			DEFAULT_MB_RDMA_LOW_WATER_5705;
15354		tp->bufmgr_config.mbuf_mac_rx_low_water =
15355			DEFAULT_MB_MACRX_LOW_WATER_57765;
15356		tp->bufmgr_config.mbuf_high_water =
15357			DEFAULT_MB_HIGH_WATER_57765;
15358
15359		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15360			DEFAULT_MB_RDMA_LOW_WATER_5705;
15361		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15362			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15363		tp->bufmgr_config.mbuf_high_water_jumbo =
15364			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15365	} else if (tg3_flag(tp, 5705_PLUS)) {
15366		tp->bufmgr_config.mbuf_read_dma_low_water =
15367			DEFAULT_MB_RDMA_LOW_WATER_5705;
15368		tp->bufmgr_config.mbuf_mac_rx_low_water =
15369			DEFAULT_MB_MACRX_LOW_WATER_5705;
15370		tp->bufmgr_config.mbuf_high_water =
15371			DEFAULT_MB_HIGH_WATER_5705;
15372		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15373			tp->bufmgr_config.mbuf_mac_rx_low_water =
15374				DEFAULT_MB_MACRX_LOW_WATER_5906;
15375			tp->bufmgr_config.mbuf_high_water =
15376				DEFAULT_MB_HIGH_WATER_5906;
15377		}
15378
15379		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15380			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15381		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15382			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15383		tp->bufmgr_config.mbuf_high_water_jumbo =
15384			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15385	} else {
15386		tp->bufmgr_config.mbuf_read_dma_low_water =
15387			DEFAULT_MB_RDMA_LOW_WATER;
15388		tp->bufmgr_config.mbuf_mac_rx_low_water =
15389			DEFAULT_MB_MACRX_LOW_WATER;
15390		tp->bufmgr_config.mbuf_high_water =
15391			DEFAULT_MB_HIGH_WATER;
15392
15393		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15394			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15395		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15396			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15397		tp->bufmgr_config.mbuf_high_water_jumbo =
15398			DEFAULT_MB_HIGH_WATER_JUMBO;
15399	}
15400
15401	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15402	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15403}
15404
15405static char * __devinit tg3_phy_string(struct tg3 *tp)
15406{
15407	switch (tp->phy_id & TG3_PHY_ID_MASK) {
15408	case TG3_PHY_ID_BCM5400:	return "5400";
15409	case TG3_PHY_ID_BCM5401:	return "5401";
15410	case TG3_PHY_ID_BCM5411:	return "5411";
15411	case TG3_PHY_ID_BCM5701:	return "5701";
15412	case TG3_PHY_ID_BCM5703:	return "5703";
15413	case TG3_PHY_ID_BCM5704:	return "5704";
15414	case TG3_PHY_ID_BCM5705:	return "5705";
15415	case TG3_PHY_ID_BCM5750:	return "5750";
15416	case TG3_PHY_ID_BCM5752:	return "5752";
15417	case TG3_PHY_ID_BCM5714:	return "5714";
15418	case TG3_PHY_ID_BCM5780:	return "5780";
15419	case TG3_PHY_ID_BCM5755:	return "5755";
15420	case TG3_PHY_ID_BCM5787:	return "5787";
15421	case TG3_PHY_ID_BCM5784:	return "5784";
15422	case TG3_PHY_ID_BCM5756:	return "5722/5756";
15423	case TG3_PHY_ID_BCM5906:	return "5906";
15424	case TG3_PHY_ID_BCM5761:	return "5761";
15425	case TG3_PHY_ID_BCM5718C:	return "5718C";
15426	case TG3_PHY_ID_BCM5718S:	return "5718S";
15427	case TG3_PHY_ID_BCM57765:	return "57765";
15428	case TG3_PHY_ID_BCM5719C:	return "5719C";
15429	case TG3_PHY_ID_BCM5720C:	return "5720C";
15430	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
15431	case 0:			return "serdes";
15432	default:		return "unknown";
15433	}
15434}
15435
15436static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15437{
15438	if (tg3_flag(tp, PCI_EXPRESS)) {
15439		strcpy(str, "PCI Express");
15440		return str;
15441	} else if (tg3_flag(tp, PCIX_MODE)) {
15442		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15443
15444		strcpy(str, "PCIX:");
15445
15446		if ((clock_ctrl == 7) ||
15447		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15448		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15449			strcat(str, "133MHz");
15450		else if (clock_ctrl == 0)
15451			strcat(str, "33MHz");
15452		else if (clock_ctrl == 2)
15453			strcat(str, "50MHz");
15454		else if (clock_ctrl == 4)
15455			strcat(str, "66MHz");
15456		else if (clock_ctrl == 6)
15457			strcat(str, "100MHz");
15458	} else {
15459		strcpy(str, "PCI:");
15460		if (tg3_flag(tp, PCI_HIGH_SPEED))
15461			strcat(str, "66MHz");
15462		else
15463			strcat(str, "33MHz");
15464	}
15465	if (tg3_flag(tp, PCI_32BIT))
15466		strcat(str, ":32-bit");
15467	else
15468		strcat(str, ":64-bit");
15469	return str;
15470}
15471
15472static void __devinit tg3_init_coal(struct tg3 *tp)
15473{
15474	struct ethtool_coalesce *ec = &tp->coal;
15475
15476	memset(ec, 0, sizeof(*ec));
15477	ec->cmd = ETHTOOL_GCOALESCE;
15478	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15479	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15480	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15481	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15482	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15483	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15484	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15485	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15486	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15487
15488	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15489				 HOSTCC_MODE_CLRTICK_TXBD)) {
15490		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15491		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15492		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15493		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15494	}
15495
15496	if (tg3_flag(tp, 5705_PLUS)) {
15497		ec->rx_coalesce_usecs_irq = 0;
15498		ec->tx_coalesce_usecs_irq = 0;
15499		ec->stats_block_coalesce_usecs = 0;
15500	}
15501}
15502
15503static int __devinit tg3_init_one(struct pci_dev *pdev,
15504				  const struct pci_device_id *ent)
15505{
15506	struct net_device *dev;
15507	struct tg3 *tp;
15508	int i, err, pm_cap;
15509	u32 sndmbx, rcvmbx, intmbx;
15510	char str[40];
15511	u64 dma_mask, persist_dma_mask;
15512	netdev_features_t features = 0;
15513
15514	printk_once(KERN_INFO "%s\n", version);
15515
15516	err = pci_enable_device(pdev);
15517	if (err) {
15518		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15519		return err;
15520	}
15521
15522	err = pci_request_regions(pdev, DRV_MODULE_NAME);
15523	if (err) {
15524		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15525		goto err_out_disable_pdev;
15526	}
15527
15528	pci_set_master(pdev);
15529
15530	/* Find power-management capability. */
15531	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15532	if (pm_cap == 0) {
15533		dev_err(&pdev->dev,
15534			"Cannot find Power Management capability, aborting\n");
15535		err = -EIO;
15536		goto err_out_free_res;
15537	}
15538
15539	err = pci_set_power_state(pdev, PCI_D0);
15540	if (err) {
15541		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15542		goto err_out_free_res;
15543	}
15544
15545	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15546	if (!dev) {
15547		err = -ENOMEM;
15548		goto err_out_power_down;
15549	}
15550
15551	SET_NETDEV_DEV(dev, &pdev->dev);
15552
15553	tp = netdev_priv(dev);
15554	tp->pdev = pdev;
15555	tp->dev = dev;
15556	tp->pm_cap = pm_cap;
15557	tp->rx_mode = TG3_DEF_RX_MODE;
15558	tp->tx_mode = TG3_DEF_TX_MODE;
15559
15560	if (tg3_debug > 0)
15561		tp->msg_enable = tg3_debug;
15562	else
15563		tp->msg_enable = TG3_DEF_MSG_ENABLE;
15564
15565	/* The word/byte swap controls here control register access byte
15566	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
15567	 * setting below.
15568	 */
15569	tp->misc_host_ctrl =
15570		MISC_HOST_CTRL_MASK_PCI_INT |
15571		MISC_HOST_CTRL_WORD_SWAP |
15572		MISC_HOST_CTRL_INDIR_ACCESS |
15573		MISC_HOST_CTRL_PCISTATE_RW;
15574
15575	/* The NONFRM (non-frame) byte/word swap controls take effect
15576	 * on descriptor entries, anything which isn't packet data.
15577	 *
15578	 * The StrongARM chips on the board (one for tx, one for rx)
15579	 * are running in big-endian mode.
15580	 */
15581	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15582			GRC_MODE_WSWAP_NONFRM_DATA);
15583#ifdef __BIG_ENDIAN
15584	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15585#endif
15586	spin_lock_init(&tp->lock);
15587	spin_lock_init(&tp->indirect_lock);
15588	INIT_WORK(&tp->reset_task, tg3_reset_task);
15589
15590	tp->regs = pci_ioremap_bar(pdev, BAR_0);
15591	if (!tp->regs) {
15592		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15593		err = -ENOMEM;
15594		goto err_out_free_dev;
15595	}
15596
15597	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15598	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15599	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15600	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15601	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15602	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15603	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15604	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15605		tg3_flag_set(tp, ENABLE_APE);
15606		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15607		if (!tp->aperegs) {
15608			dev_err(&pdev->dev,
15609				"Cannot map APE registers, aborting\n");
15610			err = -ENOMEM;
15611			goto err_out_iounmap;
15612		}
15613	}
15614
15615	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15616	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15617
15618	dev->ethtool_ops = &tg3_ethtool_ops;
15619	dev->watchdog_timeo = TG3_TX_TIMEOUT;
15620	dev->netdev_ops = &tg3_netdev_ops;
15621	dev->irq = pdev->irq;
15622
15623	err = tg3_get_invariants(tp);
15624	if (err) {
15625		dev_err(&pdev->dev,
15626			"Problem fetching invariants of chip, aborting\n");
15627		goto err_out_apeunmap;
15628	}
15629
15630	/* The EPB bridge inside 5714, 5715, and 5780 and any
15631	 * device behind the EPB cannot support DMA addresses > 40-bit.
15632	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15633	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15634	 * do DMA address check in tg3_start_xmit().
15635	 */
15636	if (tg3_flag(tp, IS_5788))
15637		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15638	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15639		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15640#ifdef CONFIG_HIGHMEM
15641		dma_mask = DMA_BIT_MASK(64);
15642#endif
15643	} else
15644		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15645
15646	/* Configure DMA attributes. */
15647	if (dma_mask > DMA_BIT_MASK(32)) {
15648		err = pci_set_dma_mask(pdev, dma_mask);
15649		if (!err) {
15650			features |= NETIF_F_HIGHDMA;
15651			err = pci_set_consistent_dma_mask(pdev,
15652							  persist_dma_mask);
15653			if (err < 0) {
15654				dev_err(&pdev->dev, "Unable to obtain 64 bit "
15655					"DMA for consistent allocations\n");
15656				goto err_out_apeunmap;
15657			}
15658		}
15659	}
15660	if (err || dma_mask == DMA_BIT_MASK(32)) {
15661		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15662		if (err) {
15663			dev_err(&pdev->dev,
15664				"No usable DMA configuration, aborting\n");
15665			goto err_out_apeunmap;
15666		}
15667	}
15668
15669	tg3_init_bufmgr_config(tp);
15670
15671	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15672
15673	/* 5700 B0 chips do not support checksumming correctly due
15674	 * to hardware bugs.
15675	 */
15676	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15677		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15678
15679		if (tg3_flag(tp, 5755_PLUS))
15680			features |= NETIF_F_IPV6_CSUM;
15681	}
15682
15683	/* TSO is on by default on chips that support hardware TSO.
15684	 * Firmware TSO on older chips gives lower performance, so it
15685	 * is off by default, but can be enabled using ethtool.
15686	 */
15687	if ((tg3_flag(tp, HW_TSO_1) ||
15688	     tg3_flag(tp, HW_TSO_2) ||
15689	     tg3_flag(tp, HW_TSO_3)) &&
15690	    (features & NETIF_F_IP_CSUM))
15691		features |= NETIF_F_TSO;
15692	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15693		if (features & NETIF_F_IPV6_CSUM)
15694			features |= NETIF_F_TSO6;
15695		if (tg3_flag(tp, HW_TSO_3) ||
15696		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15697		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15698		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15699		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15700		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15701			features |= NETIF_F_TSO_ECN;
15702	}
15703
15704	dev->features |= features;
15705	dev->vlan_features |= features;
15706
15707	/*
15708	 * Add loopback capability only for a subset of devices that support
15709	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15710	 * loopback for the remaining devices.
15711	 */
15712	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15713	    !tg3_flag(tp, CPMU_PRESENT))
15714		/* Add the loopback capability */
15715		features |= NETIF_F_LOOPBACK;
15716
15717	dev->hw_features |= features;
15718
15719	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15720	    !tg3_flag(tp, TSO_CAPABLE) &&
15721	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15722		tg3_flag_set(tp, MAX_RXPEND_64);
15723		tp->rx_pending = 63;
15724	}
15725
15726	err = tg3_get_device_address(tp);
15727	if (err) {
15728		dev_err(&pdev->dev,
15729			"Could not obtain valid ethernet address, aborting\n");
15730		goto err_out_apeunmap;
15731	}
15732
15733	/*
15734	 * Reset chip in case the UNDI or EFI driver did not shut down
15735	 * DMA.  The DMA self test will enable WDMAC and we'll see (spurious)
15736	 * pending DMA on the PCI bus at that point.
15737	 */
15738	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15739	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15740		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15741		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15742	}
15743
15744	err = tg3_test_dma(tp);
15745	if (err) {
15746		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15747		goto err_out_apeunmap;
15748	}
15749
15750	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15751	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15752	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15753	for (i = 0; i < tp->irq_max; i++) {
15754		struct tg3_napi *tnapi = &tp->napi[i];
15755
15756		tnapi->tp = tp;
15757		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15758
15759		tnapi->int_mbox = intmbx;
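		/* The first five interrupt mailboxes are spaced 8 bytes
		 * apart; the remaining ones are packed at 4-byte
		 * intervals.
		 */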
15760		if (i <= 4)
15761			intmbx += 0x8;
15762		else
15763			intmbx += 0x4;
15764
15765		tnapi->consmbox = rcvmbx;
15766		tnapi->prodmbox = sndmbx;
15767
15768		if (i)
15769			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15770		else
15771			tnapi->coal_now = HOSTCC_MODE_NOW;
15772
15773		if (!tg3_flag(tp, SUPPORT_MSIX))
15774			break;
15775
15776		/*
15777		 * If we support MSIX, we'll be using RSS.  If we're using
15778		 * RSS, the first vector only handles link interrupts and the
15779		 * remaining vectors handle rx and tx interrupts.  Reuse the
15780		 * mailbox values for the next iteration.  The values we set up
15781		 * above are still useful for the single-vector mode.
15782		 */
15783		if (!i)
15784			continue;
15785
15786		rcvmbx += 0x8;
15787
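		/* The send producer mailboxes are not contiguous; the
		 * alternating +0xc/-0x4 stepping presumably mirrors the
		 * hardware register layout.
		 */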
15788		if (sndmbx & 0x4)
15789			sndmbx -= 0x4;
15790		else
15791			sndmbx += 0xc;
15792	}
15793
15794	tg3_init_coal(tp);
15795
15796	pci_set_drvdata(pdev, dev);
15797
15798	if (tg3_flag(tp, 5717_PLUS)) {
15799		/* Resume a low-power mode */
15800		tg3_frob_aux_power(tp, false);
15801	}
15802
15803	tg3_timer_init(tp);
15804
15805	err = register_netdev(dev);
15806	if (err) {
15807		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15808		goto err_out_apeunmap;
15809	}
15810
15811	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15812		    tp->board_part_number,
15813		    tp->pci_chip_rev_id,
15814		    tg3_bus_string(tp, str),
15815		    dev->dev_addr);
15816
15817	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15818		struct phy_device *phydev;
15819		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15820		netdev_info(dev,
15821			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15822			    phydev->drv->name, dev_name(&phydev->dev));
15823	} else {
15824		char *ethtype;
15825
15826		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15827			ethtype = "10/100Base-TX";
15828		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15829			ethtype = "1000Base-SX";
15830		else
15831			ethtype = "10/100/1000Base-T";
15832
15833		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15834			    "(WireSpeed[%d], EEE[%d])\n",
15835			    tg3_phy_string(tp), ethtype,
15836			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15837			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15838	}
15839
15840	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15841		    (dev->features & NETIF_F_RXCSUM) != 0,
15842		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
15843		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15844		    tg3_flag(tp, ENABLE_ASF) != 0,
15845		    tg3_flag(tp, TSO_CAPABLE) != 0);
15846	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15847		    tp->dma_rwctrl,
15848		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15849		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15850
15851	pci_save_state(pdev);
15852
15853	return 0;
15854
15855err_out_apeunmap:
15856	if (tp->aperegs) {
15857		iounmap(tp->aperegs);
15858		tp->aperegs = NULL;
15859	}
15860
15861err_out_iounmap:
15862	if (tp->regs) {
15863		iounmap(tp->regs);
15864		tp->regs = NULL;
15865	}
15866
15867err_out_free_dev:
15868	free_netdev(dev);
15869
15870err_out_power_down:
15871	pci_set_power_state(pdev, PCI_D3hot);
15872
15873err_out_free_res:
15874	pci_release_regions(pdev);
15875
15876err_out_disable_pdev:
15877	pci_disable_device(pdev);
15878	pci_set_drvdata(pdev, NULL);
15879	return err;
15880}
15881
15882static void __devexit tg3_remove_one(struct pci_dev *pdev)
15883{
15884	struct net_device *dev = pci_get_drvdata(pdev);
15885
15886	if (dev) {
15887		struct tg3 *tp = netdev_priv(dev);
15888
15889		release_firmware(tp->fw);
15890
15891		tg3_reset_task_cancel(tp);
15892
15893		if (tg3_flag(tp, USE_PHYLIB)) {
15894			tg3_phy_fini(tp);
15895			tg3_mdio_fini(tp);
15896		}
15897
15898		unregister_netdev(dev);
15899		if (tp->aperegs) {
15900			iounmap(tp->aperegs);
15901			tp->aperegs = NULL;
15902		}
15903		if (tp->regs) {
15904			iounmap(tp->regs);
15905			tp->regs = NULL;
15906		}
15907		free_netdev(dev);
15908		pci_release_regions(pdev);
15909		pci_disable_device(pdev);
15910		pci_set_drvdata(pdev, NULL);
15911	}
15912}
15913
15914#ifdef CONFIG_PM_SLEEP
15915static int tg3_suspend(struct device *device)
15916{
15917	struct pci_dev *pdev = to_pci_dev(device);
15918	struct net_device *dev = pci_get_drvdata(pdev);
15919	struct tg3 *tp = netdev_priv(dev);
15920	int err;
15921
15922	if (!netif_running(dev))
15923		return 0;
15924
15925	tg3_reset_task_cancel(tp);
15926	tg3_phy_stop(tp);
15927	tg3_netif_stop(tp);
15928
15929	tg3_timer_stop(tp);
15930
15931	tg3_full_lock(tp, 1);
15932	tg3_disable_ints(tp);
15933	tg3_full_unlock(tp);
15934
15935	netif_device_detach(dev);
15936
15937	tg3_full_lock(tp, 0);
15938	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15939	tg3_flag_clear(tp, INIT_COMPLETE);
15940	tg3_full_unlock(tp);
15941
15942	err = tg3_power_down_prepare(tp);
15943	if (err) {
15944		int err2;
15945
15946		tg3_full_lock(tp, 0);
15947
15948		tg3_flag_set(tp, INIT_COMPLETE);
15949		err2 = tg3_restart_hw(tp, 1);
15950		if (err2)
15951			goto out;
15952
15953		tg3_timer_start(tp);
15954
15955		netif_device_attach(dev);
15956		tg3_netif_start(tp);
15957
15958out:
15959		tg3_full_unlock(tp);
15960
15961		if (!err2)
15962			tg3_phy_start(tp);
15963	}
15964
15965	return err;
15966}
15967
15968static int tg3_resume(struct device *device)
15969{
15970	struct pci_dev *pdev = to_pci_dev(device);
15971	struct net_device *dev = pci_get_drvdata(pdev);
15972	struct tg3 *tp = netdev_priv(dev);
15973	int err;
15974
15975	if (!netif_running(dev))
15976		return 0;
15977
15978	netif_device_attach(dev);
15979
15980	tg3_full_lock(tp, 0);
15981
15982	tg3_flag_set(tp, INIT_COMPLETE);
15983	err = tg3_restart_hw(tp, 1);
15984	if (err)
15985		goto out;
15986
15987	tg3_timer_start(tp);
15988
15989	tg3_netif_start(tp);
15990
15991out:
15992	tg3_full_unlock(tp);
15993
15994	if (!err)
15995		tg3_phy_start(tp);
15996
15997	return err;
15998}
15999
16000static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
16001#define TG3_PM_OPS (&tg3_pm_ops)
16002
16003#else
16004
16005#define TG3_PM_OPS NULL
16006
16007#endif /* CONFIG_PM_SLEEP */
16008
16009/**
16010 * tg3_io_error_detected - called when PCI error is detected
16011 * @pdev: Pointer to PCI device
16012 * @state: The current pci connection state
16013 *
16014 * This function is called after a PCI bus error affecting
16015 * this device has been detected.
16016 */
16017static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
16018					      pci_channel_state_t state)
16019{
16020	struct net_device *netdev = pci_get_drvdata(pdev);
16021	struct tg3 *tp = netdev_priv(netdev);
16022	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
16023
16024	netdev_info(netdev, "PCI I/O error detected\n");
16025
16026	rtnl_lock();
16027
16028	if (!netif_running(netdev))
16029		goto done;
16030
16031	tg3_phy_stop(tp);
16032
16033	tg3_netif_stop(tp);
16034
16035	tg3_timer_stop(tp);
16036
16037	/* Want to make sure that the reset task doesn't run */
16038	tg3_reset_task_cancel(tp);
16039
16040	netif_device_detach(netdev);
16041
16042	/* Clean up software state, even if MMIO is blocked */
16043	tg3_full_lock(tp, 0);
16044	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
16045	tg3_full_unlock(tp);
16046
16047done:
16048	if (state == pci_channel_io_perm_failure)
16049		err = PCI_ERS_RESULT_DISCONNECT;
16050	else
16051		pci_disable_device(pdev);
16052
16053	rtnl_unlock();
16054
16055	return err;
16056}
16057
16058/**
16059 * tg3_io_slot_reset - called after the pci bus has been reset.
16060 * @pdev: Pointer to PCI device
16061 *
16062 * Restart the card from scratch, as if from a cold-boot.
16063 * At this point, the card has experienced a hard reset,
16064 * followed by fixups by BIOS, and has its config space
16065 * set up identically to what it was at cold boot.
16066 */
16067static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
16068{
16069	struct net_device *netdev = pci_get_drvdata(pdev);
16070	struct tg3 *tp = netdev_priv(netdev);
16071	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
16072	int err;
16073
16074	rtnl_lock();
16075
16076	if (pci_enable_device(pdev)) {
16077		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
16078		goto done;
16079	}
16080
16081	pci_set_master(pdev);
16082	pci_restore_state(pdev);
16083	pci_save_state(pdev);
16084
16085	if (!netif_running(netdev)) {
16086		rc = PCI_ERS_RESULT_RECOVERED;
16087		goto done;
16088	}
16089
16090	err = tg3_power_up(tp);
16091	if (err)
16092		goto done;
16093
16094	rc = PCI_ERS_RESULT_RECOVERED;
16095
16096done:
16097	rtnl_unlock();
16098
16099	return rc;
16100}
16101
16102/**
16103 * tg3_io_resume - called when traffic can start flowing again.
16104 * @pdev: Pointer to PCI device
16105 *
16106 * This callback is called when the error recovery driver tells
16107 * us that it's OK to resume normal operation.
16108 */
16109static void tg3_io_resume(struct pci_dev *pdev)
16110{
16111	struct net_device *netdev = pci_get_drvdata(pdev);
16112	struct tg3 *tp = netdev_priv(netdev);
16113	int err;
16114
16115	rtnl_lock();
16116
16117	if (!netif_running(netdev))
16118		goto done;
16119
16120	tg3_full_lock(tp, 0);
16121	tg3_flag_set(tp, INIT_COMPLETE);
16122	err = tg3_restart_hw(tp, 1);
16123	tg3_full_unlock(tp);
16124	if (err) {
16125		netdev_err(netdev, "Cannot restart hardware after reset.\n");
16126		goto done;
16127	}
16128
16129	netif_device_attach(netdev);
16130
16131	tg3_timer_start(tp);
16132
16133	tg3_netif_start(tp);
16134
16135	tg3_phy_start(tp);
16136
16137done:
16138	rtnl_unlock();
16139}
16140
16141static struct pci_error_handlers tg3_err_handler = {
16142	.error_detected	= tg3_io_error_detected,
16143	.slot_reset	= tg3_io_slot_reset,
16144	.resume		= tg3_io_resume
16145};
16146
16147static struct pci_driver tg3_driver = {
16148	.name		= DRV_MODULE_NAME,
16149	.id_table	= tg3_pci_tbl,
16150	.probe		= tg3_init_one,
16151	.remove		= __devexit_p(tg3_remove_one),
16152	.err_handler	= &tg3_err_handler,
16153	.driver.pm	= TG3_PM_OPS,
16154};
16155
16156static int __init tg3_init(void)
16157{
16158	return pci_register_driver(&tg3_driver);
16159}
16160
16161static void __exit tg3_cleanup(void)
16162{
16163	pci_unregister_driver(&tg3_driver);
16164}
16165
16166module_init(tg3_init);
16167module_exit(tg3_cleanup);