   1/************************************************************************
   2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
   3 * Copyright(c) 2002-2010 Exar Corp.
   4 *
   5 * This software may be used and distributed according to the terms of
   6 * the GNU General Public License (GPL), incorporated herein by reference.
   7 * Drivers based on or derived from this code fall under the GPL and must
   8 * retain the authorship, copyright and license notice.  This file is not
   9 * a complete program and may only be used when the entire operating
  10 * system is licensed under the GPL.
  11 * See the file COPYING in this distribution for more information.
  12 *
  13 * Credits:
  14 * Jeff Garzik		: For pointing out the improper error condition
  15 *			  check in the s2io_xmit routine and also some
  16 *			  issues in the Tx watch dog function. Also for
  17 *			  patiently answering all those innumerable
   18 *			  questions regarding the 2.6 porting issues.
  19 * Stephen Hemminger	: Providing proper 2.6 porting mechanism for some
  20 *			  macros available only in 2.6 Kernel.
  21 * Francois Romieu	: For pointing out all code part that were
  22 *			  deprecated and also styling related comments.
  23 * Grant Grundler	: For helping me get rid of some Architecture
  24 *			  dependent code.
  25 * Christopher Hellwig	: Some more 2.6 specific issues in the driver.
  26 *
  27 * The module loadable parameters that are supported by the driver and a brief
  28 * explanation of all the variables.
  29 *
  30 * rx_ring_num : This can be used to program the number of receive rings used
  31 * in the driver.
  32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
  33 *     This is also an array of size 8.
  34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
  35 *		values are 1, 2.
   36 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
  37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
  38 * Tx descriptors that can be associated with each corresponding FIFO.
  39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
  40 *     2(MSI_X). Default value is '2(MSI_X)'
   41 * lro_max_pkts: This parameter defines the maximum number of packets that can be
  42 *     aggregated as a single large packet
  43 * napi: This parameter used to enable/disable NAPI (polling Rx)
  44 *     Possible values '1' for enable and '0' for disable. Default is '1'
  45 * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
  46 *      Possible values '1' for enable and '0' for disable. Default is '0'
  47 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
  48 *                 Possible values '1' for enable , '0' for disable.
  49 *                 Default is '2' - which means disable in promisc mode
  50 *                 and enable in non-promiscuous mode.
  51 * multiq: This parameter used to enable/disable MULTIQUEUE support.
  52 *      Possible values '1' for enable and '0' for disable. Default is '0'
  53 ************************************************************************/
  54
  55#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  56
  57#include <linux/module.h>
  58#include <linux/types.h>
  59#include <linux/errno.h>
  60#include <linux/ioport.h>
  61#include <linux/pci.h>
  62#include <linux/dma-mapping.h>
  63#include <linux/kernel.h>
  64#include <linux/netdevice.h>
  65#include <linux/etherdevice.h>
  66#include <linux/mdio.h>
  67#include <linux/skbuff.h>
  68#include <linux/init.h>
  69#include <linux/delay.h>
  70#include <linux/stddef.h>
  71#include <linux/ioctl.h>
  72#include <linux/timex.h>
  73#include <linux/ethtool.h>
  74#include <linux/workqueue.h>
  75#include <linux/if_vlan.h>
  76#include <linux/ip.h>
  77#include <linux/tcp.h>
  78#include <linux/uaccess.h>
  79#include <linux/io.h>
 
  80#include <linux/slab.h>
  81#include <linux/prefetch.h>
  82#include <net/tcp.h>
  83#include <net/checksum.h>
  84
  85#include <asm/div64.h>
  86#include <asm/irq.h>
  87
  88/* local include */
  89#include "s2io.h"
  90#include "s2io-regs.h"
  91
  92#define DRV_VERSION "2.0.26.28"
  93
  94/* S2io Driver name & version. */
  95static const char s2io_driver_name[] = "Neterion";
  96static const char s2io_driver_version[] = DRV_VERSION;
  97
  98static const int rxd_size[2] = {32, 48};
  99static const int rxd_count[2] = {127, 85};
 100
 101static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
 102{
 103	int ret;
 104
 105	ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
 106	       (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
 107
 108	return ret;
 109}
 110
 111/*
 112 * Cards with following subsystem_id have a link state indication
 113 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
 114 * macro below identifies these cards given the subsystem_id.
 115 */
 116#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)		\
 117	(dev_type == XFRAME_I_DEVICE) ?					\
 118	((((subid >= 0x600B) && (subid <= 0x600D)) ||			\
 119	  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
 120
 121#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
 122				      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
 123
 124static inline int is_s2io_card_up(const struct s2io_nic *sp)
 125{
 126	return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
 127}
 128
 129/* Ethtool related variables and Macros. */
 130static const char s2io_gstrings[][ETH_GSTRING_LEN] = {
 131	"Register test\t(offline)",
 132	"Eeprom test\t(offline)",
 133	"Link test\t(online)",
 134	"RLDRAM test\t(offline)",
 135	"BIST Test\t(offline)"
 136};
 137
 138static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
 139	{"tmac_frms"},
 140	{"tmac_data_octets"},
 141	{"tmac_drop_frms"},
 142	{"tmac_mcst_frms"},
 143	{"tmac_bcst_frms"},
 144	{"tmac_pause_ctrl_frms"},
 145	{"tmac_ttl_octets"},
 146	{"tmac_ucst_frms"},
 147	{"tmac_nucst_frms"},
 148	{"tmac_any_err_frms"},
 149	{"tmac_ttl_less_fb_octets"},
 150	{"tmac_vld_ip_octets"},
 151	{"tmac_vld_ip"},
 152	{"tmac_drop_ip"},
 153	{"tmac_icmp"},
 154	{"tmac_rst_tcp"},
 155	{"tmac_tcp"},
 156	{"tmac_udp"},
 157	{"rmac_vld_frms"},
 158	{"rmac_data_octets"},
 159	{"rmac_fcs_err_frms"},
 160	{"rmac_drop_frms"},
 161	{"rmac_vld_mcst_frms"},
 162	{"rmac_vld_bcst_frms"},
 163	{"rmac_in_rng_len_err_frms"},
 164	{"rmac_out_rng_len_err_frms"},
 165	{"rmac_long_frms"},
 166	{"rmac_pause_ctrl_frms"},
 167	{"rmac_unsup_ctrl_frms"},
 168	{"rmac_ttl_octets"},
 169	{"rmac_accepted_ucst_frms"},
 170	{"rmac_accepted_nucst_frms"},
 171	{"rmac_discarded_frms"},
 172	{"rmac_drop_events"},
 173	{"rmac_ttl_less_fb_octets"},
 174	{"rmac_ttl_frms"},
 175	{"rmac_usized_frms"},
 176	{"rmac_osized_frms"},
 177	{"rmac_frag_frms"},
 178	{"rmac_jabber_frms"},
 179	{"rmac_ttl_64_frms"},
 180	{"rmac_ttl_65_127_frms"},
 181	{"rmac_ttl_128_255_frms"},
 182	{"rmac_ttl_256_511_frms"},
 183	{"rmac_ttl_512_1023_frms"},
 184	{"rmac_ttl_1024_1518_frms"},
 185	{"rmac_ip"},
 186	{"rmac_ip_octets"},
 187	{"rmac_hdr_err_ip"},
 188	{"rmac_drop_ip"},
 189	{"rmac_icmp"},
 190	{"rmac_tcp"},
 191	{"rmac_udp"},
 192	{"rmac_err_drp_udp"},
 193	{"rmac_xgmii_err_sym"},
 194	{"rmac_frms_q0"},
 195	{"rmac_frms_q1"},
 196	{"rmac_frms_q2"},
 197	{"rmac_frms_q3"},
 198	{"rmac_frms_q4"},
 199	{"rmac_frms_q5"},
 200	{"rmac_frms_q6"},
 201	{"rmac_frms_q7"},
 202	{"rmac_full_q0"},
 203	{"rmac_full_q1"},
 204	{"rmac_full_q2"},
 205	{"rmac_full_q3"},
 206	{"rmac_full_q4"},
 207	{"rmac_full_q5"},
 208	{"rmac_full_q6"},
 209	{"rmac_full_q7"},
 210	{"rmac_pause_cnt"},
 211	{"rmac_xgmii_data_err_cnt"},
 212	{"rmac_xgmii_ctrl_err_cnt"},
 213	{"rmac_accepted_ip"},
 214	{"rmac_err_tcp"},
 215	{"rd_req_cnt"},
 216	{"new_rd_req_cnt"},
 217	{"new_rd_req_rtry_cnt"},
 218	{"rd_rtry_cnt"},
 219	{"wr_rtry_rd_ack_cnt"},
 220	{"wr_req_cnt"},
 221	{"new_wr_req_cnt"},
 222	{"new_wr_req_rtry_cnt"},
 223	{"wr_rtry_cnt"},
 224	{"wr_disc_cnt"},
 225	{"rd_rtry_wr_ack_cnt"},
 226	{"txp_wr_cnt"},
 227	{"txd_rd_cnt"},
 228	{"txd_wr_cnt"},
 229	{"rxd_rd_cnt"},
 230	{"rxd_wr_cnt"},
 231	{"txf_rd_cnt"},
 232	{"rxf_wr_cnt"}
 233};
 234
 235static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
 236	{"rmac_ttl_1519_4095_frms"},
 237	{"rmac_ttl_4096_8191_frms"},
 238	{"rmac_ttl_8192_max_frms"},
 239	{"rmac_ttl_gt_max_frms"},
 240	{"rmac_osized_alt_frms"},
 241	{"rmac_jabber_alt_frms"},
 242	{"rmac_gt_max_alt_frms"},
 243	{"rmac_vlan_frms"},
 244	{"rmac_len_discard"},
 245	{"rmac_fcs_discard"},
 246	{"rmac_pf_discard"},
 247	{"rmac_da_discard"},
 248	{"rmac_red_discard"},
 249	{"rmac_rts_discard"},
 250	{"rmac_ingm_full_discard"},
 251	{"link_fault_cnt"}
 252};
 253
 254static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
 255	{"\n DRIVER STATISTICS"},
 256	{"single_bit_ecc_errs"},
 257	{"double_bit_ecc_errs"},
 258	{"parity_err_cnt"},
 259	{"serious_err_cnt"},
 260	{"soft_reset_cnt"},
 261	{"fifo_full_cnt"},
 262	{"ring_0_full_cnt"},
 263	{"ring_1_full_cnt"},
 264	{"ring_2_full_cnt"},
 265	{"ring_3_full_cnt"},
 266	{"ring_4_full_cnt"},
 267	{"ring_5_full_cnt"},
 268	{"ring_6_full_cnt"},
 269	{"ring_7_full_cnt"},
 270	{"alarm_transceiver_temp_high"},
 271	{"alarm_transceiver_temp_low"},
 272	{"alarm_laser_bias_current_high"},
 273	{"alarm_laser_bias_current_low"},
 274	{"alarm_laser_output_power_high"},
 275	{"alarm_laser_output_power_low"},
 276	{"warn_transceiver_temp_high"},
 277	{"warn_transceiver_temp_low"},
 278	{"warn_laser_bias_current_high"},
 279	{"warn_laser_bias_current_low"},
 280	{"warn_laser_output_power_high"},
 281	{"warn_laser_output_power_low"},
 282	{"lro_aggregated_pkts"},
 283	{"lro_flush_both_count"},
 284	{"lro_out_of_sequence_pkts"},
 285	{"lro_flush_due_to_max_pkts"},
 286	{"lro_avg_aggr_pkts"},
 287	{"mem_alloc_fail_cnt"},
 288	{"pci_map_fail_cnt"},
 289	{"watchdog_timer_cnt"},
 290	{"mem_allocated"},
 291	{"mem_freed"},
 292	{"link_up_cnt"},
 293	{"link_down_cnt"},
 294	{"link_up_time"},
 295	{"link_down_time"},
 296	{"tx_tcode_buf_abort_cnt"},
 297	{"tx_tcode_desc_abort_cnt"},
 298	{"tx_tcode_parity_err_cnt"},
 299	{"tx_tcode_link_loss_cnt"},
 300	{"tx_tcode_list_proc_err_cnt"},
 301	{"rx_tcode_parity_err_cnt"},
 302	{"rx_tcode_abort_cnt"},
 303	{"rx_tcode_parity_abort_cnt"},
 304	{"rx_tcode_rda_fail_cnt"},
 305	{"rx_tcode_unkn_prot_cnt"},
 306	{"rx_tcode_fcs_err_cnt"},
 307	{"rx_tcode_buf_size_err_cnt"},
 308	{"rx_tcode_rxd_corrupt_cnt"},
 309	{"rx_tcode_unkn_err_cnt"},
 310	{"tda_err_cnt"},
 311	{"pfc_err_cnt"},
 312	{"pcc_err_cnt"},
 313	{"tti_err_cnt"},
 314	{"tpa_err_cnt"},
 315	{"sm_err_cnt"},
 316	{"lso_err_cnt"},
 317	{"mac_tmac_err_cnt"},
 318	{"mac_rmac_err_cnt"},
 319	{"xgxs_txgxs_err_cnt"},
 320	{"xgxs_rxgxs_err_cnt"},
 321	{"rc_err_cnt"},
 322	{"prc_pcix_err_cnt"},
 323	{"rpa_err_cnt"},
 324	{"rda_err_cnt"},
 325	{"rti_err_cnt"},
 326	{"mc_err_cnt"}
 327};
 328
 329#define S2IO_XENA_STAT_LEN	ARRAY_SIZE(ethtool_xena_stats_keys)
 330#define S2IO_ENHANCED_STAT_LEN	ARRAY_SIZE(ethtool_enhanced_stats_keys)
 331#define S2IO_DRIVER_STAT_LEN	ARRAY_SIZE(ethtool_driver_stats_keys)
 332
 333#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
 334#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)
 335
 336#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
 337#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)
 338
 339#define S2IO_TEST_LEN	ARRAY_SIZE(s2io_gstrings)
 340#define S2IO_STRINGS_LEN	(S2IO_TEST_LEN * ETH_GSTRING_LEN)
 341
 342#define S2IO_TIMER_CONF(timer, handle, arg, exp)	\
 343	init_timer(&timer);				\
 344	timer.function = handle;			\
 345	timer.data = (unsigned long)arg;		\
 346	mod_timer(&timer, (jiffies + exp))		\
 347
 348/* copy mac addr to def_mac_addr array */
 349static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
 350{
 351	sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
 352	sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
 353	sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
 354	sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
 355	sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
 356	sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
 357}
 358
 359/*
 360 * Constants to be programmed into the Xena's registers, to configure
 361 * the XAUI.
 362 */
 363
 364#define	END_SIGN	0x0
 365static const u64 herc_act_dtx_cfg[] = {
 366	/* Set address */
 367	0x8000051536750000ULL, 0x80000515367500E0ULL,
 368	/* Write data */
 369	0x8000051536750004ULL, 0x80000515367500E4ULL,
 370	/* Set address */
 371	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
 372	/* Write data */
 373	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
 374	/* Set address */
 375	0x801205150D440000ULL, 0x801205150D4400E0ULL,
 376	/* Write data */
 377	0x801205150D440004ULL, 0x801205150D4400E4ULL,
 378	/* Set address */
 379	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
 380	/* Write data */
 381	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
 382	/* Done */
 383	END_SIGN
 384};
 385
 386static const u64 xena_dtx_cfg[] = {
 387	/* Set address */
 388	0x8000051500000000ULL, 0x80000515000000E0ULL,
 389	/* Write data */
 390	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
 391	/* Set address */
 392	0x8001051500000000ULL, 0x80010515000000E0ULL,
 393	/* Write data */
 394	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
 395	/* Set address */
 396	0x8002051500000000ULL, 0x80020515000000E0ULL,
 397	/* Write data */
 398	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
 399	END_SIGN
 400};
 401
 402/*
 403 * Constants for Fixing the MacAddress problem seen mostly on
 404 * Alpha machines.
 405 */
 406static const u64 fix_mac[] = {
 407	0x0060000000000000ULL, 0x0060600000000000ULL,
 408	0x0040600000000000ULL, 0x0000600000000000ULL,
 409	0x0020600000000000ULL, 0x0060600000000000ULL,
 410	0x0020600000000000ULL, 0x0060600000000000ULL,
 411	0x0020600000000000ULL, 0x0060600000000000ULL,
 412	0x0020600000000000ULL, 0x0060600000000000ULL,
 413	0x0020600000000000ULL, 0x0060600000000000ULL,
 414	0x0020600000000000ULL, 0x0060600000000000ULL,
 415	0x0020600000000000ULL, 0x0060600000000000ULL,
 416	0x0020600000000000ULL, 0x0060600000000000ULL,
 417	0x0020600000000000ULL, 0x0060600000000000ULL,
 418	0x0020600000000000ULL, 0x0060600000000000ULL,
 419	0x0020600000000000ULL, 0x0000600000000000ULL,
 420	0x0040600000000000ULL, 0x0060600000000000ULL,
 421	END_SIGN
 422};
 423
 424MODULE_LICENSE("GPL");
 425MODULE_VERSION(DRV_VERSION);
 426
 427
 428/* Module Loadable parameters. */
 429S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
 430S2IO_PARM_INT(rx_ring_num, 1);
 431S2IO_PARM_INT(multiq, 0);
 432S2IO_PARM_INT(rx_ring_mode, 1);
 433S2IO_PARM_INT(use_continuous_tx_intrs, 1);
 434S2IO_PARM_INT(rmac_pause_time, 0x100);
 435S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
 436S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
 437S2IO_PARM_INT(shared_splits, 0);
 438S2IO_PARM_INT(tmac_util_period, 5);
 439S2IO_PARM_INT(rmac_util_period, 5);
 440S2IO_PARM_INT(l3l4hdr_size, 128);
 441/* 0 is no steering, 1 is Priority steering, 2 is Default steering */
 442S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
 443/* Frequency of Rx desc syncs expressed as power of 2 */
 444S2IO_PARM_INT(rxsync_frequency, 3);
 445/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
 446S2IO_PARM_INT(intr_type, 2);
 447/* Large receive offload feature */
 448
 449/* Max pkts to be aggregated by LRO at one time. If not specified,
 450 * aggregation happens until we hit max IP pkt size(64K)
 451 */
 452S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
 453S2IO_PARM_INT(indicate_max_pkts, 0);
 454
 455S2IO_PARM_INT(napi, 1);
 456S2IO_PARM_INT(ufo, 0);
 457S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
 458
 459static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
 460{DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
 461static unsigned int rx_ring_sz[MAX_RX_RINGS] =
 462{[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
 463static unsigned int rts_frm_len[MAX_RX_RINGS] =
 464{[0 ...(MAX_RX_RINGS - 1)] = 0 };
 465
 466module_param_array(tx_fifo_len, uint, NULL, 0);
 467module_param_array(rx_ring_sz, uint, NULL, 0);
 468module_param_array(rts_frm_len, uint, NULL, 0);
 469
 470/*
 471 * S2IO device table.
 472 * This table lists all the devices that this driver supports.
 473 */
 474static DEFINE_PCI_DEVICE_TABLE(s2io_tbl) = {
 475	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
 476	 PCI_ANY_ID, PCI_ANY_ID},
 477	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
 478	 PCI_ANY_ID, PCI_ANY_ID},
 479	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
 480	 PCI_ANY_ID, PCI_ANY_ID},
 481	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
 482	 PCI_ANY_ID, PCI_ANY_ID},
 483	{0,}
 484};
 485
 486MODULE_DEVICE_TABLE(pci, s2io_tbl);
 487
 488static const struct pci_error_handlers s2io_err_handler = {
 489	.error_detected = s2io_io_error_detected,
 490	.slot_reset = s2io_io_slot_reset,
 491	.resume = s2io_io_resume,
 492};
 493
 494static struct pci_driver s2io_driver = {
 495	.name = "S2IO",
 496	.id_table = s2io_tbl,
 497	.probe = s2io_init_nic,
 498	.remove = s2io_rem_nic,
 499	.err_handler = &s2io_err_handler,
 500};
 501
 502/* A simplifier macro used both by init and free shared_mem Fns(). */
 503#define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
 504
 505/* netqueue manipulation helper functions */
 506static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
 507{
 508	if (!sp->config.multiq) {
 509		int i;
 510
 511		for (i = 0; i < sp->config.tx_fifo_num; i++)
 512			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
 513	}
 514	netif_tx_stop_all_queues(sp->dev);
 515}
 516
 517static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
 518{
 519	if (!sp->config.multiq)
 520		sp->mac_control.fifos[fifo_no].queue_state =
 521			FIFO_QUEUE_STOP;
 522
 523	netif_tx_stop_all_queues(sp->dev);
 524}
 525
 526static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
 527{
 528	if (!sp->config.multiq) {
 529		int i;
 530
 531		for (i = 0; i < sp->config.tx_fifo_num; i++)
 532			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
 533	}
 534	netif_tx_start_all_queues(sp->dev);
 535}
 536
 537static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
 538{
 539	if (!sp->config.multiq)
 540		sp->mac_control.fifos[fifo_no].queue_state =
 541			FIFO_QUEUE_START;
 542
 543	netif_tx_start_all_queues(sp->dev);
 544}
 545
 546static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
 547{
 548	if (!sp->config.multiq) {
 549		int i;
 550
 551		for (i = 0; i < sp->config.tx_fifo_num; i++)
 552			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
 553	}
 554	netif_tx_wake_all_queues(sp->dev);
 555}
 556
 557static inline void s2io_wake_tx_queue(
 558	struct fifo_info *fifo, int cnt, u8 multiq)
 559{
 560
 561	if (multiq) {
 562		if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
 563			netif_wake_subqueue(fifo->dev, fifo->fifo_no);
 564	} else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
 565		if (netif_queue_stopped(fifo->dev)) {
 566			fifo->queue_state = FIFO_QUEUE_START;
 567			netif_wake_queue(fifo->dev);
 568		}
 569	}
 570}
 571
 572/**
 573 * init_shared_mem - Allocation and Initialization of Memory
 574 * @nic: Device private variable.
 575 * Description: The function allocates all the memory areas shared
 576 * between the NIC and the driver. This includes Tx descriptors,
 577 * Rx descriptors and the statistics block.
 578 */
 579
static int init_shared_mem(struct s2io_nic *nic)
{
	u32 size;
	void *tmp_v_addr, *tmp_v_addr_next;
	dma_addr_t tmp_p_addr, tmp_p_addr_next;
	struct RxD_block *pre_rxd_blk = NULL;
	int i, j, blk_cnt;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;
	unsigned long tmp;
	struct buffAdd *ba;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;
	unsigned long long mem_allocated = 0;	/* running total for sw_stat */

	/* Allocation and initialization of TXDLs in FIFOs */
	/* First pass: validate the total TxD budget across all FIFOs. */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size += tx_cfg->fifo_len;
	}
	if (size > MAX_AVAILABLE_TXDS) {
		DBG_PRINT(ERR_DBG,
			  "Too many TxDs requested: %d, max supported: %d\n",
			  size, MAX_AVAILABLE_TXDS);
		return -EINVAL;
	}

	/* Second pass: each individual FIFO length must be sane. */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size = tx_cfg->fifo_len;
		/*
		 * Legal values are from 2 to 8192
		 * (only the lower bound is checked here; presumably the
		 * upper bound is enforced where tx_cfg is populated —
		 * TODO confirm.)
		 */
		if (size < 2) {
			DBG_PRINT(ERR_DBG, "Fifo %d: Invalid length (%d) - "
				  "Valid lengths are 2 through 8192\n",
				  i, size);
			return -EINVAL;
		}
	}

	/* A TxDL (descriptor list) and how many of them fit in one page. */
	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	/* Per-FIFO bookkeeping array, one list_info_hold per TxDL. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
		int fifo_len = tx_cfg->fifo_len;
		int list_holder_size = fifo_len * sizeof(struct list_info_hold);

		/* On failure the caller invokes free_shared_mem(), which
		 * walks and releases everything allocated so far. */
		fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
		if (!fifo->list_info) {
			DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n");
			return -ENOMEM;
		}
		mem_allocated += list_holder_size;
	}
	/* Allocate DMA-able pages holding the TxDLs and record each TxDL's
	 * virtual/bus address pair in list_info. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		/* put/get trackers both start at slot 0 of a full ring. */
		fifo->tx_curr_put_info.offset = 0;
		fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
		fifo->tx_curr_get_info.offset = 0;
		fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
		fifo->fifo_no = i;
		fifo->nic = nic;
		/* One TxD per fragment plus header and terminator. */
		fifo->max_txds = MAX_SKB_FRAGS + 2;
		fifo->dev = dev;

		for (j = 0; j < page_num; j++) {
			int k = 0;
			dma_addr_t tmp_p;
			void *tmp_v;
			tmp_v = pci_alloc_consistent(nic->pdev,
						     PAGE_SIZE, &tmp_p);
			if (!tmp_v) {
				DBG_PRINT(INFO_DBG,
					  "pci_alloc_consistent failed for TxDL\n");
				return -ENOMEM;
			}
			/* If we got a zero DMA address(can happen on
			 * certain platforms like PPC), reallocate.
			 * Store virtual address of page we don't want,
			 * to be freed later.
			 */
			if (!tmp_p) {
				mac_control->zerodma_virt_addr = tmp_v;
				DBG_PRINT(INIT_DBG,
					  "%s: Zero DMA address for TxDL. "
					  "Virtual address %p\n",
					  dev->name, tmp_v);
				tmp_v = pci_alloc_consistent(nic->pdev,
							     PAGE_SIZE, &tmp_p);
				if (!tmp_v) {
					DBG_PRINT(INFO_DBG,
						  "pci_alloc_consistent failed for TxDL\n");
					return -ENOMEM;
				}
				/* NOTE(review): only this retry page is added
				 * to mem_allocated; TxDL pages from the normal
				 * path above are never counted — stats-only
				 * discrepancy, TODO confirm intent. */
				mem_allocated += PAGE_SIZE;
			}
			/* Slice this page into TxDLs until the page or the
			 * FIFO (whichever first) is exhausted. */
			while (k < lst_per_page) {
				int l = (j * lst_per_page) + k;
				if (l == tx_cfg->fifo_len)
					break;
				fifo->list_info[l].list_virt_addr =
					tmp_v + (k * lst_size);
				fifo->list_info[l].list_phy_addr =
					tmp_p + (k * lst_size);
				k++;
			}
		}
	}

	/* Per-FIFO scratch array used by the UFO path (one u64 per TxDL). */
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size = tx_cfg->fifo_len;
		fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
		if (!fifo->ufo_in_band_v)
			return -ENOMEM;
		mem_allocated += (size * sizeof(u64));
	}

	/* Allocation and initialization of RXDs in Rings */
	size = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		/* Each Rx block holds rxd_count[] usable RxDs plus one
		 * link descriptor, so num_rxd must divide evenly. */
		if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
			DBG_PRINT(ERR_DBG, "%s: Ring%d RxD count is not a "
				  "multiple of RxDs per Block\n",
				  dev->name, i);
			return FAILURE;
		}
		size += rx_cfg->num_rxd;
		ring->block_count = rx_cfg->num_rxd /
			(rxd_count[nic->rxd_mode] + 1);
		/* Usable packet slots = total RxDs minus one link RxD
		 * per block. */
		ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
	}
	if (nic->rxd_mode == RXD_MODE_1)
		size = (size * (sizeof(struct RxD1)));
	else
		size = (size * (sizeof(struct RxD3)));

	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		/* Both ring trackers start at block 0, offset 0. */
		ring->rx_curr_get_info.block_index = 0;
		ring->rx_curr_get_info.offset = 0;
		ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
		ring->rx_curr_put_info.block_index = 0;
		ring->rx_curr_put_info.offset = 0;
		ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
		ring->nic = nic;
		ring->ring_no = i;

		blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
		/*  Allocating all the Rx blocks */
		for (j = 0; j < blk_cnt; j++) {
			struct rx_block_info *rx_blocks;
			int l;

			rx_blocks = &ring->rx_blocks[j];
			size = SIZE_OF_BLOCK;	/* size is always page size */
			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
							  &tmp_p_addr);
			if (tmp_v_addr == NULL) {
				/*
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was alloced till the
				 * failure happened.
				 */
				rx_blocks->block_virt_addr = tmp_v_addr;
				return -ENOMEM;
			}
			mem_allocated += size;
			memset(tmp_v_addr, 0, size);

			/* Host-side lookup table: one rxd_info (virtual +
			 * bus address) per RxD in this block. */
			size = sizeof(struct rxd_info) *
				rxd_count[nic->rxd_mode];
			rx_blocks->block_virt_addr = tmp_v_addr;
			rx_blocks->block_dma_addr = tmp_p_addr;
			rx_blocks->rxds = kmalloc(size,  GFP_KERNEL);
			if (!rx_blocks->rxds)
				return -ENOMEM;
			mem_allocated += size;
			for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
				rx_blocks->rxds[l].virt_addr =
					rx_blocks->block_virt_addr +
					(rxd_size[nic->rxd_mode] * l);
				rx_blocks->rxds[l].dma_addr =
					rx_blocks->block_dma_addr +
					(rxd_size[nic->rxd_mode] * l);
			}
		}
		/* Interlinking all Rx Blocks */
		/* Chain every block's trailing link RxD to the next block
		 * (modulo blk_cnt, so the last block wraps to the first). */
		for (j = 0; j < blk_cnt; j++) {
			int next = (j + 1) % blk_cnt;
			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
			tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
			tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;

			pre_rxd_blk = tmp_v_addr;
			pre_rxd_blk->reserved_2_pNext_RxD_block =
				(unsigned long)tmp_v_addr_next;
			pre_rxd_blk->pNext_RxD_Blk_physical =
				(u64)tmp_p_addr_next;
		}
	}
	if (nic->rxd_mode == RXD_MODE_3B) {
		/*
		 * Allocation of Storages for buffer addresses in 2BUFF mode
		 * and the buffers as well.
		 */
		for (i = 0; i < config->rx_ring_num; i++) {
			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
			struct ring_info *ring = &mac_control->rings[i];

			blk_cnt = rx_cfg->num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			size = sizeof(struct buffAdd *) * blk_cnt;
			ring->ba = kmalloc(size, GFP_KERNEL);
			if (!ring->ba)
				return -ENOMEM;
			mem_allocated += size;
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;

				size = sizeof(struct buffAdd) *
					(rxd_count[nic->rxd_mode] + 1);
				ring->ba[j] = kmalloc(size, GFP_KERNEL);
				if (!ring->ba[j])
					return -ENOMEM;
				mem_allocated += size;
				while (k != rxd_count[nic->rxd_mode]) {
					ba = &ring->ba[j][k];
					/* Over-allocate by ALIGN_SIZE, then
					 * round the working pointer up; the
					 * mask below assumes ALIGN_SIZE is a
					 * power-of-two minus one — TODO
					 * confirm against s2io.h. */
					size = BUF0_LEN + ALIGN_SIZE;
					ba->ba_0_org = kmalloc(size, GFP_KERNEL);
					if (!ba->ba_0_org)
						return -ENOMEM;
					mem_allocated += size;
					tmp = (unsigned long)ba->ba_0_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long)ALIGN_SIZE);
					ba->ba_0 = (void *)tmp;

					size = BUF1_LEN + ALIGN_SIZE;
					ba->ba_1_org = kmalloc(size, GFP_KERNEL);
					if (!ba->ba_1_org)
						return -ENOMEM;
					mem_allocated += size;
					tmp = (unsigned long)ba->ba_1_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long)ALIGN_SIZE);
					ba->ba_1 = (void *)tmp;
					k++;
				}
			}
		}
	}

	/* Allocation and initialization of Statistics block */
	size = sizeof(struct stat_block);
	mac_control->stats_mem =
		pci_alloc_consistent(nic->pdev, size,
				     &mac_control->stats_mem_phy);

	if (!mac_control->stats_mem) {
		/*
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was alloced till the
		 * failure happened.
		 */
		return -ENOMEM;
	}
	mem_allocated += size;
	mac_control->stats_mem_sz = size;

	tmp_v_addr = mac_control->stats_mem;
	mac_control->stats_info = tmp_v_addr;
	memset(tmp_v_addr, 0, size);
	/* NOTE(review): tmp_p_addr here still holds the DMA address of the
	 * last Rx block touched above, not the stats block — the debug
	 * print reports that stale value. */
	DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n",
		dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr);
	mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
	return SUCCESS;
}
 878
 879/**
 880 * free_shared_mem - Free the allocated Memory
 881 * @nic:  Device private variable.
 882 * Description: This function is to free all memory locations allocated by
 883 * the init_shared_mem() function and return it to the kernel.
 884 */
 885
 886static void free_shared_mem(struct s2io_nic *nic)
 887{
 888	int i, j, blk_cnt, size;
 889	void *tmp_v_addr;
 890	dma_addr_t tmp_p_addr;
 891	int lst_size, lst_per_page;
 892	struct net_device *dev;
 893	int page_num = 0;
 894	struct config_param *config;
 895	struct mac_info *mac_control;
 896	struct stat_block *stats;
 897	struct swStat *swstats;
 898
 899	if (!nic)
 900		return;
 901
 902	dev = nic->dev;
 903
 904	config = &nic->config;
 905	mac_control = &nic->mac_control;
 906	stats = mac_control->stats_info;
 907	swstats = &stats->sw_stat;
 908
 909	lst_size = sizeof(struct TxD) * config->max_txds;
 910	lst_per_page = PAGE_SIZE / lst_size;
 911
 912	for (i = 0; i < config->tx_fifo_num; i++) {
 913		struct fifo_info *fifo = &mac_control->fifos[i];
 914		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
 915
 916		page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
 917		for (j = 0; j < page_num; j++) {
 918			int mem_blks = (j * lst_per_page);
 919			struct list_info_hold *fli;
 920
 921			if (!fifo->list_info)
 922				return;
 923
 924			fli = &fifo->list_info[mem_blks];
 925			if (!fli->list_virt_addr)
 926				break;
 927			pci_free_consistent(nic->pdev, PAGE_SIZE,
 928					    fli->list_virt_addr,
 929					    fli->list_phy_addr);
 930			swstats->mem_freed += PAGE_SIZE;
 931		}
 932		/* If we got a zero DMA address during allocation,
 933		 * free the page now
 934		 */
 935		if (mac_control->zerodma_virt_addr) {
 936			pci_free_consistent(nic->pdev, PAGE_SIZE,
 937					    mac_control->zerodma_virt_addr,
 938					    (dma_addr_t)0);
 939			DBG_PRINT(INIT_DBG,
 940				  "%s: Freeing TxDL with zero DMA address. "
 941				  "Virtual address %p\n",
 942				  dev->name, mac_control->zerodma_virt_addr);
 943			swstats->mem_freed += PAGE_SIZE;
 944		}
 945		kfree(fifo->list_info);
 946		swstats->mem_freed += tx_cfg->fifo_len *
 947			sizeof(struct list_info_hold);
 948	}
 949
 950	size = SIZE_OF_BLOCK;
 951	for (i = 0; i < config->rx_ring_num; i++) {
 952		struct ring_info *ring = &mac_control->rings[i];
 953
 954		blk_cnt = ring->block_count;
 955		for (j = 0; j < blk_cnt; j++) {
 956			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
 957			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
 958			if (tmp_v_addr == NULL)
 959				break;
 960			pci_free_consistent(nic->pdev, size,
 961					    tmp_v_addr, tmp_p_addr);
 962			swstats->mem_freed += size;
 963			kfree(ring->rx_blocks[j].rxds);
 964			swstats->mem_freed += sizeof(struct rxd_info) *
 965				rxd_count[nic->rxd_mode];
 966		}
 967	}
 968
 969	if (nic->rxd_mode == RXD_MODE_3B) {
 970		/* Freeing buffer storage addresses in 2BUFF mode. */
 971		for (i = 0; i < config->rx_ring_num; i++) {
 972			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
 973			struct ring_info *ring = &mac_control->rings[i];
 974
 975			blk_cnt = rx_cfg->num_rxd /
 976				(rxd_count[nic->rxd_mode] + 1);
 977			for (j = 0; j < blk_cnt; j++) {
 978				int k = 0;
 979				if (!ring->ba[j])
 980					continue;
 981				while (k != rxd_count[nic->rxd_mode]) {
 982					struct buffAdd *ba = &ring->ba[j][k];
 983					kfree(ba->ba_0_org);
 984					swstats->mem_freed +=
 985						BUF0_LEN + ALIGN_SIZE;
 986					kfree(ba->ba_1_org);
 987					swstats->mem_freed +=
 988						BUF1_LEN + ALIGN_SIZE;
 989					k++;
 990				}
 991				kfree(ring->ba[j]);
 992				swstats->mem_freed += sizeof(struct buffAdd) *
 993					(rxd_count[nic->rxd_mode] + 1);
 994			}
 995			kfree(ring->ba);
 996			swstats->mem_freed += sizeof(struct buffAdd *) *
 997				blk_cnt;
 998		}
 999	}
1000
1001	for (i = 0; i < nic->config.tx_fifo_num; i++) {
1002		struct fifo_info *fifo = &mac_control->fifos[i];
1003		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
1004
1005		if (fifo->ufo_in_band_v) {
1006			swstats->mem_freed += tx_cfg->fifo_len *
1007				sizeof(u64);
1008			kfree(fifo->ufo_in_band_v);
1009		}
1010	}
1011
1012	if (mac_control->stats_mem) {
1013		swstats->mem_freed += mac_control->stats_mem_sz;
1014		pci_free_consistent(nic->pdev,
1015				    mac_control->stats_mem_sz,
1016				    mac_control->stats_mem,
1017				    mac_control->stats_mem_phy);
1018	}
1019}
1020
1021/**
1022 * s2io_verify_pci_mode -
1023 */
1024
1025static int s2io_verify_pci_mode(struct s2io_nic *nic)
1026{
1027	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1028	register u64 val64 = 0;
1029	int     mode;
1030
1031	val64 = readq(&bar0->pci_mode);
1032	mode = (u8)GET_PCI_MODE(val64);
1033
1034	if (val64 & PCI_MODE_UNKNOWN_MODE)
1035		return -1;      /* Unknown PCI mode */
1036	return mode;
1037}
1038
1039#define NEC_VENID   0x1033
1040#define NEC_DEVID   0x0125
1041static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1042{
1043	struct pci_dev *tdev = NULL;
1044	for_each_pci_dev(tdev) {
1045		if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
1046			if (tdev->bus == s2io_pdev->bus->parent) {
1047				pci_dev_put(tdev);
1048				return 1;
1049			}
1050		}
1051	}
1052	return 0;
1053}
1054
/* Bus speed in MHz, indexed by the adapter's decoded PCI mode value
 * (see GET_PCI_MODE usage in s2io_print_pci_mode).
 */
static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
1056/**
1057 * s2io_print_pci_mode -
1058 */
1059static int s2io_print_pci_mode(struct s2io_nic *nic)
1060{
1061	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1062	register u64 val64 = 0;
1063	int	mode;
1064	struct config_param *config = &nic->config;
1065	const char *pcimode;
1066
1067	val64 = readq(&bar0->pci_mode);
1068	mode = (u8)GET_PCI_MODE(val64);
1069
1070	if (val64 & PCI_MODE_UNKNOWN_MODE)
1071		return -1;	/* Unknown PCI mode */
1072
1073	config->bus_speed = bus_speed[mode];
1074
1075	if (s2io_on_nec_bridge(nic->pdev)) {
1076		DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1077			  nic->dev->name);
1078		return mode;
1079	}
1080
1081	switch (mode) {
1082	case PCI_MODE_PCI_33:
1083		pcimode = "33MHz PCI bus";
1084		break;
1085	case PCI_MODE_PCI_66:
1086		pcimode = "66MHz PCI bus";
1087		break;
1088	case PCI_MODE_PCIX_M1_66:
1089		pcimode = "66MHz PCIX(M1) bus";
1090		break;
1091	case PCI_MODE_PCIX_M1_100:
1092		pcimode = "100MHz PCIX(M1) bus";
1093		break;
1094	case PCI_MODE_PCIX_M1_133:
1095		pcimode = "133MHz PCIX(M1) bus";
1096		break;
1097	case PCI_MODE_PCIX_M2_66:
1098		pcimode = "133MHz PCIX(M2) bus";
1099		break;
1100	case PCI_MODE_PCIX_M2_100:
1101		pcimode = "200MHz PCIX(M2) bus";
1102		break;
1103	case PCI_MODE_PCIX_M2_133:
1104		pcimode = "266MHz PCIX(M2) bus";
1105		break;
1106	default:
1107		pcimode = "unsupported bus!";
1108		mode = -1;
1109	}
1110
1111	DBG_PRINT(ERR_DBG, "%s: Device is on %d bit %s\n",
1112		  nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64, pcimode);
1113
1114	return mode;
1115}
1116
1117/**
1118 *  init_tti - Initialization transmit traffic interrupt scheme
1119 *  @nic: device private variable
1120 *  @link: link status (UP/DOWN) used to enable/disable continuous
1121 *  transmit interrupts
1122 *  Description: The function configures transmit traffic interrupts
1123 *  Return Value:  SUCCESS on success and
1124 *  '-1' on failure
1125 */
1126
1127static int init_tti(struct s2io_nic *nic, int link)
1128{
1129	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1130	register u64 val64 = 0;
1131	int i;
1132	struct config_param *config = &nic->config;
1133
1134	for (i = 0; i < config->tx_fifo_num; i++) {
1135		/*
1136		 * TTI Initialization. Default Tx timer gets us about
1137		 * 250 interrupts per sec. Continuous interrupts are enabled
1138		 * by default.
1139		 */
1140		if (nic->device_type == XFRAME_II_DEVICE) {
1141			int count = (nic->config.bus_speed * 125)/2;
1142			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1143		} else
1144			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1145
1146		val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1147			TTI_DATA1_MEM_TX_URNG_B(0x10) |
1148			TTI_DATA1_MEM_TX_URNG_C(0x30) |
1149			TTI_DATA1_MEM_TX_TIMER_AC_EN;
1150		if (i == 0)
1151			if (use_continuous_tx_intrs && (link == LINK_UP))
1152				val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1153		writeq(val64, &bar0->tti_data1_mem);
1154
1155		if (nic->config.intr_type == MSI_X) {
1156			val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1157				TTI_DATA2_MEM_TX_UFC_B(0x100) |
1158				TTI_DATA2_MEM_TX_UFC_C(0x200) |
1159				TTI_DATA2_MEM_TX_UFC_D(0x300);
1160		} else {
1161			if ((nic->config.tx_steering_type ==
1162			     TX_DEFAULT_STEERING) &&
1163			    (config->tx_fifo_num > 1) &&
1164			    (i >= nic->udp_fifo_idx) &&
1165			    (i < (nic->udp_fifo_idx +
1166				  nic->total_udp_fifos)))
1167				val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
1168					TTI_DATA2_MEM_TX_UFC_B(0x80) |
1169					TTI_DATA2_MEM_TX_UFC_C(0x100) |
1170					TTI_DATA2_MEM_TX_UFC_D(0x120);
1171			else
1172				val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1173					TTI_DATA2_MEM_TX_UFC_B(0x20) |
1174					TTI_DATA2_MEM_TX_UFC_C(0x40) |
1175					TTI_DATA2_MEM_TX_UFC_D(0x80);
1176		}
1177
1178		writeq(val64, &bar0->tti_data2_mem);
1179
1180		val64 = TTI_CMD_MEM_WE |
1181			TTI_CMD_MEM_STROBE_NEW_CMD |
1182			TTI_CMD_MEM_OFFSET(i);
1183		writeq(val64, &bar0->tti_command_mem);
1184
1185		if (wait_for_cmd_complete(&bar0->tti_command_mem,
1186					  TTI_CMD_MEM_STROBE_NEW_CMD,
1187					  S2IO_BIT_RESET) != SUCCESS)
1188			return FAILURE;
1189	}
1190
1191	return SUCCESS;
1192}
1193
1194/**
1195 *  init_nic - Initialization of hardware
1196 *  @nic: device private variable
1197 *  Description: The function sequentially configures every block
1198 *  of the H/W from their reset values.
1199 *  Return Value:  SUCCESS on success and
1200 *  '-1' on failure (endian settings incorrect).
1201 */
1202
1203static int init_nic(struct s2io_nic *nic)
1204{
1205	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1206	struct net_device *dev = nic->dev;
1207	register u64 val64 = 0;
1208	void __iomem *add;
1209	u32 time;
1210	int i, j;
1211	int dtx_cnt = 0;
1212	unsigned long long mem_share;
1213	int mem_size;
1214	struct config_param *config = &nic->config;
1215	struct mac_info *mac_control = &nic->mac_control;
1216
1217	/* to set the swapper controle on the card */
1218	if (s2io_set_swapper(nic)) {
1219		DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
1220		return -EIO;
1221	}
1222
1223	/*
1224	 * Herc requires EOI to be removed from reset before XGXS, so..
1225	 */
1226	if (nic->device_type & XFRAME_II_DEVICE) {
1227		val64 = 0xA500000000ULL;
1228		writeq(val64, &bar0->sw_reset);
1229		msleep(500);
1230		val64 = readq(&bar0->sw_reset);
1231	}
1232
1233	/* Remove XGXS from reset state */
1234	val64 = 0;
1235	writeq(val64, &bar0->sw_reset);
1236	msleep(500);
1237	val64 = readq(&bar0->sw_reset);
1238
1239	/* Ensure that it's safe to access registers by checking
1240	 * RIC_RUNNING bit is reset. Check is valid only for XframeII.
1241	 */
1242	if (nic->device_type == XFRAME_II_DEVICE) {
1243		for (i = 0; i < 50; i++) {
1244			val64 = readq(&bar0->adapter_status);
1245			if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1246				break;
1247			msleep(10);
1248		}
1249		if (i == 50)
1250			return -ENODEV;
1251	}
1252
1253	/*  Enable Receiving broadcasts */
1254	add = &bar0->mac_cfg;
1255	val64 = readq(&bar0->mac_cfg);
1256	val64 |= MAC_RMAC_BCAST_ENABLE;
1257	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1258	writel((u32)val64, add);
1259	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1260	writel((u32) (val64 >> 32), (add + 4));
1261
1262	/* Read registers in all blocks */
1263	val64 = readq(&bar0->mac_int_mask);
1264	val64 = readq(&bar0->mc_int_mask);
1265	val64 = readq(&bar0->xgxs_int_mask);
1266
1267	/*  Set MTU */
1268	val64 = dev->mtu;
1269	writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1270
1271	if (nic->device_type & XFRAME_II_DEVICE) {
1272		while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1273			SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1274					  &bar0->dtx_control, UF);
1275			if (dtx_cnt & 0x1)
1276				msleep(1); /* Necessary!! */
1277			dtx_cnt++;
1278		}
1279	} else {
1280		while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1281			SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1282					  &bar0->dtx_control, UF);
1283			val64 = readq(&bar0->dtx_control);
1284			dtx_cnt++;
1285		}
1286	}
1287
1288	/*  Tx DMA Initialization */
1289	val64 = 0;
1290	writeq(val64, &bar0->tx_fifo_partition_0);
1291	writeq(val64, &bar0->tx_fifo_partition_1);
1292	writeq(val64, &bar0->tx_fifo_partition_2);
1293	writeq(val64, &bar0->tx_fifo_partition_3);
1294
1295	for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1296		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
1297
1298		val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
1299			vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);
1300
1301		if (i == (config->tx_fifo_num - 1)) {
1302			if (i % 2 == 0)
1303				i++;
1304		}
1305
1306		switch (i) {
1307		case 1:
1308			writeq(val64, &bar0->tx_fifo_partition_0);
1309			val64 = 0;
1310			j = 0;
1311			break;
1312		case 3:
1313			writeq(val64, &bar0->tx_fifo_partition_1);
1314			val64 = 0;
1315			j = 0;
1316			break;
1317		case 5:
1318			writeq(val64, &bar0->tx_fifo_partition_2);
1319			val64 = 0;
1320			j = 0;
1321			break;
1322		case 7:
1323			writeq(val64, &bar0->tx_fifo_partition_3);
1324			val64 = 0;
1325			j = 0;
1326			break;
1327		default:
1328			j++;
1329			break;
1330		}
1331	}
1332
1333	/*
1334	 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1335	 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1336	 */
1337	if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4))
1338		writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1339
1340	val64 = readq(&bar0->tx_fifo_partition_0);
1341	DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1342		  &bar0->tx_fifo_partition_0, (unsigned long long)val64);
1343
1344	/*
1345	 * Initialization of Tx_PA_CONFIG register to ignore packet
1346	 * integrity checking.
1347	 */
1348	val64 = readq(&bar0->tx_pa_cfg);
1349	val64 |= TX_PA_CFG_IGNORE_FRM_ERR |
1350		TX_PA_CFG_IGNORE_SNAP_OUI |
1351		TX_PA_CFG_IGNORE_LLC_CTRL |
1352		TX_PA_CFG_IGNORE_L2_ERR;
1353	writeq(val64, &bar0->tx_pa_cfg);
1354
1355	/* Rx DMA intialization. */
1356	val64 = 0;
1357	for (i = 0; i < config->rx_ring_num; i++) {
1358		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
1359
1360		val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
1361	}
1362	writeq(val64, &bar0->rx_queue_priority);
1363
1364	/*
1365	 * Allocating equal share of memory to all the
1366	 * configured Rings.
1367	 */
1368	val64 = 0;
1369	if (nic->device_type & XFRAME_II_DEVICE)
1370		mem_size = 32;
1371	else
1372		mem_size = 64;
1373
1374	for (i = 0; i < config->rx_ring_num; i++) {
1375		switch (i) {
1376		case 0:
1377			mem_share = (mem_size / config->rx_ring_num +
1378				     mem_size % config->rx_ring_num);
1379			val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1380			continue;
1381		case 1:
1382			mem_share = (mem_size / config->rx_ring_num);
1383			val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1384			continue;
1385		case 2:
1386			mem_share = (mem_size / config->rx_ring_num);
1387			val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1388			continue;
1389		case 3:
1390			mem_share = (mem_size / config->rx_ring_num);
1391			val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1392			continue;
1393		case 4:
1394			mem_share = (mem_size / config->rx_ring_num);
1395			val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1396			continue;
1397		case 5:
1398			mem_share = (mem_size / config->rx_ring_num);
1399			val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1400			continue;
1401		case 6:
1402			mem_share = (mem_size / config->rx_ring_num);
1403			val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1404			continue;
1405		case 7:
1406			mem_share = (mem_size / config->rx_ring_num);
1407			val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1408			continue;
1409		}
1410	}
1411	writeq(val64, &bar0->rx_queue_cfg);
1412
1413	/*
1414	 * Filling Tx round robin registers
1415	 * as per the number of FIFOs for equal scheduling priority
1416	 */
1417	switch (config->tx_fifo_num) {
1418	case 1:
1419		val64 = 0x0;
1420		writeq(val64, &bar0->tx_w_round_robin_0);
1421		writeq(val64, &bar0->tx_w_round_robin_1);
1422		writeq(val64, &bar0->tx_w_round_robin_2);
1423		writeq(val64, &bar0->tx_w_round_robin_3);
1424		writeq(val64, &bar0->tx_w_round_robin_4);
1425		break;
1426	case 2:
1427		val64 = 0x0001000100010001ULL;
1428		writeq(val64, &bar0->tx_w_round_robin_0);
1429		writeq(val64, &bar0->tx_w_round_robin_1);
1430		writeq(val64, &bar0->tx_w_round_robin_2);
1431		writeq(val64, &bar0->tx_w_round_robin_3);
1432		val64 = 0x0001000100000000ULL;
1433		writeq(val64, &bar0->tx_w_round_robin_4);
1434		break;
1435	case 3:
1436		val64 = 0x0001020001020001ULL;
1437		writeq(val64, &bar0->tx_w_round_robin_0);
1438		val64 = 0x0200010200010200ULL;
1439		writeq(val64, &bar0->tx_w_round_robin_1);
1440		val64 = 0x0102000102000102ULL;
1441		writeq(val64, &bar0->tx_w_round_robin_2);
1442		val64 = 0x0001020001020001ULL;
1443		writeq(val64, &bar0->tx_w_round_robin_3);
1444		val64 = 0x0200010200000000ULL;
1445		writeq(val64, &bar0->tx_w_round_robin_4);
1446		break;
1447	case 4:
1448		val64 = 0x0001020300010203ULL;
1449		writeq(val64, &bar0->tx_w_round_robin_0);
1450		writeq(val64, &bar0->tx_w_round_robin_1);
1451		writeq(val64, &bar0->tx_w_round_robin_2);
1452		writeq(val64, &bar0->tx_w_round_robin_3);
1453		val64 = 0x0001020300000000ULL;
1454		writeq(val64, &bar0->tx_w_round_robin_4);
1455		break;
1456	case 5:
1457		val64 = 0x0001020304000102ULL;
1458		writeq(val64, &bar0->tx_w_round_robin_0);
1459		val64 = 0x0304000102030400ULL;
1460		writeq(val64, &bar0->tx_w_round_robin_1);
1461		val64 = 0x0102030400010203ULL;
1462		writeq(val64, &bar0->tx_w_round_robin_2);
1463		val64 = 0x0400010203040001ULL;
1464		writeq(val64, &bar0->tx_w_round_robin_3);
1465		val64 = 0x0203040000000000ULL;
1466		writeq(val64, &bar0->tx_w_round_robin_4);
1467		break;
1468	case 6:
1469		val64 = 0x0001020304050001ULL;
1470		writeq(val64, &bar0->tx_w_round_robin_0);
1471		val64 = 0x0203040500010203ULL;
1472		writeq(val64, &bar0->tx_w_round_robin_1);
1473		val64 = 0x0405000102030405ULL;
1474		writeq(val64, &bar0->tx_w_round_robin_2);
1475		val64 = 0x0001020304050001ULL;
1476		writeq(val64, &bar0->tx_w_round_robin_3);
1477		val64 = 0x0203040500000000ULL;
1478		writeq(val64, &bar0->tx_w_round_robin_4);
1479		break;
1480	case 7:
1481		val64 = 0x0001020304050600ULL;
1482		writeq(val64, &bar0->tx_w_round_robin_0);
1483		val64 = 0x0102030405060001ULL;
1484		writeq(val64, &bar0->tx_w_round_robin_1);
1485		val64 = 0x0203040506000102ULL;
1486		writeq(val64, &bar0->tx_w_round_robin_2);
1487		val64 = 0x0304050600010203ULL;
1488		writeq(val64, &bar0->tx_w_round_robin_3);
1489		val64 = 0x0405060000000000ULL;
1490		writeq(val64, &bar0->tx_w_round_robin_4);
1491		break;
1492	case 8:
1493		val64 = 0x0001020304050607ULL;
1494		writeq(val64, &bar0->tx_w_round_robin_0);
1495		writeq(val64, &bar0->tx_w_round_robin_1);
1496		writeq(val64, &bar0->tx_w_round_robin_2);
1497		writeq(val64, &bar0->tx_w_round_robin_3);
1498		val64 = 0x0001020300000000ULL;
1499		writeq(val64, &bar0->tx_w_round_robin_4);
1500		break;
1501	}
1502
1503	/* Enable all configured Tx FIFO partitions */
1504	val64 = readq(&bar0->tx_fifo_partition_0);
1505	val64 |= (TX_FIFO_PARTITION_EN);
1506	writeq(val64, &bar0->tx_fifo_partition_0);
1507
1508	/* Filling the Rx round robin registers as per the
1509	 * number of Rings and steering based on QoS with
1510	 * equal priority.
1511	 */
1512	switch (config->rx_ring_num) {
1513	case 1:
1514		val64 = 0x0;
1515		writeq(val64, &bar0->rx_w_round_robin_0);
1516		writeq(val64, &bar0->rx_w_round_robin_1);
1517		writeq(val64, &bar0->rx_w_round_robin_2);
1518		writeq(val64, &bar0->rx_w_round_robin_3);
1519		writeq(val64, &bar0->rx_w_round_robin_4);
1520
1521		val64 = 0x8080808080808080ULL;
1522		writeq(val64, &bar0->rts_qos_steering);
1523		break;
1524	case 2:
1525		val64 = 0x0001000100010001ULL;
1526		writeq(val64, &bar0->rx_w_round_robin_0);
1527		writeq(val64, &bar0->rx_w_round_robin_1);
1528		writeq(val64, &bar0->rx_w_round_robin_2);
1529		writeq(val64, &bar0->rx_w_round_robin_3);
1530		val64 = 0x0001000100000000ULL;
1531		writeq(val64, &bar0->rx_w_round_robin_4);
1532
1533		val64 = 0x8080808040404040ULL;
1534		writeq(val64, &bar0->rts_qos_steering);
1535		break;
1536	case 3:
1537		val64 = 0x0001020001020001ULL;
1538		writeq(val64, &bar0->rx_w_round_robin_0);
1539		val64 = 0x0200010200010200ULL;
1540		writeq(val64, &bar0->rx_w_round_robin_1);
1541		val64 = 0x0102000102000102ULL;
1542		writeq(val64, &bar0->rx_w_round_robin_2);
1543		val64 = 0x0001020001020001ULL;
1544		writeq(val64, &bar0->rx_w_round_robin_3);
1545		val64 = 0x0200010200000000ULL;
1546		writeq(val64, &bar0->rx_w_round_robin_4);
1547
1548		val64 = 0x8080804040402020ULL;
1549		writeq(val64, &bar0->rts_qos_steering);
1550		break;
1551	case 4:
1552		val64 = 0x0001020300010203ULL;
1553		writeq(val64, &bar0->rx_w_round_robin_0);
1554		writeq(val64, &bar0->rx_w_round_robin_1);
1555		writeq(val64, &bar0->rx_w_round_robin_2);
1556		writeq(val64, &bar0->rx_w_round_robin_3);
1557		val64 = 0x0001020300000000ULL;
1558		writeq(val64, &bar0->rx_w_round_robin_4);
1559
1560		val64 = 0x8080404020201010ULL;
1561		writeq(val64, &bar0->rts_qos_steering);
1562		break;
1563	case 5:
1564		val64 = 0x0001020304000102ULL;
1565		writeq(val64, &bar0->rx_w_round_robin_0);
1566		val64 = 0x0304000102030400ULL;
1567		writeq(val64, &bar0->rx_w_round_robin_1);
1568		val64 = 0x0102030400010203ULL;
1569		writeq(val64, &bar0->rx_w_round_robin_2);
1570		val64 = 0x0400010203040001ULL;
1571		writeq(val64, &bar0->rx_w_round_robin_3);
1572		val64 = 0x0203040000000000ULL;
1573		writeq(val64, &bar0->rx_w_round_robin_4);
1574
1575		val64 = 0x8080404020201008ULL;
1576		writeq(val64, &bar0->rts_qos_steering);
1577		break;
1578	case 6:
1579		val64 = 0x0001020304050001ULL;
1580		writeq(val64, &bar0->rx_w_round_robin_0);
1581		val64 = 0x0203040500010203ULL;
1582		writeq(val64, &bar0->rx_w_round_robin_1);
1583		val64 = 0x0405000102030405ULL;
1584		writeq(val64, &bar0->rx_w_round_robin_2);
1585		val64 = 0x0001020304050001ULL;
1586		writeq(val64, &bar0->rx_w_round_robin_3);
1587		val64 = 0x0203040500000000ULL;
1588		writeq(val64, &bar0->rx_w_round_robin_4);
1589
1590		val64 = 0x8080404020100804ULL;
1591		writeq(val64, &bar0->rts_qos_steering);
1592		break;
1593	case 7:
1594		val64 = 0x0001020304050600ULL;
1595		writeq(val64, &bar0->rx_w_round_robin_0);
1596		val64 = 0x0102030405060001ULL;
1597		writeq(val64, &bar0->rx_w_round_robin_1);
1598		val64 = 0x0203040506000102ULL;
1599		writeq(val64, &bar0->rx_w_round_robin_2);
1600		val64 = 0x0304050600010203ULL;
1601		writeq(val64, &bar0->rx_w_round_robin_3);
1602		val64 = 0x0405060000000000ULL;
1603		writeq(val64, &bar0->rx_w_round_robin_4);
1604
1605		val64 = 0x8080402010080402ULL;
1606		writeq(val64, &bar0->rts_qos_steering);
1607		break;
1608	case 8:
1609		val64 = 0x0001020304050607ULL;
1610		writeq(val64, &bar0->rx_w_round_robin_0);
1611		writeq(val64, &bar0->rx_w_round_robin_1);
1612		writeq(val64, &bar0->rx_w_round_robin_2);
1613		writeq(val64, &bar0->rx_w_round_robin_3);
1614		val64 = 0x0001020300000000ULL;
1615		writeq(val64, &bar0->rx_w_round_robin_4);
1616
1617		val64 = 0x8040201008040201ULL;
1618		writeq(val64, &bar0->rts_qos_steering);
1619		break;
1620	}
1621
1622	/* UDP Fix */
1623	val64 = 0;
1624	for (i = 0; i < 8; i++)
1625		writeq(val64, &bar0->rts_frm_len_n[i]);
1626
1627	/* Set the default rts frame length for the rings configured */
1628	val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1629	for (i = 0 ; i < config->rx_ring_num ; i++)
1630		writeq(val64, &bar0->rts_frm_len_n[i]);
1631
1632	/* Set the frame length for the configured rings
1633	 * desired by the user
1634	 */
1635	for (i = 0; i < config->rx_ring_num; i++) {
1636		/* If rts_frm_len[i] == 0 then it is assumed that user not
1637		 * specified frame length steering.
1638		 * If the user provides the frame length then program
1639		 * the rts_frm_len register for those values or else
1640		 * leave it as it is.
1641		 */
1642		if (rts_frm_len[i] != 0) {
1643			writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1644			       &bar0->rts_frm_len_n[i]);
1645		}
1646	}
1647
1648	/* Disable differentiated services steering logic */
1649	for (i = 0; i < 64; i++) {
1650		if (rts_ds_steer(nic, i, 0) == FAILURE) {
1651			DBG_PRINT(ERR_DBG,
1652				  "%s: rts_ds_steer failed on codepoint %d\n",
1653				  dev->name, i);
1654			return -ENODEV;
1655		}
1656	}
1657
1658	/* Program statistics memory */
1659	writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1660
1661	if (nic->device_type == XFRAME_II_DEVICE) {
1662		val64 = STAT_BC(0x320);
1663		writeq(val64, &bar0->stat_byte_cnt);
1664	}
1665
1666	/*
1667	 * Initializing the sampling rate for the device to calculate the
1668	 * bandwidth utilization.
1669	 */
1670	val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1671		MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1672	writeq(val64, &bar0->mac_link_util);
1673
1674	/*
1675	 * Initializing the Transmit and Receive Traffic Interrupt
1676	 * Scheme.
1677	 */
1678
1679	/* Initialize TTI */
1680	if (SUCCESS != init_tti(nic, nic->last_link_state))
1681		return -ENODEV;
1682
1683	/* RTI Initialization */
1684	if (nic->device_type == XFRAME_II_DEVICE) {
1685		/*
1686		 * Programmed to generate Apprx 500 Intrs per
1687		 * second
1688		 */
1689		int count = (nic->config.bus_speed * 125)/4;
1690		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1691	} else
1692		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1693	val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1694		RTI_DATA1_MEM_RX_URNG_B(0x10) |
1695		RTI_DATA1_MEM_RX_URNG_C(0x30) |
1696		RTI_DATA1_MEM_RX_TIMER_AC_EN;
1697
1698	writeq(val64, &bar0->rti_data1_mem);
1699
1700	val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1701		RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1702	if (nic->config.intr_type == MSI_X)
1703		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
1704			  RTI_DATA2_MEM_RX_UFC_D(0x40));
1705	else
1706		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
1707			  RTI_DATA2_MEM_RX_UFC_D(0x80));
1708	writeq(val64, &bar0->rti_data2_mem);
1709
1710	for (i = 0; i < config->rx_ring_num; i++) {
1711		val64 = RTI_CMD_MEM_WE |
1712			RTI_CMD_MEM_STROBE_NEW_CMD |
1713			RTI_CMD_MEM_OFFSET(i);
1714		writeq(val64, &bar0->rti_command_mem);
1715
1716		/*
1717		 * Once the operation completes, the Strobe bit of the
1718		 * command register will be reset. We poll for this
1719		 * particular condition. We wait for a maximum of 500ms
1720		 * for the operation to complete, if it's not complete
1721		 * by then we return error.
1722		 */
1723		time = 0;
1724		while (true) {
1725			val64 = readq(&bar0->rti_command_mem);
1726			if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1727				break;
1728
1729			if (time > 10) {
1730				DBG_PRINT(ERR_DBG, "%s: RTI init failed\n",
1731					  dev->name);
1732				return -ENODEV;
1733			}
1734			time++;
1735			msleep(50);
1736		}
1737	}
1738
1739	/*
1740	 * Initializing proper values as Pause threshold into all
1741	 * the 8 Queues on Rx side.
1742	 */
1743	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1744	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1745
1746	/* Disable RMAC PAD STRIPPING */
1747	add = &bar0->mac_cfg;
1748	val64 = readq(&bar0->mac_cfg);
1749	val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1750	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1751	writel((u32) (val64), add);
1752	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1753	writel((u32) (val64 >> 32), (add + 4));
1754	val64 = readq(&bar0->mac_cfg);
1755
1756	/* Enable FCS stripping by adapter */
1757	add = &bar0->mac_cfg;
1758	val64 = readq(&bar0->mac_cfg);
1759	val64 |= MAC_CFG_RMAC_STRIP_FCS;
1760	if (nic->device_type == XFRAME_II_DEVICE)
1761		writeq(val64, &bar0->mac_cfg);
1762	else {
1763		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1764		writel((u32) (val64), add);
1765		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1766		writel((u32) (val64 >> 32), (add + 4));
1767	}
1768
1769	/*
1770	 * Set the time value to be inserted in the pause frame
1771	 * generated by xena.
1772	 */
1773	val64 = readq(&bar0->rmac_pause_cfg);
1774	val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1775	val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1776	writeq(val64, &bar0->rmac_pause_cfg);
1777
1778	/*
1779	 * Set the Threshold Limit for Generating the pause frame
1780	 * If the amount of data in any Queue exceeds ratio of
1781	 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1782	 * pause frame is generated
1783	 */
1784	val64 = 0;
1785	for (i = 0; i < 4; i++) {
1786		val64 |= (((u64)0xFF00 |
1787			   nic->mac_control.mc_pause_threshold_q0q3)
1788			  << (i * 2 * 8));
1789	}
1790	writeq(val64, &bar0->mc_pause_thresh_q0q3);
1791
1792	val64 = 0;
1793	for (i = 0; i < 4; i++) {
1794		val64 |= (((u64)0xFF00 |
1795			   nic->mac_control.mc_pause_threshold_q4q7)
1796			  << (i * 2 * 8));
1797	}
1798	writeq(val64, &bar0->mc_pause_thresh_q4q7);
1799
1800	/*
1801	 * TxDMA will stop Read request if the number of read split has
1802	 * exceeded the limit pointed by shared_splits
1803	 */
1804	val64 = readq(&bar0->pic_control);
1805	val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1806	writeq(val64, &bar0->pic_control);
1807
1808	if (nic->config.bus_speed == 266) {
1809		writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1810		writeq(0x0, &bar0->read_retry_delay);
1811		writeq(0x0, &bar0->write_retry_delay);
1812	}
1813
1814	/*
1815	 * Programming the Herc to split every write transaction
1816	 * that does not start on an ADB to reduce disconnects.
1817	 */
1818	if (nic->device_type == XFRAME_II_DEVICE) {
1819		val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1820			MISC_LINK_STABILITY_PRD(3);
1821		writeq(val64, &bar0->misc_control);
1822		val64 = readq(&bar0->pic_control2);
1823		val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
1824		writeq(val64, &bar0->pic_control2);
1825	}
1826	if (strstr(nic->product_name, "CX4")) {
1827		val64 = TMAC_AVG_IPG(0x17);
1828		writeq(val64, &bar0->tmac_avg_ipg);
1829	}
1830
1831	return SUCCESS;
1832}
1833#define LINK_UP_DOWN_INTERRUPT		1
1834#define MAC_RMAC_ERR_TIMER		2
1835
1836static int s2io_link_fault_indication(struct s2io_nic *nic)
1837{
1838	if (nic->device_type == XFRAME_II_DEVICE)
1839		return LINK_UP_DOWN_INTERRUPT;
1840	else
1841		return MAC_RMAC_ERR_TIMER;
1842}
1843
1844/**
1845 *  do_s2io_write_bits -  update alarm bits in alarm register
1846 *  @value: alarm bits
1847 *  @flag: interrupt status
1848 *  @addr: address value
1849 *  Description: update alarm bits in alarm register
1850 *  Return Value:
1851 *  NONE.
1852 */
1853static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1854{
1855	u64 temp64;
1856
1857	temp64 = readq(addr);
1858
1859	if (flag == ENABLE_INTRS)
1860		temp64 &= ~((u64)value);
1861	else
1862		temp64 |= ((u64)value);
1863	writeq(temp64, addr);
1864}
1865
/*
 * en_dis_err_alarms - enable/disable error alarm interrupt sources
 * @nic: device private variable
 * @mask: bitmask of interrupt blocks (TX_DMA_INTR, RX_MAC_INTR, ...)
 *        whose error alarms should be touched
 * @flag: ENABLE_INTRS or DISABLE_INTRS, passed through to
 *        do_s2io_write_bits()
 *
 * For each selected block this programs the per-block error mask
 * registers and accumulates the block's general-interrupt mask bit
 * into nic->general_int_mask.
 */
static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 gen_int_mask = 0;
	u64 interruptible;

	/* Mask everything while the individual alarm masks are updated */
	writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
	if (mask & TX_DMA_INTR) {
		gen_int_mask |= TXDMA_INT_M;

		do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
				   TXDMA_PCC_INT | TXDMA_TTI_INT |
				   TXDMA_LSO_INT | TXDMA_TPA_INT |
				   TXDMA_SM_INT, flag, &bar0->txdma_int_mask);

		do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
				   PFC_MISC_0_ERR | PFC_MISC_1_ERR |
				   PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
				   &bar0->pfc_err_mask);

		do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
				   TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
				   TDA_PCIX_ERR, flag, &bar0->tda_err_mask);

		do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
				   PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
				   PCC_N_SERR | PCC_6_COF_OV_ERR |
				   PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
				   PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
				   PCC_TXB_ECC_SG_ERR,
				   flag, &bar0->pcc_err_mask);

		do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
				   TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);

		do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
				   LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
				   LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
				   flag, &bar0->lso_err_mask);

		do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
				   flag, &bar0->tpa_err_mask);

		do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
	}

	if (mask & TX_MAC_INTR) {
		gen_int_mask |= TXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
				   &bar0->mac_int_mask);
		do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
				   TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
				   TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
				   flag, &bar0->mac_tmac_err_mask);
	}

	if (mask & TX_XGXS_INTR) {
		gen_int_mask |= TXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
				   &bar0->xgxs_int_mask);
		do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
				   TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
				   flag, &bar0->xgxs_txgxs_err_mask);
	}

	if (mask & RX_DMA_INTR) {
		gen_int_mask |= RXDMA_INT_M;
		do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
				   RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
				   flag, &bar0->rxdma_int_mask);
		do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
				   RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
				   RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
				   RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
		do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
				   PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
				   PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
				   &bar0->prc_pcix_err_mask);
		do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
				   RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
				   &bar0->rpa_err_mask);
		do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
				   RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
				   RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
				   RDA_FRM_ECC_SG_ERR |
				   RDA_MISC_ERR|RDA_PCIX_ERR,
				   flag, &bar0->rda_err_mask);
		do_s2io_write_bits(RTI_SM_ERR_ALARM |
				   RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
				   flag, &bar0->rti_err_mask);
	}

	if (mask & RX_MAC_INTR) {
		gen_int_mask |= RXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
				   &bar0->mac_int_mask);
		interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
				 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
				 RMAC_DOUBLE_ECC_ERR);
		/* Link-state alarm only when the RMAC error timer scheme
		 * is in use (Xframe-I); Xframe-II uses the link interrupt.
		 */
		if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
			interruptible |= RMAC_LINK_STATE_CHANGE_INT;
		do_s2io_write_bits(interruptible,
				   flag, &bar0->mac_rmac_err_mask);
	}

	if (mask & RX_XGXS_INTR) {
		gen_int_mask |= RXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
				   &bar0->xgxs_int_mask);
		do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
				   &bar0->xgxs_rxgxs_err_mask);
	}

	if (mask & MC_INTR) {
		gen_int_mask |= MC_INT_M;
		do_s2io_write_bits(MC_INT_MASK_MC_INT,
				   flag, &bar0->mc_int_mask);
		do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
				   MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
				   &bar0->mc_err_mask);
	}
	nic->general_int_mask = gen_int_mask;

	/* Remove this line when alarm interrupts are enabled */
	nic->general_int_mask = 0;
}
1992
1993/**
1994 *  en_dis_able_nic_intrs - Enable or Disable the interrupts
1995 *  @nic: device private variable,
1996 *  @mask: A mask indicating which Intr block must be modified and,
1997 *  @flag: A flag indicating whether to enable or disable the Intrs.
1998 *  Description: This function will either disable or enable the interrupts
1999 *  depending on the flag argument. The mask argument can be used to
2000 *  enable/disable any Intr block.
2001 *  Return Value: NONE.
2002 */
2003
static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 temp64 = 0, intr_mask = 0;

	/* Start from the alarm mask previously computed into
	 * nic->general_int_mask, then OR in the blocks selected below.
	 */
	intr_mask = nic->general_int_mask;

	/*  Top level interrupt classification */
	/*  PIC Interrupts */
	if (mask & TX_PIC_INTR) {
		/*  Enable PIC Intrs in the general intr mask register */
		intr_mask |= TXPIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/*
			 * If Hercules adapter enable GPIO otherwise
			 * disable all PCIX, Flash, MDIO, IIC and GPIO
			 * interrupts for now.
			 * TODO
			 */
			if (s2io_link_fault_indication(nic) ==
			    LINK_UP_DOWN_INTERRUPT) {
				do_s2io_write_bits(PIC_INT_GPIO, flag,
						   &bar0->pic_int_mask);
				do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
						   &bar0->gpio_int_mask);
			} else
				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable PIC Intrs in the general
			 * intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
		}
	}

	/*  Tx traffic interrupts */
	if (mask & TX_TRAFFIC_INTR) {
		intr_mask |= TXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/*
			 * Enable all the Tx side interrupts
			 * writing 0 Enables all 64 TX interrupt levels
			 */
			writeq(0x0, &bar0->tx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Tx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
		}
	}

	/*  Rx traffic interrupts */
	if (mask & RX_TRAFFIC_INTR) {
		intr_mask |= RXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/* writing 0 Enables all 8 RX interrupt levels */
			writeq(0x0, &bar0->rx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Rx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
		}
	}

	/* In general_int_mask a 0 bit means "not masked", so enabling
	 * clears the selected bits; disabling masks everything at once.
	 */
	temp64 = readq(&bar0->general_int_mask);
	if (flag == ENABLE_INTRS)
		temp64 &= ~((u64)intr_mask);
	else
		temp64 = DISABLE_ALL_INTRS;
	writeq(temp64, &bar0->general_int_mask);

	/* Read back so the cached copy reflects what the hardware holds. */
	nic->general_int_mask = readq(&bar0->general_int_mask);
}
2082
2083/**
2084 *  verify_pcc_quiescent- Checks for PCC quiescent state
2085 *  Return: 1 If PCC is quiescence
2086 *          0 If PCC is not quiescence
2087 */
2088static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2089{
2090	int ret = 0, herc;
2091	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2092	u64 val64 = readq(&bar0->adapter_status);
2093
2094	herc = (sp->device_type == XFRAME_II_DEVICE);
2095
2096	if (flag == false) {
2097		if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2098			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2099				ret = 1;
2100		} else {
2101			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2102				ret = 1;
2103		}
2104	} else {
2105		if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2106			if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2107			     ADAPTER_STATUS_RMAC_PCC_IDLE))
2108				ret = 1;
2109		} else {
2110			if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2111			     ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2112				ret = 1;
2113		}
2114	}
2115
2116	return ret;
2117}
2118/**
2119 *  verify_xena_quiescence - Checks whether the H/W is ready
2120 *  Description: Returns whether the H/W is ready to go or not. Depending
2121 *  on whether adapter enable bit was written or not the comparison
2122 *  differs and the calling function passes the input argument flag to
2123 *  indicate this.
2124 *  Return: 1 If xena is quiescence
2125 *          0 If Xena is not quiescence
2126 */
2127
2128static int verify_xena_quiescence(struct s2io_nic *sp)
2129{
2130	int  mode;
2131	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2132	u64 val64 = readq(&bar0->adapter_status);
2133	mode = s2io_verify_pci_mode(sp);
2134
2135	if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2136		DBG_PRINT(ERR_DBG, "TDMA is not ready!\n");
2137		return 0;
2138	}
2139	if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2140		DBG_PRINT(ERR_DBG, "RDMA is not ready!\n");
2141		return 0;
2142	}
2143	if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2144		DBG_PRINT(ERR_DBG, "PFC is not ready!\n");
2145		return 0;
2146	}
2147	if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2148		DBG_PRINT(ERR_DBG, "TMAC BUF is not empty!\n");
2149		return 0;
2150	}
2151	if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2152		DBG_PRINT(ERR_DBG, "PIC is not QUIESCENT!\n");
2153		return 0;
2154	}
2155	if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2156		DBG_PRINT(ERR_DBG, "MC_DRAM is not ready!\n");
2157		return 0;
2158	}
2159	if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2160		DBG_PRINT(ERR_DBG, "MC_QUEUES is not ready!\n");
2161		return 0;
2162	}
2163	if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2164		DBG_PRINT(ERR_DBG, "M_PLL is not locked!\n");
2165		return 0;
2166	}
2167
2168	/*
2169	 * In PCI 33 mode, the P_PLL is not used, and therefore,
2170	 * the the P_PLL_LOCK bit in the adapter_status register will
2171	 * not be asserted.
2172	 */
2173	if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2174	    sp->device_type == XFRAME_II_DEVICE &&
2175	    mode != PCI_MODE_PCI_33) {
2176		DBG_PRINT(ERR_DBG, "P_PLL is not locked!\n");
2177		return 0;
2178	}
2179	if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2180	      ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2181		DBG_PRINT(ERR_DBG, "RC_PRC is not QUIESCENT!\n");
2182		return 0;
2183	}
2184	return 1;
2185}
2186
2187/**
2188 * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
2189 * @sp: Pointer to device specifc structure
2190 * Description :
2191 * New procedure to clear mac address reading  problems on Alpha platforms
2192 *
2193 */
2194
2195static void fix_mac_address(struct s2io_nic *sp)
2196{
2197	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2198	int i = 0;
2199
2200	while (fix_mac[i] != END_SIGN) {
2201		writeq(fix_mac[i++], &bar0->gpio_control);
2202		udelay(10);
2203		(void) readq(&bar0->gpio_control);
2204	}
2205}
2206
2207/**
2208 *  start_nic - Turns the device on
2209 *  @nic : device private variable.
2210 *  Description:
2211 *  This function actually turns the device on. Before this  function is
2212 *  called,all Registers are configured from their reset states
2213 *  and shared memory is allocated but the NIC is still quiescent. On
2214 *  calling this function, the device interrupts are cleared and the NIC is
2215 *  literally switched on by writing into the adapter control register.
2216 *  Return Value:
2217 *  SUCCESS on success and -1 on failure.
2218 */
2219
static int start_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	/*  PRC Initialization and configuration */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		/* Point each PRC at the DMA address of its first Rx block. */
		writeq((u64)ring->rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* In 2 buffer mode, accept frames with L2 errors
		 * (RX_PA_CFG_IGNORE_L2_ERR) via the Rx_pa_cfg register.
		 */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	/* Honor the vlan_tag_strip module parameter. */
	if (vlan_tag_strip == 0) {
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		nic->vlan_strip_flag = 0;
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/* ECC protection: clears ADAPTER_ECC_EN.  NOTE(review): despite
	 * the bit name this has always been the driver's "enable ECC"
	 * step here - confirm the bit polarity against the Xframe spec
	 * before changing.
	 */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, "
			  "Adapter status reads: 0x%llx\n",
			  dev->name, (unsigned long long)val64);
		return FAILURE;
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Dont see link state interrupts initially on some switches,
		 * so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		/* Magic LED setup write at BAR0 offset 0x2700 (SXE-002
		 * workaround); values come from the errata, not the
		 * register map.
		 */
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
2324/**
2325 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2326 */
2327static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
2328					struct TxD *txdlp, int get_off)
2329{
2330	struct s2io_nic *nic = fifo_data->nic;
2331	struct sk_buff *skb;
2332	struct TxD *txds;
2333	u16 j, frg_cnt;
2334
2335	txds = txdlp;
2336	if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
2337		pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
2338				 sizeof(u64), PCI_DMA_TODEVICE);
 
2339		txds++;
2340	}
2341
2342	skb = (struct sk_buff *)((unsigned long)txds->Host_Control);
2343	if (!skb) {
2344		memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2345		return NULL;
2346	}
2347	pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
2348			 skb_headlen(skb), PCI_DMA_TODEVICE);
2349	frg_cnt = skb_shinfo(skb)->nr_frags;
2350	if (frg_cnt) {
2351		txds++;
2352		for (j = 0; j < frg_cnt; j++, txds++) {
2353			const skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2354			if (!txds->Buffer_Pointer)
2355				break;
2356			pci_unmap_page(nic->pdev,
2357				       (dma_addr_t)txds->Buffer_Pointer,
2358				       skb_frag_size(frag), PCI_DMA_TODEVICE);
2359		}
2360	}
2361	memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2362	return skb;
2363}
2364
2365/**
2366 *  free_tx_buffers - Free all queued Tx buffers
2367 *  @nic : device private variable.
2368 *  Description:
2369 *  Free all queued Tx buffers.
2370 *  Return Value: void
2371 */
2372
2373static void free_tx_buffers(struct s2io_nic *nic)
2374{
2375	struct net_device *dev = nic->dev;
2376	struct sk_buff *skb;
2377	struct TxD *txdp;
2378	int i, j;
2379	int cnt = 0;
2380	struct config_param *config = &nic->config;
2381	struct mac_info *mac_control = &nic->mac_control;
2382	struct stat_block *stats = mac_control->stats_info;
2383	struct swStat *swstats = &stats->sw_stat;
2384
2385	for (i = 0; i < config->tx_fifo_num; i++) {
2386		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
2387		struct fifo_info *fifo = &mac_control->fifos[i];
2388		unsigned long flags;
2389
2390		spin_lock_irqsave(&fifo->tx_lock, flags);
2391		for (j = 0; j < tx_cfg->fifo_len; j++) {
2392			txdp = fifo->list_info[j].list_virt_addr;
2393			skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2394			if (skb) {
2395				swstats->mem_freed += skb->truesize;
2396				dev_kfree_skb(skb);
2397				cnt++;
2398			}
2399		}
2400		DBG_PRINT(INTR_DBG,
2401			  "%s: forcibly freeing %d skbs on FIFO%d\n",
2402			  dev->name, cnt, i);
2403		fifo->tx_curr_get_info.offset = 0;
2404		fifo->tx_curr_put_info.offset = 0;
2405		spin_unlock_irqrestore(&fifo->tx_lock, flags);
2406	}
2407}
2408
2409/**
2410 *   stop_nic -  To stop the nic
 *   @nic: device private variable.
2412 *   Description:
2413 *   This function does exactly the opposite of what the start_nic()
2414 *   function does. This function is called to stop the device.
2415 *   Return Value:
2416 *   void.
2417 */
2418
2419static void stop_nic(struct s2io_nic *nic)
2420{
2421	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2422	register u64 val64 = 0;
2423	u16 interruptible;
2424
2425	/*  Disable all interrupts */
2426	en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2427	interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2428	interruptible |= TX_PIC_INTR;
2429	en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2430
2431	/* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2432	val64 = readq(&bar0->adapter_control);
2433	val64 &= ~(ADAPTER_CNTL_EN);
2434	writeq(val64, &bar0->adapter_control);
2435}
2436
2437/**
2438 *  fill_rx_buffers - Allocates the Rx side skbs
 *  @nic: device private variable
 *  @ring: per ring structure
2440 *  @from_card_up: If this is true, we will map the buffer to get
2441 *     the dma address for buf0 and buf1 to give it to the card.
2442 *     Else we will sync the already mapped buffer to give it to the card.
2443 *  Description:
2444 *  The function allocates Rx side skbs and puts the physical
2445 *  address of these buffers into the RxD buffer pointers, so that the NIC
2446 *  can DMA the received frame into these locations.
2447 *  The NIC supports 3 receive modes, viz
2448 *  1. single buffer,
2449 *  2. three buffer and
2450 *  3. Five buffer modes.
2451 *  Each mode defines how many fragments the received frame will be split
2452 *  up into by the NIC. The frame is split into L3 header, L4 Header,
2453 *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2454 *  is split into 3 fragments. As of now only single buffer mode is
2455 *  supported.
2456 *   Return Value:
2457 *  SUCCESS on success or an appropriate -ve value on failure.
2458 */
static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
			   int from_card_up)
{
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	int off, size, block_no, block_no1;
	u32 alloc_tab = 0;
	u32 alloc_cnt;
	u64 tmp;
	struct buffAdd *ba;
	struct RxD_t *first_rxdp = NULL;
	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
	int rxd_index = 0;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;
	struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat;

	/* Number of buffers needed to bring the ring back up to pkt_cnt. */
	alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;

	block_no1 = ring->rx_curr_get_info.block_index;
	while (alloc_tab < alloc_cnt) {
		block_no = ring->rx_curr_put_info.block_index;

		off = ring->rx_curr_put_info.offset;

		rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;

		/* NOTE(review): rxd_index is computed but never read
		 * below in this function.
		 */
		rxd_index = off + 1;
		if (block_no)
			rxd_index += (block_no * ring->rxd_count);

		/* Put pointer has caught up with get while the RxD still
		 * holds an skb: the ring is full, stop refilling.
		 */
		if ((block_no == block_no1) &&
		    (off == ring->rx_curr_get_info.offset) &&
		    (rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put info equated\n",
				  ring->dev->name);
			goto end;
		}
		/* Stepped past the last RxD of this block: wrap to the
		 * next block (circularly over block_count blocks).
		 */
		if (off && (off == ring->rxd_count)) {
			ring->rx_curr_put_info.block_index++;
			if (ring->rx_curr_put_info.block_index ==
			    ring->block_count)
				ring->rx_curr_put_info.block_index = 0;
			block_no = ring->rx_curr_put_info.block_index;
			off = 0;
			ring->rx_curr_put_info.offset = off;
			rxdp = ring->rx_blocks[block_no].block_virt_addr;
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
				  ring->dev->name, rxdp);

		}

		/* Descriptor is still owned by the NIC (and, in 3B mode,
		 * already carries a posted buffer) - nothing to fill here.
		 */
		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
		    ((ring->rxd_mode == RXD_MODE_3B) &&
		     (rxdp->Control_2 & s2BIT(0)))) {
			ring->rx_curr_put_info.offset = off;
			goto end;
		}
		/* calculate size of skb based on ring mode */
		size = ring->mtu +
			HEADER_ETHERNET_II_802_3_SIZE +
			HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
		if (ring->rxd_mode == RXD_MODE_1)
			size += NET_IP_ALIGN;
		else
			size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;

		/* allocate skb */
		skb = netdev_alloc_skb(nic->dev, size);
		if (!skb) {
			DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
				  ring->dev->name);
			/* Hand the batched descriptors to the NIC before
			 * bailing so already-filled buffers stay usable.
			 */
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			swstats->mem_alloc_fail_cnt++;

			return -ENOMEM ;
		}
		swstats->mem_allocated += skb->truesize;

		if (ring->rxd_mode == RXD_MODE_1) {
			/* 1 buffer mode - normal operation mode */
			rxdp1 = (struct RxD1 *)rxdp;
			memset(rxdp, 0, sizeof(struct RxD1));
			skb_reserve(skb, NET_IP_ALIGN);
			rxdp1->Buffer0_ptr =
				pci_map_single(ring->pdev, skb->data,
					       size - NET_IP_ALIGN,
					       PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(nic->pdev,
						  rxdp1->Buffer0_ptr))
				goto pci_map_failed;

			rxdp->Control_2 =
				SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
			rxdp->Host_Control = (unsigned long)skb;
		} else if (ring->rxd_mode == RXD_MODE_3B) {
			/*
			 * 2 buffer mode -
			 * 2 buffer mode provides 128
			 * byte aligned receive buffers.
			 */

			rxdp3 = (struct RxD3 *)rxdp;
			/* save buffer pointers to avoid frequent dma mapping */
			Buffer0_ptr = rxdp3->Buffer0_ptr;
			Buffer1_ptr = rxdp3->Buffer1_ptr;
			memset(rxdp, 0, sizeof(struct RxD3));
			/* restore the buffer pointers for dma sync*/
			rxdp3->Buffer0_ptr = Buffer0_ptr;
			rxdp3->Buffer1_ptr = Buffer1_ptr;

			ba = &ring->ba[block_no][off];
			skb_reserve(skb, BUF0_LEN);
			/* Align skb->data upward; assumes ALIGN_SIZE is
			 * 2^k - 1 (mask form) - TODO confirm in s2io.h.
			 */
			tmp = (u64)(unsigned long)skb->data;
			tmp += ALIGN_SIZE;
			tmp &= ~ALIGN_SIZE;
			skb->data = (void *) (unsigned long)tmp;
			skb_reset_tail_pointer(skb);

			/* Buffer0 holds the header area (ba->ba_0): map it
			 * once at card-up, afterwards only sync it back to
			 * the device.
			 */
			if (from_card_up) {
				rxdp3->Buffer0_ptr =
					pci_map_single(ring->pdev, ba->ba_0,
						       BUF0_LEN,
						       PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(nic->pdev,
							  rxdp3->Buffer0_ptr))
					goto pci_map_failed;
			} else
				pci_dma_sync_single_for_device(ring->pdev,
							       (dma_addr_t)rxdp3->Buffer0_ptr,
							       BUF0_LEN,
							       PCI_DMA_FROMDEVICE);

			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
			/* NOTE(review): this inner mode test is always true
			 * inside the RXD_MODE_3B branch.
			 */
			if (ring->rxd_mode == RXD_MODE_3B) {
				/* Two buffer mode */

				/*
				 * Buffer2 will have L3/L4 header plus
				 * L4 payload
				 */
				rxdp3->Buffer2_ptr = pci_map_single(ring->pdev,
								    skb->data,
								    ring->mtu + 4,
								    PCI_DMA_FROMDEVICE);

				if (pci_dma_mapping_error(nic->pdev,
							  rxdp3->Buffer2_ptr))
					goto pci_map_failed;

				if (from_card_up) {
					rxdp3->Buffer1_ptr =
						pci_map_single(ring->pdev,
							       ba->ba_1,
							       BUF1_LEN,
							       PCI_DMA_FROMDEVICE);

					if (pci_dma_mapping_error(nic->pdev,
								  rxdp3->Buffer1_ptr)) {
						/* Undo the Buffer2 mapping
						 * made just above.
						 */
						pci_unmap_single(ring->pdev,
								 (dma_addr_t)(unsigned long)
								 skb->data,
								 ring->mtu + 4,
								 PCI_DMA_FROMDEVICE);
						goto pci_map_failed;
					}
				}
				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
					(ring->mtu + 4);
			}
			rxdp->Control_2 |= s2BIT(0);
			rxdp->Host_Control = (unsigned long) (skb);
		}
		/* Mid-batch descriptors get ownership immediately; batch
		 * heads are deferred until the next sync point below.
		 */
		if (alloc_tab & ((1 << rxsync_frequency) - 1))
			rxdp->Control_1 |= RXD_OWN_XENA;
		off++;
		if (off == (ring->rxd_count + 1))
			off = 0;
		ring->rx_curr_put_info.offset = off;

		rxdp->Control_2 |= SET_RXD_MARKER;
		/* Every 2^rxsync_frequency descriptors, publish the previous
		 * batch head to the NIC - wmb() first so all descriptor
		 * fields are visible before the ownership bit flips.
		 */
		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			first_rxdp = rxdp;
		}
		ring->rx_bufs_left += 1;
		alloc_tab++;
	}

end:
	/* Transfer ownership of first descriptor to adapter just before
	 * exiting. Before that, use memory barrier so that ownership
	 * and other fields are seen by adapter correctly.
	 */
	if (first_rxdp) {
		wmb();
		first_rxdp->Control_1 |= RXD_OWN_XENA;
	}

	return SUCCESS;

pci_map_failed:
	swstats->pci_map_fail_cnt++;
	swstats->mem_freed += skb->truesize;
	dev_kfree_skb_irq(skb);
	return -ENOMEM;
}
2673
2674static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2675{
2676	struct net_device *dev = sp->dev;
2677	int j;
2678	struct sk_buff *skb;
2679	struct RxD_t *rxdp;
2680	struct RxD1 *rxdp1;
2681	struct RxD3 *rxdp3;
2682	struct mac_info *mac_control = &sp->mac_control;
2683	struct stat_block *stats = mac_control->stats_info;
2684	struct swStat *swstats = &stats->sw_stat;
2685
2686	for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2687		rxdp = mac_control->rings[ring_no].
2688			rx_blocks[blk].rxds[j].virt_addr;
2689		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2690		if (!skb)
2691			continue;
2692		if (sp->rxd_mode == RXD_MODE_1) {
2693			rxdp1 = (struct RxD1 *)rxdp;
2694			pci_unmap_single(sp->pdev,
2695					 (dma_addr_t)rxdp1->Buffer0_ptr,
2696					 dev->mtu +
2697					 HEADER_ETHERNET_II_802_3_SIZE +
2698					 HEADER_802_2_SIZE + HEADER_SNAP_SIZE,
2699					 PCI_DMA_FROMDEVICE);
2700			memset(rxdp, 0, sizeof(struct RxD1));
2701		} else if (sp->rxd_mode == RXD_MODE_3B) {
2702			rxdp3 = (struct RxD3 *)rxdp;
2703			pci_unmap_single(sp->pdev,
2704					 (dma_addr_t)rxdp3->Buffer0_ptr,
2705					 BUF0_LEN,
2706					 PCI_DMA_FROMDEVICE);
2707			pci_unmap_single(sp->pdev,
2708					 (dma_addr_t)rxdp3->Buffer1_ptr,
2709					 BUF1_LEN,
2710					 PCI_DMA_FROMDEVICE);
2711			pci_unmap_single(sp->pdev,
2712					 (dma_addr_t)rxdp3->Buffer2_ptr,
2713					 dev->mtu + 4,
2714					 PCI_DMA_FROMDEVICE);
2715			memset(rxdp, 0, sizeof(struct RxD3));
2716		}
2717		swstats->mem_freed += skb->truesize;
2718		dev_kfree_skb(skb);
2719		mac_control->rings[ring_no].rx_bufs_left -= 1;
2720	}
2721}
2722
2723/**
2724 *  free_rx_buffers - Frees all Rx buffers
2725 *  @sp: device private variable.
2726 *  Description:
2727 *  This function will free all Rx buffers allocated by host.
2728 *  Return Value:
2729 *  NONE.
2730 */
2731
2732static void free_rx_buffers(struct s2io_nic *sp)
2733{
2734	struct net_device *dev = sp->dev;
2735	int i, blk = 0, buf_cnt = 0;
2736	struct config_param *config = &sp->config;
2737	struct mac_info *mac_control = &sp->mac_control;
2738
2739	for (i = 0; i < config->rx_ring_num; i++) {
2740		struct ring_info *ring = &mac_control->rings[i];
2741
2742		for (blk = 0; blk < rx_ring_sz[i]; blk++)
2743			free_rxd_blk(sp, i, blk);
2744
2745		ring->rx_curr_put_info.block_index = 0;
2746		ring->rx_curr_get_info.block_index = 0;
2747		ring->rx_curr_put_info.offset = 0;
2748		ring->rx_curr_get_info.offset = 0;
2749		ring->rx_bufs_left = 0;
2750		DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
2751			  dev->name, buf_cnt, i);
2752	}
2753}
2754
2755static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
2756{
2757	if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2758		DBG_PRINT(INFO_DBG, "%s: Out of memory in Rx Intr!!\n",
2759			  ring->dev->name);
2760	}
2761	return 0;
2762}
2763
2764/**
2765 * s2io_poll - Rx interrupt handler for NAPI support
2766 * @napi : pointer to the napi structure.
2767 * @budget : The number of packets that were budgeted to be processed
2768 * during  one pass through the 'Poll" function.
2769 * Description:
 * Comes into picture only if NAPI support has been incorporated. It does
 * the same thing that rx_intr_handler does, but not in an interrupt
 * context, and it will process only a given number of packets.
2773 * Return value:
2774 * 0 on success and 1 if there are No Rx packets to be processed.
2775 */
2776
static int s2io_poll_msix(struct napi_struct *napi, int budget)
{
	struct ring_info *ring = container_of(napi, struct ring_info, napi);
	struct net_device *dev = ring->dev;
	int pkts_processed = 0;
	u8 __iomem *addr = NULL;
	u8 val8 = 0;
	struct s2io_nic *nic = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	int budget_org = budget;	/* keep original budget for the compare below */

	if (unlikely(!is_s2io_card_up(nic)))
		return 0;

	pkts_processed = rx_intr_handler(ring, budget);
	s2io_chk_rx_buffers(nic, ring);

	/* Under budget: all work is done, leave polling mode and unmask
	 * this ring's MSI-X vector again.
	 */
	if (pkts_processed < budget_org) {
		napi_complete(napi);
		/*Re Enable MSI-Rx Vector*/
		/* Per-ring mask lives in one byte of the 64-bit
		 * xmsi_mask_reg; '7 - ring_no' selects that byte.
		 * NOTE(review): the 0x3f/0xbf unmask values look like
		 * magic from the Xframe spec - confirm before changing.
		 */
		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
		addr += 7 - ring->ring_no;
		val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
		writeb(val8, addr);
		val8 = readb(addr);	/* flush the write */
	}
	return pkts_processed;
}
2805
2806static int s2io_poll_inta(struct napi_struct *napi, int budget)
2807{
2808	struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2809	int pkts_processed = 0;
2810	int ring_pkts_processed, i;
2811	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2812	int budget_org = budget;
2813	struct config_param *config = &nic->config;
2814	struct mac_info *mac_control = &nic->mac_control;
2815
2816	if (unlikely(!is_s2io_card_up(nic)))
2817		return 0;
2818
2819	for (i = 0; i < config->rx_ring_num; i++) {
2820		struct ring_info *ring = &mac_control->rings[i];
2821		ring_pkts_processed = rx_intr_handler(ring, budget);
2822		s2io_chk_rx_buffers(nic, ring);
2823		pkts_processed += ring_pkts_processed;
2824		budget -= ring_pkts_processed;
2825		if (budget <= 0)
2826			break;
2827	}
2828	if (pkts_processed < budget_org) {
2829		napi_complete(napi);
2830		/* Re enable the Rx interrupts for the ring */
2831		writeq(0, &bar0->rx_traffic_mask);
2832		readl(&bar0->rx_traffic_mask);
2833	}
2834	return pkts_processed;
2835}
2836
2837#ifdef CONFIG_NET_POLL_CONTROLLER
2838/**
2839 * s2io_netpoll - netpoll event handler entry point
2840 * @dev : pointer to the device structure.
2841 * Description:
2842 * 	This function will be called by upper layer to check for events on the
2843 * interface in situations where interrupts are disabled. It is used for
2844 * specific in-kernel networking tasks, such as remote consoles and kernel
2845 * debugging over the network (example netdump in RedHat).
2846 */
2847static void s2io_netpoll(struct net_device *dev)
2848{
2849	struct s2io_nic *nic = netdev_priv(dev);
2850	const int irq = nic->pdev->irq;
2851	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2852	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2853	int i;
2854	struct config_param *config = &nic->config;
2855	struct mac_info *mac_control = &nic->mac_control;
2856
2857	if (pci_channel_offline(nic->pdev))
2858		return;
2859
2860	disable_irq(irq);
2861
2862	writeq(val64, &bar0->rx_traffic_int);
2863	writeq(val64, &bar0->tx_traffic_int);
2864
2865	/* we need to free up the transmitted skbufs or else netpoll will
2866	 * run out of skbs and will fail and eventually netpoll application such
2867	 * as netdump will fail.
2868	 */
2869	for (i = 0; i < config->tx_fifo_num; i++)
2870		tx_intr_handler(&mac_control->fifos[i]);
2871
2872	/* check for received packet and indicate up to network */
2873	for (i = 0; i < config->rx_ring_num; i++) {
2874		struct ring_info *ring = &mac_control->rings[i];
2875
2876		rx_intr_handler(ring, 0);
2877	}
2878
2879	for (i = 0; i < config->rx_ring_num; i++) {
2880		struct ring_info *ring = &mac_control->rings[i];
2881
2882		if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2883			DBG_PRINT(INFO_DBG,
2884				  "%s: Out of memory in Rx Netpoll!!\n",
2885				  dev->name);
2886			break;
2887		}
2888	}
2889	enable_irq(irq);
2890}
2891#endif
2892
2893/**
2894 *  rx_intr_handler - Rx interrupt handler
2895 *  @ring_info: per ring structure.
2896 *  @budget: budget for napi processing.
2897 *  Description:
2898 *  If the interrupt is because of a received frame or if the
2899 *  receive ring contains fresh as yet un-processed frames,this function is
2900 *  called. It picks out the RxD at which place the last Rx processing had
2901 *  stopped and sends the skb to the OSM's Rx handler and then increments
2902 *  the offset.
2903 *  Return Value:
2904 *  No. of napi packets processed.
2905 */
2906static int rx_intr_handler(struct ring_info *ring_data, int budget)
2907{
2908	int get_block, put_block;
2909	struct rx_curr_get_info get_info, put_info;
2910	struct RxD_t *rxdp;
2911	struct sk_buff *skb;
2912	int pkt_cnt = 0, napi_pkts = 0;
2913	int i;
2914	struct RxD1 *rxdp1;
2915	struct RxD3 *rxdp3;
2916
2917	if (budget <= 0)
2918		return napi_pkts;
2919
2920	get_info = ring_data->rx_curr_get_info;
2921	get_block = get_info.block_index;
2922	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2923	put_block = put_info.block_index;
2924	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2925
2926	while (RXD_IS_UP2DT(rxdp)) {
2927		/*
2928		 * If your are next to put index then it's
2929		 * FIFO full condition
2930		 */
2931		if ((get_block == put_block) &&
2932		    (get_info.offset + 1) == put_info.offset) {
2933			DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
2934				  ring_data->dev->name);
2935			break;
2936		}
2937		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2938		if (skb == NULL) {
2939			DBG_PRINT(ERR_DBG, "%s: NULL skb in Rx Intr\n",
2940				  ring_data->dev->name);
2941			return 0;
2942		}
2943		if (ring_data->rxd_mode == RXD_MODE_1) {
2944			rxdp1 = (struct RxD1 *)rxdp;
2945			pci_unmap_single(ring_data->pdev, (dma_addr_t)
2946					 rxdp1->Buffer0_ptr,
2947					 ring_data->mtu +
2948					 HEADER_ETHERNET_II_802_3_SIZE +
2949					 HEADER_802_2_SIZE +
2950					 HEADER_SNAP_SIZE,
2951					 PCI_DMA_FROMDEVICE);
2952		} else if (ring_data->rxd_mode == RXD_MODE_3B) {
2953			rxdp3 = (struct RxD3 *)rxdp;
2954			pci_dma_sync_single_for_cpu(ring_data->pdev,
2955						    (dma_addr_t)rxdp3->Buffer0_ptr,
2956						    BUF0_LEN,
2957						    PCI_DMA_FROMDEVICE);
2958			pci_unmap_single(ring_data->pdev,
2959					 (dma_addr_t)rxdp3->Buffer2_ptr,
2960					 ring_data->mtu + 4,
2961					 PCI_DMA_FROMDEVICE);
2962		}
2963		prefetch(skb->data);
2964		rx_osm_handler(ring_data, rxdp);
2965		get_info.offset++;
2966		ring_data->rx_curr_get_info.offset = get_info.offset;
2967		rxdp = ring_data->rx_blocks[get_block].
2968			rxds[get_info.offset].virt_addr;
2969		if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
2970			get_info.offset = 0;
2971			ring_data->rx_curr_get_info.offset = get_info.offset;
2972			get_block++;
2973			if (get_block == ring_data->block_count)
2974				get_block = 0;
2975			ring_data->rx_curr_get_info.block_index = get_block;
2976			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2977		}
2978
2979		if (ring_data->nic->config.napi) {
2980			budget--;
2981			napi_pkts++;
2982			if (!budget)
2983				break;
2984		}
2985		pkt_cnt++;
2986		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2987			break;
2988	}
2989	if (ring_data->lro) {
2990		/* Clear all LRO sessions before exiting */
2991		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
2992			struct lro *lro = &ring_data->lro0_n[i];
2993			if (lro->in_use) {
2994				update_L3L4_header(ring_data->nic, lro);
2995				queue_rx_frame(lro->parent, lro->vlan_tag);
2996				clear_lro_session(lro);
2997			}
2998		}
2999	}
3000	return napi_pkts;
3001}
3002
3003/**
3004 *  tx_intr_handler - Transmit interrupt handler
3005 *  @nic : device private variable
3006 *  Description:
3007 *  If an interrupt was raised to indicate DMA complete of the
3008 *  Tx packet, this function is called. It identifies the last TxD
3009 *  whose buffer was freed and frees all skbs whose data have already
3010 *  DMA'ed into the NICs internal memory.
3011 *  Return Value:
3012 *  NONE
3013 */
3014
3015static void tx_intr_handler(struct fifo_info *fifo_data)
3016{
3017	struct s2io_nic *nic = fifo_data->nic;
3018	struct tx_curr_get_info get_info, put_info;
3019	struct sk_buff *skb = NULL;
3020	struct TxD *txdlp;
3021	int pkt_cnt = 0;
3022	unsigned long flags = 0;
3023	u8 err_mask;
3024	struct stat_block *stats = nic->mac_control.stats_info;
3025	struct swStat *swstats = &stats->sw_stat;
3026
3027	if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
3028		return;
3029
3030	get_info = fifo_data->tx_curr_get_info;
3031	memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
3032	txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
3033	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
3034	       (get_info.offset != put_info.offset) &&
3035	       (txdlp->Host_Control)) {
3036		/* Check for TxD errors */
3037		if (txdlp->Control_1 & TXD_T_CODE) {
3038			unsigned long long err;
3039			err = txdlp->Control_1 & TXD_T_CODE;
3040			if (err & 0x1) {
3041				swstats->parity_err_cnt++;
3042			}
3043
3044			/* update t_code statistics */
3045			err_mask = err >> 48;
3046			switch (err_mask) {
3047			case 2:
3048				swstats->tx_buf_abort_cnt++;
3049				break;
3050
3051			case 3:
3052				swstats->tx_desc_abort_cnt++;
3053				break;
3054
3055			case 7:
3056				swstats->tx_parity_err_cnt++;
3057				break;
3058
3059			case 10:
3060				swstats->tx_link_loss_cnt++;
3061				break;
3062
3063			case 15:
3064				swstats->tx_list_proc_err_cnt++;
3065				break;
3066			}
3067		}
3068
3069		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
3070		if (skb == NULL) {
3071			spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3072			DBG_PRINT(ERR_DBG, "%s: NULL skb in Tx Free Intr\n",
3073				  __func__);
3074			return;
3075		}
3076		pkt_cnt++;
3077
3078		/* Updating the statistics block */
3079		swstats->mem_freed += skb->truesize;
3080		dev_kfree_skb_irq(skb);
3081
3082		get_info.offset++;
3083		if (get_info.offset == get_info.fifo_len + 1)
3084			get_info.offset = 0;
3085		txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
3086		fifo_data->tx_curr_get_info.offset = get_info.offset;
3087	}
3088
3089	s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);
3090
3091	spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3092}
3093
3094/**
3095 *  s2io_mdio_write - Function to write in to MDIO registers
3096 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3097 *  @addr     : address value
3098 *  @value    : data value
3099 *  @dev      : pointer to net_device structure
3100 *  Description:
3101 *  This function is used to write values to the MDIO registers
3102 *  NONE
3103 */
3104static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value,
3105			    struct net_device *dev)
3106{
3107	u64 val64;
3108	struct s2io_nic *sp = netdev_priv(dev);
3109	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3110
3111	/* address transaction */
3112	val64 = MDIO_MMD_INDX_ADDR(addr) |
3113		MDIO_MMD_DEV_ADDR(mmd_type) |
3114		MDIO_MMS_PRT_ADDR(0x0);
3115	writeq(val64, &bar0->mdio_control);
3116	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3117	writeq(val64, &bar0->mdio_control);
3118	udelay(100);
3119
3120	/* Data transaction */
3121	val64 = MDIO_MMD_INDX_ADDR(addr) |
3122		MDIO_MMD_DEV_ADDR(mmd_type) |
3123		MDIO_MMS_PRT_ADDR(0x0) |
3124		MDIO_MDIO_DATA(value) |
3125		MDIO_OP(MDIO_OP_WRITE_TRANS);
3126	writeq(val64, &bar0->mdio_control);
3127	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3128	writeq(val64, &bar0->mdio_control);
3129	udelay(100);
3130
3131	val64 = MDIO_MMD_INDX_ADDR(addr) |
3132		MDIO_MMD_DEV_ADDR(mmd_type) |
3133		MDIO_MMS_PRT_ADDR(0x0) |
3134		MDIO_OP(MDIO_OP_READ_TRANS);
3135	writeq(val64, &bar0->mdio_control);
3136	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3137	writeq(val64, &bar0->mdio_control);
3138	udelay(100);
3139}
3140
3141/**
3142 *  s2io_mdio_read - Function to write in to MDIO registers
3143 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3144 *  @addr     : address value
3145 *  @dev      : pointer to net_device structure
3146 *  Description:
3147 *  This function is used to read values to the MDIO registers
3148 *  NONE
3149 */
3150static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3151{
3152	u64 val64 = 0x0;
3153	u64 rval64 = 0x0;
3154	struct s2io_nic *sp = netdev_priv(dev);
3155	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3156
3157	/* address transaction */
3158	val64 = val64 | (MDIO_MMD_INDX_ADDR(addr)
3159			 | MDIO_MMD_DEV_ADDR(mmd_type)
3160			 | MDIO_MMS_PRT_ADDR(0x0));
3161	writeq(val64, &bar0->mdio_control);
3162	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3163	writeq(val64, &bar0->mdio_control);
3164	udelay(100);
3165
3166	/* Data transaction */
3167	val64 = MDIO_MMD_INDX_ADDR(addr) |
3168		MDIO_MMD_DEV_ADDR(mmd_type) |
3169		MDIO_MMS_PRT_ADDR(0x0) |
3170		MDIO_OP(MDIO_OP_READ_TRANS);
3171	writeq(val64, &bar0->mdio_control);
3172	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3173	writeq(val64, &bar0->mdio_control);
3174	udelay(100);
3175
3176	/* Read the value from regs */
3177	rval64 = readq(&bar0->mdio_control);
3178	rval64 = rval64 & 0xFFFF0000;
3179	rval64 = rval64 >> 16;
3180	return rval64;
3181}
3182
3183/**
3184 *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
3185 *  @counter      : counter value to be updated
3186 *  @flag         : flag to indicate the status
3187 *  @type         : counter type
3188 *  Description:
3189 *  This function is to check the status of the xpak counters value
3190 *  NONE
3191 */
3192
3193static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index,
3194				  u16 flag, u16 type)
3195{
3196	u64 mask = 0x3;
3197	u64 val64;
3198	int i;
3199	for (i = 0; i < index; i++)
3200		mask = mask << 0x2;
3201
3202	if (flag > 0) {
3203		*counter = *counter + 1;
3204		val64 = *regs_stat & mask;
3205		val64 = val64 >> (index * 0x2);
3206		val64 = val64 + 1;
3207		if (val64 == 3) {
3208			switch (type) {
3209			case 1:
3210				DBG_PRINT(ERR_DBG,
3211					  "Take Xframe NIC out of service.\n");
3212				DBG_PRINT(ERR_DBG,
3213"Excessive temperatures may result in premature transceiver failure.\n");
3214				break;
3215			case 2:
3216				DBG_PRINT(ERR_DBG,
3217					  "Take Xframe NIC out of service.\n");
3218				DBG_PRINT(ERR_DBG,
3219"Excessive bias currents may indicate imminent laser diode failure.\n");
3220				break;
3221			case 3:
3222				DBG_PRINT(ERR_DBG,
3223					  "Take Xframe NIC out of service.\n");
3224				DBG_PRINT(ERR_DBG,
3225"Excessive laser output power may saturate far-end receiver.\n");
3226				break;
3227			default:
3228				DBG_PRINT(ERR_DBG,
3229					  "Incorrect XPAK Alarm type\n");
3230			}
3231			val64 = 0x0;
3232		}
3233		val64 = val64 << (index * 0x2);
3234		*regs_stat = (*regs_stat & (~mask)) | (val64);
3235
3236	} else {
3237		*regs_stat = *regs_stat & (~mask);
3238	}
3239}
3240
3241/**
3242 *  s2io_updt_xpak_counter - Function to update the xpak counters
3243 *  @dev         : pointer to net_device struct
3244 *  Description:
3245 *  This function is to upate the status of the xpak counters value
3246 *  NONE
3247 */
3248static void s2io_updt_xpak_counter(struct net_device *dev)
3249{
3250	u16 flag  = 0x0;
3251	u16 type  = 0x0;
3252	u16 val16 = 0x0;
3253	u64 val64 = 0x0;
3254	u64 addr  = 0x0;
3255
3256	struct s2io_nic *sp = netdev_priv(dev);
3257	struct stat_block *stats = sp->mac_control.stats_info;
3258	struct xpakStat *xstats = &stats->xpak_stat;
3259
3260	/* Check the communication with the MDIO slave */
3261	addr = MDIO_CTRL1;
3262	val64 = 0x0;
3263	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3264	if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
3265		DBG_PRINT(ERR_DBG,
3266			  "ERR: MDIO slave access failed - Returned %llx\n",
3267			  (unsigned long long)val64);
3268		return;
3269	}
3270
3271	/* Check for the expected value of control reg 1 */
3272	if (val64 != MDIO_CTRL1_SPEED10G) {
3273		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - "
3274			  "Returned: %llx- Expected: 0x%x\n",
3275			  (unsigned long long)val64, MDIO_CTRL1_SPEED10G);
3276		return;
3277	}
3278
3279	/* Loading the DOM register to MDIO register */
3280	addr = 0xA100;
3281	s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
3282	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3283
3284	/* Reading the Alarm flags */
3285	addr = 0xA070;
3286	val64 = 0x0;
3287	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3288
3289	flag = CHECKBIT(val64, 0x7);
3290	type = 1;
3291	s2io_chk_xpak_counter(&xstats->alarm_transceiver_temp_high,
3292			      &xstats->xpak_regs_stat,
3293			      0x0, flag, type);
3294
3295	if (CHECKBIT(val64, 0x6))
3296		xstats->alarm_transceiver_temp_low++;
3297
3298	flag = CHECKBIT(val64, 0x3);
3299	type = 2;
3300	s2io_chk_xpak_counter(&xstats->alarm_laser_bias_current_high,
3301			      &xstats->xpak_regs_stat,
3302			      0x2, flag, type);
3303
3304	if (CHECKBIT(val64, 0x2))
3305		xstats->alarm_laser_bias_current_low++;
3306
3307	flag = CHECKBIT(val64, 0x1);
3308	type = 3;
3309	s2io_chk_xpak_counter(&xstats->alarm_laser_output_power_high,
3310			      &xstats->xpak_regs_stat,
3311			      0x4, flag, type);
3312
3313	if (CHECKBIT(val64, 0x0))
3314		xstats->alarm_laser_output_power_low++;
3315
3316	/* Reading the Warning flags */
3317	addr = 0xA074;
3318	val64 = 0x0;
3319	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3320
3321	if (CHECKBIT(val64, 0x7))
3322		xstats->warn_transceiver_temp_high++;
3323
3324	if (CHECKBIT(val64, 0x6))
3325		xstats->warn_transceiver_temp_low++;
3326
3327	if (CHECKBIT(val64, 0x3))
3328		xstats->warn_laser_bias_current_high++;
3329
3330	if (CHECKBIT(val64, 0x2))
3331		xstats->warn_laser_bias_current_low++;
3332
3333	if (CHECKBIT(val64, 0x1))
3334		xstats->warn_laser_output_power_high++;
3335
3336	if (CHECKBIT(val64, 0x0))
3337		xstats->warn_laser_output_power_low++;
3338}
3339
3340/**
3341 *  wait_for_cmd_complete - waits for a command to complete.
3342 *  @sp : private member of the device structure, which is a pointer to the
3343 *  s2io_nic structure.
3344 *  Description: Function that waits for a command to Write into RMAC
3345 *  ADDR DATA registers to be completed and returns either success or
3346 *  error depending on whether the command was complete or not.
3347 *  Return value:
3348 *   SUCCESS on success and FAILURE on failure.
3349 */
3350
3351static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3352				 int bit_state)
3353{
3354	int ret = FAILURE, cnt = 0, delay = 1;
3355	u64 val64;
3356
3357	if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3358		return FAILURE;
3359
3360	do {
3361		val64 = readq(addr);
3362		if (bit_state == S2IO_BIT_RESET) {
3363			if (!(val64 & busy_bit)) {
3364				ret = SUCCESS;
3365				break;
3366			}
3367		} else {
3368			if (val64 & busy_bit) {
3369				ret = SUCCESS;
3370				break;
3371			}
3372		}
3373
3374		if (in_interrupt())
3375			mdelay(delay);
3376		else
3377			msleep(delay);
3378
3379		if (++cnt >= 10)
3380			delay = 50;
3381	} while (cnt < 20);
3382	return ret;
3383}
3384/**
3385 * check_pci_device_id - Checks if the device id is supported
3386 * @id : device id
3387 * Description: Function to check if the pci device id is supported by driver.
3388 * Return value: Actual device id if supported else PCI_ANY_ID
3389 */
3390static u16 check_pci_device_id(u16 id)
3391{
3392	switch (id) {
3393	case PCI_DEVICE_ID_HERC_WIN:
3394	case PCI_DEVICE_ID_HERC_UNI:
3395		return XFRAME_II_DEVICE;
3396	case PCI_DEVICE_ID_S2IO_UNI:
3397	case PCI_DEVICE_ID_S2IO_WIN:
3398		return XFRAME_I_DEVICE;
3399	default:
3400		return PCI_ANY_ID;
3401	}
3402}
3403
3404/**
3405 *  s2io_reset - Resets the card.
3406 *  @sp : private member of the device structure.
3407 *  Description: Function to Reset the card. This function then also
3408 *  restores the previously saved PCI configuration space registers as
3409 *  the card reset also resets the configuration space.
3410 *  Return value:
3411 *  void.
3412 */
3413
3414static void s2io_reset(struct s2io_nic *sp)
3415{
3416	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3417	u64 val64;
3418	u16 subid, pci_cmd;
3419	int i;
3420	u16 val16;
3421	unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3422	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3423	struct stat_block *stats;
3424	struct swStat *swstats;
3425
3426	DBG_PRINT(INIT_DBG, "%s: Resetting XFrame card %s\n",
3427		  __func__, pci_name(sp->pdev));
3428
3429	/* Back up  the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
3430	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3431
3432	val64 = SW_RESET_ALL;
3433	writeq(val64, &bar0->sw_reset);
3434	if (strstr(sp->product_name, "CX4"))
3435		msleep(750);
3436	msleep(250);
3437	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3438
3439		/* Restore the PCI state saved during initialization. */
3440		pci_restore_state(sp->pdev);
3441		pci_save_state(sp->pdev);
3442		pci_read_config_word(sp->pdev, 0x2, &val16);
3443		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3444			break;
3445		msleep(200);
3446	}
3447
3448	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID)
3449		DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__);
3450
3451	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3452
3453	s2io_init_pci(sp);
3454
3455	/* Set swapper to enable I/O register access */
3456	s2io_set_swapper(sp);
3457
3458	/* restore mac_addr entries */
3459	do_s2io_restore_unicast_mc(sp);
3460
3461	/* Restore the MSIX table entries from local variables */
3462	restore_xmsi_data(sp);
3463
3464	/* Clear certain PCI/PCI-X fields after reset */
3465	if (sp->device_type == XFRAME_II_DEVICE) {
3466		/* Clear "detected parity error" bit */
3467		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3468
3469		/* Clearing PCIX Ecc status register */
3470		pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3471
3472		/* Clearing PCI_STATUS error reflected here */
3473		writeq(s2BIT(62), &bar0->txpic_int_reg);
3474	}
3475
3476	/* Reset device statistics maintained by OS */
3477	memset(&sp->stats, 0, sizeof(struct net_device_stats));
3478
3479	stats = sp->mac_control.stats_info;
3480	swstats = &stats->sw_stat;
3481
3482	/* save link up/down time/cnt, reset/memory/watchdog cnt */
3483	up_cnt = swstats->link_up_cnt;
3484	down_cnt = swstats->link_down_cnt;
3485	up_time = swstats->link_up_time;
3486	down_time = swstats->link_down_time;
3487	reset_cnt = swstats->soft_reset_cnt;
3488	mem_alloc_cnt = swstats->mem_allocated;
3489	mem_free_cnt = swstats->mem_freed;
3490	watchdog_cnt = swstats->watchdog_timer_cnt;
3491
3492	memset(stats, 0, sizeof(struct stat_block));
3493
3494	/* restore link up/down time/cnt, reset/memory/watchdog cnt */
3495	swstats->link_up_cnt = up_cnt;
3496	swstats->link_down_cnt = down_cnt;
3497	swstats->link_up_time = up_time;
3498	swstats->link_down_time = down_time;
3499	swstats->soft_reset_cnt = reset_cnt;
3500	swstats->mem_allocated = mem_alloc_cnt;
3501	swstats->mem_freed = mem_free_cnt;
3502	swstats->watchdog_timer_cnt = watchdog_cnt;
3503
3504	/* SXE-002: Configure link and activity LED to turn it off */
3505	subid = sp->pdev->subsystem_device;
3506	if (((subid & 0xFF) >= 0x07) &&
3507	    (sp->device_type == XFRAME_I_DEVICE)) {
3508		val64 = readq(&bar0->gpio_control);
3509		val64 |= 0x0000800000000000ULL;
3510		writeq(val64, &bar0->gpio_control);
3511		val64 = 0x0411040400000000ULL;
3512		writeq(val64, (void __iomem *)bar0 + 0x2700);
3513	}
3514
3515	/*
3516	 * Clear spurious ECC interrupts that would have occurred on
3517	 * XFRAME II cards after reset.
3518	 */
3519	if (sp->device_type == XFRAME_II_DEVICE) {
3520		val64 = readq(&bar0->pcc_err_reg);
3521		writeq(val64, &bar0->pcc_err_reg);
3522	}
3523
3524	sp->device_enabled_once = false;
3525}
3526
3527/**
3528 *  s2io_set_swapper - to set the swapper controle on the card
3529 *  @sp : private member of the device structure,
3530 *  pointer to the s2io_nic structure.
3531 *  Description: Function to set the swapper control on the card
3532 *  correctly depending on the 'endianness' of the system.
3533 *  Return value:
3534 *  SUCCESS on success and FAILURE on failure.
3535 */
3536
3537static int s2io_set_swapper(struct s2io_nic *sp)
3538{
3539	struct net_device *dev = sp->dev;
3540	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3541	u64 val64, valt, valr;
3542
3543	/*
3544	 * Set proper endian settings and verify the same by reading
3545	 * the PIF Feed-back register.
3546	 */
3547
3548	val64 = readq(&bar0->pif_rd_swapper_fb);
3549	if (val64 != 0x0123456789ABCDEFULL) {
3550		int i = 0;
3551		static const u64 value[] = {
3552			0xC30000C3C30000C3ULL,	/* FE=1, SE=1 */
3553			0x8100008181000081ULL,	/* FE=1, SE=0 */
3554			0x4200004242000042ULL,	/* FE=0, SE=1 */
3555			0			/* FE=0, SE=0 */
3556		};
3557
3558		while (i < 4) {
3559			writeq(value[i], &bar0->swapper_ctrl);
3560			val64 = readq(&bar0->pif_rd_swapper_fb);
3561			if (val64 == 0x0123456789ABCDEFULL)
3562				break;
3563			i++;
3564		}
3565		if (i == 4) {
3566			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, "
3567				  "feedback read %llx\n",
3568				  dev->name, (unsigned long long)val64);
3569			return FAILURE;
3570		}
3571		valr = value[i];
3572	} else {
3573		valr = readq(&bar0->swapper_ctrl);
3574	}
3575
3576	valt = 0x0123456789ABCDEFULL;
3577	writeq(valt, &bar0->xmsi_address);
3578	val64 = readq(&bar0->xmsi_address);
3579
3580	if (val64 != valt) {
3581		int i = 0;
3582		static const u64 value[] = {
3583			0x00C3C30000C3C300ULL,	/* FE=1, SE=1 */
3584			0x0081810000818100ULL,	/* FE=1, SE=0 */
3585			0x0042420000424200ULL,	/* FE=0, SE=1 */
3586			0			/* FE=0, SE=0 */
3587		};
3588
3589		while (i < 4) {
3590			writeq((value[i] | valr), &bar0->swapper_ctrl);
3591			writeq(valt, &bar0->xmsi_address);
3592			val64 = readq(&bar0->xmsi_address);
3593			if (val64 == valt)
3594				break;
3595			i++;
3596		}
3597		if (i == 4) {
3598			unsigned long long x = val64;
3599			DBG_PRINT(ERR_DBG,
3600				  "Write failed, Xmsi_addr reads:0x%llx\n", x);
3601			return FAILURE;
3602		}
3603	}
3604	val64 = readq(&bar0->swapper_ctrl);
3605	val64 &= 0xFFFF000000000000ULL;
3606
3607#ifdef __BIG_ENDIAN
3608	/*
3609	 * The device by default set to a big endian format, so a
3610	 * big endian driver need not set anything.
3611	 */
3612	val64 |= (SWAPPER_CTRL_TXP_FE |
3613		  SWAPPER_CTRL_TXP_SE |
3614		  SWAPPER_CTRL_TXD_R_FE |
3615		  SWAPPER_CTRL_TXD_W_FE |
3616		  SWAPPER_CTRL_TXF_R_FE |
3617		  SWAPPER_CTRL_RXD_R_FE |
3618		  SWAPPER_CTRL_RXD_W_FE |
3619		  SWAPPER_CTRL_RXF_W_FE |
3620		  SWAPPER_CTRL_XMSI_FE |
3621		  SWAPPER_CTRL_STATS_FE |
3622		  SWAPPER_CTRL_STATS_SE);
3623	if (sp->config.intr_type == INTA)
3624		val64 |= SWAPPER_CTRL_XMSI_SE;
3625	writeq(val64, &bar0->swapper_ctrl);
3626#else
3627	/*
3628	 * Initially we enable all bits to make it accessible by the
3629	 * driver, then we selectively enable only those bits that
3630	 * we want to set.
3631	 */
3632	val64 |= (SWAPPER_CTRL_TXP_FE |
3633		  SWAPPER_CTRL_TXP_SE |
3634		  SWAPPER_CTRL_TXD_R_FE |
3635		  SWAPPER_CTRL_TXD_R_SE |
3636		  SWAPPER_CTRL_TXD_W_FE |
3637		  SWAPPER_CTRL_TXD_W_SE |
3638		  SWAPPER_CTRL_TXF_R_FE |
3639		  SWAPPER_CTRL_RXD_R_FE |
3640		  SWAPPER_CTRL_RXD_R_SE |
3641		  SWAPPER_CTRL_RXD_W_FE |
3642		  SWAPPER_CTRL_RXD_W_SE |
3643		  SWAPPER_CTRL_RXF_W_FE |
3644		  SWAPPER_CTRL_XMSI_FE |
3645		  SWAPPER_CTRL_STATS_FE |
3646		  SWAPPER_CTRL_STATS_SE);
3647	if (sp->config.intr_type == INTA)
3648		val64 |= SWAPPER_CTRL_XMSI_SE;
3649	writeq(val64, &bar0->swapper_ctrl);
3650#endif
3651	val64 = readq(&bar0->swapper_ctrl);
3652
3653	/*
3654	 * Verifying if endian settings are accurate by reading a
3655	 * feedback register.
3656	 */
3657	val64 = readq(&bar0->pif_rd_swapper_fb);
3658	if (val64 != 0x0123456789ABCDEFULL) {
3659		/* Endian settings are incorrect, calls for another dekko. */
3660		DBG_PRINT(ERR_DBG,
3661			  "%s: Endian settings are wrong, feedback read %llx\n",
3662			  dev->name, (unsigned long long)val64);
3663		return FAILURE;
3664	}
3665
3666	return SUCCESS;
3667}
3668
3669static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3670{
3671	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3672	u64 val64;
3673	int ret = 0, cnt = 0;
3674
3675	do {
3676		val64 = readq(&bar0->xmsi_access);
3677		if (!(val64 & s2BIT(15)))
3678			break;
3679		mdelay(1);
3680		cnt++;
3681	} while (cnt < 5);
3682	if (cnt == 5) {
3683		DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3684		ret = 1;
3685	}
3686
3687	return ret;
3688}
3689
3690static void restore_xmsi_data(struct s2io_nic *nic)
3691{
3692	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3693	u64 val64;
3694	int i, msix_index;
3695
3696	if (nic->device_type == XFRAME_I_DEVICE)
3697		return;
3698
3699	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3700		msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3701		writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3702		writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3703		val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
3704		writeq(val64, &bar0->xmsi_access);
3705		if (wait_for_msix_trans(nic, msix_index)) {
3706			DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3707				  __func__, msix_index);
3708			continue;
3709		}
3710	}
3711}
3712
3713static void store_xmsi_data(struct s2io_nic *nic)
3714{
3715	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3716	u64 val64, addr, data;
3717	int i, msix_index;
3718
3719	if (nic->device_type == XFRAME_I_DEVICE)
3720		return;
3721
3722	/* Store and display */
3723	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3724		msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3725		val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
3726		writeq(val64, &bar0->xmsi_access);
3727		if (wait_for_msix_trans(nic, msix_index)) {
3728			DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3729				  __func__, msix_index);
3730			continue;
3731		}
3732		addr = readq(&bar0->xmsi_address);
3733		data = readq(&bar0->xmsi_data);
3734		if (addr && data) {
3735			nic->msix_info[i].addr = addr;
3736			nic->msix_info[i].data = data;
3737		}
3738	}
3739}
3740
/*
 * s2io_enable_msi_x - allocate MSI-X bookkeeping, program Rx routing and
 * enable MSI-X on the device. Entry 0 is the alarm vector; entries 1..n-1
 * serve the Rx rings. Returns 0 on success, -ENOMEM on failure.
 */
static int s2io_enable_msi_x(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 rx_mat;
	u16 msi_control; /* Temp variable */
	int ret, i, j, msix_indx = 1;
	int size;
	struct stat_block *stats = nic->mac_control.stats_info;
	struct swStat *swstats = &stats->sw_stat;

	/* Vector table handed to pci_enable_msix_range() */
	size = nic->num_entries * sizeof(struct msix_entry);
	nic->entries = kzalloc(size, GFP_KERNEL);
	if (!nic->entries) {
		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
			  __func__);
		swstats->mem_alloc_fail_cnt++;
		return -ENOMEM;
	}
	swstats->mem_allocated += size;

	/* Driver-side bookkeeping paired with each vector */
	size = nic->num_entries * sizeof(struct s2io_msix_entry);
	nic->s2io_entries = kzalloc(size, GFP_KERNEL);
	if (!nic->s2io_entries) {
		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
			  __func__);
		swstats->mem_alloc_fail_cnt++;
		kfree(nic->entries);
		swstats->mem_freed
			+= (nic->num_entries * sizeof(struct msix_entry));
		return -ENOMEM;
	}
	swstats->mem_allocated += size;

	/* Entry 0: alarm/general interrupt vector */
	nic->entries[0].entry = 0;
	nic->s2io_entries[0].entry = 0;
	nic->s2io_entries[0].in_use = MSIX_FLG;
	nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
	nic->s2io_entries[0].arg = &nic->mac_control.fifos;

	/* Remaining entries: one per ring, table slots spaced 8 apart */
	for (i = 1; i < nic->num_entries; i++) {
		nic->entries[i].entry = ((i - 1) * 8) + 1;
		nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
		nic->s2io_entries[i].arg = NULL;
		nic->s2io_entries[i].in_use = 0;
	}

	/* Route each Rx ring's interrupt to its MSI-X vector */
	rx_mat = readq(&bar0->rx_mat);
	for (j = 0; j < nic->config.rx_ring_num; j++) {
		rx_mat |= RX_MAT_SET(j, msix_indx);
		nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
		nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
		nic->s2io_entries[j+1].in_use = MSIX_FLG;
		msix_indx += 8;
	}
	writeq(rx_mat, &bar0->rx_mat);
	readq(&bar0->rx_mat);

	ret = pci_enable_msix_range(nic->pdev, nic->entries,
				    nic->num_entries, nic->num_entries);
	/* We fail init if error or we get less vectors than min required */
	if (ret < 0) {
		DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
		kfree(nic->entries);
		swstats->mem_freed += nic->num_entries *
			sizeof(struct msix_entry);
		kfree(nic->s2io_entries);
		swstats->mem_freed += nic->num_entries *
			sizeof(struct s2io_msix_entry);
		nic->entries = NULL;
		nic->s2io_entries = NULL;
		return -ENOMEM;
	}

	/*
	 * To enable MSI-X, MSI also needs to be enabled, due to a bug
	 * in the herc NIC. (Temp change, needs to be removed later)
	 */
	pci_read_config_word(nic->pdev, 0x42, &msi_control);
	msi_control |= 0x1; /* Enable MSI */
	pci_write_config_word(nic->pdev, 0x42, msi_control);

	return 0;
}
3824
3825/* Handle software interrupt used during MSI(X) test */
3826static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3827{
3828	struct s2io_nic *sp = dev_id;
3829
3830	sp->msi_detected = 1;
3831	wake_up(&sp->msi_wait);
3832
3833	return IRQ_HANDLED;
3834}
3835
3836/* Test interrupt path by forcing a a software IRQ */
3837static int s2io_test_msi(struct s2io_nic *sp)
3838{
3839	struct pci_dev *pdev = sp->pdev;
3840	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3841	int err;
3842	u64 val64, saved64;
3843
3844	err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
3845			  sp->name, sp);
3846	if (err) {
3847		DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
3848			  sp->dev->name, pci_name(pdev), pdev->irq);
3849		return err;
3850	}
3851
3852	init_waitqueue_head(&sp->msi_wait);
3853	sp->msi_detected = 0;
3854
3855	saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3856	val64 |= SCHED_INT_CTRL_ONE_SHOT;
3857	val64 |= SCHED_INT_CTRL_TIMER_EN;
3858	val64 |= SCHED_INT_CTRL_INT2MSI(1);
3859	writeq(val64, &bar0->scheduled_int_ctrl);
3860
3861	wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3862
3863	if (!sp->msi_detected) {
3864		/* MSI(X) test failed, go back to INTx mode */
3865		DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
3866			  "using MSI(X) during test\n",
3867			  sp->dev->name, pci_name(pdev));
3868
3869		err = -EOPNOTSUPP;
3870	}
3871
3872	free_irq(sp->entries[1].vector, sp);
3873
3874	writeq(saved64, &bar0->scheduled_int_ctrl);
3875
3876	return err;
3877}
3878
3879static void remove_msix_isr(struct s2io_nic *sp)
3880{
3881	int i;
3882	u16 msi_control;
3883
3884	for (i = 0; i < sp->num_entries; i++) {
3885		if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) {
3886			int vector = sp->entries[i].vector;
3887			void *arg = sp->s2io_entries[i].arg;
3888			free_irq(vector, arg);
3889		}
3890	}
3891
3892	kfree(sp->entries);
3893	kfree(sp->s2io_entries);
3894	sp->entries = NULL;
3895	sp->s2io_entries = NULL;
3896
3897	pci_read_config_word(sp->pdev, 0x42, &msi_control);
3898	msi_control &= 0xFFFE; /* Disable MSI */
3899	pci_write_config_word(sp->pdev, 0x42, msi_control);
3900
3901	pci_disable_msix(sp->pdev);
3902}
3903
/* Release the legacy INTA interrupt line registered for this device */
static void remove_inta_isr(struct s2io_nic *sp)
{
	free_irq(sp->pdev->irq, sp->dev);
}
3908
3909/* ********************************************************* *
3910 * Functions defined below concern the OS part of the driver *
3911 * ********************************************************* */
3912
3913/**
3914 *  s2io_open - open entry point of the driver
3915 *  @dev : pointer to the device structure.
3916 *  Description:
3917 *  This function is the open entry point of the driver. It mainly calls a
3918 *  function to allocate Rx buffers and inserts them into the buffer
3919 *  descriptors and then enables the Rx part of the NIC.
3920 *  Return value:
3921 *  0 on success and an appropriate (-)ve integer as defined in errno.h
3922 *   file on failure.
3923 */
3924
3925static int s2io_open(struct net_device *dev)
3926{
3927	struct s2io_nic *sp = netdev_priv(dev);
3928	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
3929	int err = 0;
3930
3931	/*
3932	 * Make sure you have link off by default every time
3933	 * Nic is initialized
3934	 */
3935	netif_carrier_off(dev);
3936	sp->last_link_state = 0;
3937
3938	/* Initialize H/W and enable interrupts */
3939	err = s2io_card_up(sp);
3940	if (err) {
3941		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3942			  dev->name);
3943		goto hw_init_failed;
3944	}
3945
3946	if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
3947		DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3948		s2io_card_down(sp);
3949		err = -ENODEV;
3950		goto hw_init_failed;
3951	}
3952	s2io_start_all_tx_queue(sp);
3953	return 0;
3954
3955hw_init_failed:
3956	if (sp->config.intr_type == MSI_X) {
3957		if (sp->entries) {
3958			kfree(sp->entries);
3959			swstats->mem_freed += sp->num_entries *
3960				sizeof(struct msix_entry);
3961		}
3962		if (sp->s2io_entries) {
3963			kfree(sp->s2io_entries);
3964			swstats->mem_freed += sp->num_entries *
3965				sizeof(struct s2io_msix_entry);
3966		}
3967	}
3968	return err;
3969}
3970
3971/**
3972 *  s2io_close -close entry point of the driver
3973 *  @dev : device pointer.
3974 *  Description:
3975 *  This is the stop entry point of the driver. It needs to undo exactly
3976 *  whatever was done by the open entry point,thus it's usually referred to
3977 *  as the close function.Among other things this function mainly stops the
3978 *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3979 *  Return value:
3980 *  0 on success and an appropriate (-)ve integer as defined in errno.h
3981 *  file on failure.
3982 */
3983
3984static int s2io_close(struct net_device *dev)
3985{
3986	struct s2io_nic *sp = netdev_priv(dev);
3987	struct config_param *config = &sp->config;
3988	u64 tmp64;
3989	int offset;
3990
3991	/* Return if the device is already closed               *
3992	 *  Can happen when s2io_card_up failed in change_mtu    *
3993	 */
3994	if (!is_s2io_card_up(sp))
3995		return 0;
3996
3997	s2io_stop_all_tx_queue(sp);
3998	/* delete all populated mac entries */
3999	for (offset = 1; offset < config->max_mc_addr; offset++) {
4000		tmp64 = do_s2io_read_unicast_mc(sp, offset);
4001		if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
4002			do_s2io_delete_unicast_mc(sp, tmp64);
4003	}
4004
4005	s2io_card_down(sp);
4006
4007	return 0;
4008}
4009
4010/**
4011 *  s2io_xmit - Tx entry point of te driver
4012 *  @skb : the socket buffer containing the Tx data.
4013 *  @dev : device pointer.
4014 *  Description :
4015 *  This function is the Tx entry point of the driver. S2IO NIC supports
4016 *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
4017 *  NOTE: when device can't queue the pkt,just the trans_start variable will
4018 *  not be upadted.
4019 *  Return value:
4020 *  0 on success & 1 on failure.
4021 */
4022
4023static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4024{
4025	struct s2io_nic *sp = netdev_priv(dev);
4026	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
4027	register u64 val64;
4028	struct TxD *txdp;
4029	struct TxFIFO_element __iomem *tx_fifo;
4030	unsigned long flags = 0;
4031	u16 vlan_tag = 0;
4032	struct fifo_info *fifo = NULL;
4033	int do_spin_lock = 1;
4034	int offload_type;
4035	int enable_per_list_interrupt = 0;
4036	struct config_param *config = &sp->config;
4037	struct mac_info *mac_control = &sp->mac_control;
4038	struct stat_block *stats = mac_control->stats_info;
4039	struct swStat *swstats = &stats->sw_stat;
4040
4041	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
4042
4043	if (unlikely(skb->len <= 0)) {
4044		DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name);
4045		dev_kfree_skb_any(skb);
4046		return NETDEV_TX_OK;
4047	}
4048
4049	if (!is_s2io_card_up(sp)) {
4050		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
4051			  dev->name);
4052		dev_kfree_skb_any(skb);
4053		return NETDEV_TX_OK;
4054	}
4055
4056	queue = 0;
4057	if (vlan_tx_tag_present(skb))
4058		vlan_tag = vlan_tx_tag_get(skb);
4059	if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
4060		if (skb->protocol == htons(ETH_P_IP)) {
4061			struct iphdr *ip;
4062			struct tcphdr *th;
4063			ip = ip_hdr(skb);
4064
4065			if (!ip_is_fragment(ip)) {
4066				th = (struct tcphdr *)(((unsigned char *)ip) +
4067						       ip->ihl*4);
4068
4069				if (ip->protocol == IPPROTO_TCP) {
4070					queue_len = sp->total_tcp_fifos;
4071					queue = (ntohs(th->source) +
4072						 ntohs(th->dest)) &
4073						sp->fifo_selector[queue_len - 1];
4074					if (queue >= queue_len)
4075						queue = queue_len - 1;
4076				} else if (ip->protocol == IPPROTO_UDP) {
4077					queue_len = sp->total_udp_fifos;
4078					queue = (ntohs(th->source) +
4079						 ntohs(th->dest)) &
4080						sp->fifo_selector[queue_len - 1];
4081					if (queue >= queue_len)
4082						queue = queue_len - 1;
4083					queue += sp->udp_fifo_idx;
4084					if (skb->len > 1024)
4085						enable_per_list_interrupt = 1;
4086					do_spin_lock = 0;
4087				}
4088			}
4089		}
4090	} else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
4091		/* get fifo number based on skb->priority value */
4092		queue = config->fifo_mapping
4093			[skb->priority & (MAX_TX_FIFOS - 1)];
4094	fifo = &mac_control->fifos[queue];
4095
4096	if (do_spin_lock)
4097		spin_lock_irqsave(&fifo->tx_lock, flags);
4098	else {
4099		if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
4100			return NETDEV_TX_LOCKED;
4101	}
4102
4103	if (sp->config.multiq) {
4104		if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
4105			spin_unlock_irqrestore(&fifo->tx_lock, flags);
4106			return NETDEV_TX_BUSY;
4107		}
4108	} else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
4109		if (netif_queue_stopped(dev)) {
4110			spin_unlock_irqrestore(&fifo->tx_lock, flags);
4111			return NETDEV_TX_BUSY;
4112		}
4113	}
4114
4115	put_off = (u16)fifo->tx_curr_put_info.offset;
4116	get_off = (u16)fifo->tx_curr_get_info.offset;
4117	txdp = fifo->list_info[put_off].list_virt_addr;
4118
4119	queue_len = fifo->tx_curr_put_info.fifo_len + 1;
4120	/* Avoid "put" pointer going beyond "get" pointer */
4121	if (txdp->Host_Control ||
4122	    ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4123		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4124		s2io_stop_tx_queue(sp, fifo->fifo_no);
4125		dev_kfree_skb_any(skb);
4126		spin_unlock_irqrestore(&fifo->tx_lock, flags);
4127		return NETDEV_TX_OK;
4128	}
4129
4130	offload_type = s2io_offload_type(skb);
4131	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
4132		txdp->Control_1 |= TXD_TCP_LSO_EN;
4133		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
4134	}
4135	if (skb->ip_summed == CHECKSUM_PARTIAL) {
4136		txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN |
4137				    TXD_TX_CKO_TCP_EN |
4138				    TXD_TX_CKO_UDP_EN);
4139	}
4140	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4141	txdp->Control_1 |= TXD_LIST_OWN_XENA;
4142	txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
4143	if (enable_per_list_interrupt)
4144		if (put_off & (queue_len >> 5))
4145			txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
4146	if (vlan_tag) {
4147		txdp->Control_2 |= TXD_VLAN_ENABLE;
4148		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4149	}
4150
4151	frg_len = skb_headlen(skb);
4152	if (offload_type == SKB_GSO_UDP) {
4153		int ufo_size;
4154
4155		ufo_size = s2io_udp_mss(skb);
4156		ufo_size &= ~7;
4157		txdp->Control_1 |= TXD_UFO_EN;
4158		txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
4159		txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
4160#ifdef __BIG_ENDIAN
4161		/* both variants do cpu_to_be64(be32_to_cpu(...)) */
4162		fifo->ufo_in_band_v[put_off] =
4163			(__force u64)skb_shinfo(skb)->ip6_frag_id;
4164#else
4165		fifo->ufo_in_band_v[put_off] =
4166			(__force u64)skb_shinfo(skb)->ip6_frag_id << 32;
4167#endif
4168		txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v;
4169		txdp->Buffer_Pointer = pci_map_single(sp->pdev,
4170						      fifo->ufo_in_band_v,
4171						      sizeof(u64),
4172						      PCI_DMA_TODEVICE);
4173		if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
4174			goto pci_map_failed;
4175		txdp++;
4176	}
4177
4178	txdp->Buffer_Pointer = pci_map_single(sp->pdev, skb->data,
4179					      frg_len, PCI_DMA_TODEVICE);
4180	if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
4181		goto pci_map_failed;
4182
4183	txdp->Host_Control = (unsigned long)skb;
4184	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4185	if (offload_type == SKB_GSO_UDP)
4186		txdp->Control_1 |= TXD_UFO_EN;
4187
4188	frg_cnt = skb_shinfo(skb)->nr_frags;
4189	/* For fragmented SKB. */
4190	for (i = 0; i < frg_cnt; i++) {
4191		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4192		/* A '0' length fragment will be ignored */
4193		if (!skb_frag_size(frag))
4194			continue;
4195		txdp++;
4196		txdp->Buffer_Pointer = (u64)skb_frag_dma_map(&sp->pdev->dev,
4197							     frag, 0,
4198							     skb_frag_size(frag),
4199							     DMA_TO_DEVICE);
4200		txdp->Control_1 = TXD_BUFFER0_SIZE(skb_frag_size(frag));
4201		if (offload_type == SKB_GSO_UDP)
4202			txdp->Control_1 |= TXD_UFO_EN;
4203	}
4204	txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4205
4206	if (offload_type == SKB_GSO_UDP)
4207		frg_cnt++; /* as Txd0 was used for inband header */
4208
4209	tx_fifo = mac_control->tx_FIFO_start[queue];
4210	val64 = fifo->list_info[put_off].list_phy_addr;
4211	writeq(val64, &tx_fifo->TxDL_Pointer);
4212
4213	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4214		 TX_FIFO_LAST_LIST);
4215	if (offload_type)
4216		val64 |= TX_FIFO_SPECIAL_FUNC;
4217
4218	writeq(val64, &tx_fifo->List_Control);
4219
4220	mmiowb();
4221
4222	put_off++;
4223	if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
4224		put_off = 0;
4225	fifo->tx_curr_put_info.offset = put_off;
4226
4227	/* Avoid "put" pointer going beyond "get" pointer */
4228	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4229		swstats->fifo_full_cnt++;
4230		DBG_PRINT(TX_DBG,
4231			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4232			  put_off, get_off);
4233		s2io_stop_tx_queue(sp, fifo->fifo_no);
4234	}
4235	swstats->mem_allocated += skb->truesize;
4236	spin_unlock_irqrestore(&fifo->tx_lock, flags);
4237
4238	if (sp->config.intr_type == MSI_X)
4239		tx_intr_handler(fifo);
4240
4241	return NETDEV_TX_OK;
4242
4243pci_map_failed:
4244	swstats->pci_map_fail_cnt++;
4245	s2io_stop_tx_queue(sp, fifo->fifo_no);
4246	swstats->mem_freed += skb->truesize;
4247	dev_kfree_skb_any(skb);
4248	spin_unlock_irqrestore(&fifo->tx_lock, flags);
4249	return NETDEV_TX_OK;
4250}
4251
/*
 * s2io_alarm_handle - periodic alarm timer callback.
 * @data: the s2io_nic this timer belongs to (cast from unsigned long).
 *
 * Scans the adapter for error/alarm conditions and re-arms itself to
 * run again in half a second.
 */
static void
s2io_alarm_handle(unsigned long data)
{
	struct s2io_nic *sp = (struct s2io_nic *)data;
	struct net_device *dev = sp->dev;

	s2io_handle_errors(dev);
	mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
}
4261
/*
 * s2io_msix_ring_handle - MSI-X interrupt handler for a single Rx ring.
 * @irq: interrupt number (unused).
 * @dev_id: the ring_info this vector was registered for.
 *
 * In NAPI mode the ring's vector is masked and the NAPI poll is
 * scheduled; otherwise the ring is drained and its Rx buffers are
 * replenished directly from interrupt context.
 */
static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
{
	struct ring_info *ring = (struct ring_info *)dev_id;
	struct s2io_nic *sp = ring->nic;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	if (unlikely(!is_s2io_card_up(sp)))
		return IRQ_HANDLED;

	if (sp->config.napi) {
		u8 __iomem *addr = NULL;
		u8 val8 = 0;

		/* Mask this ring's bit in xmsi_mask_reg until the NAPI
		 * poll re-enables it.  NOTE(review): the (7 - ring_no)
		 * byte offset and the 0x7f value for ring 0 follow the
		 * hardware register layout — confirm against the Xframe
		 * register specification. */
		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
		addr += (7 - ring->ring_no);
		val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
		writeb(val8, addr);
		val8 = readb(addr);	/* read back to flush the posted write */
		napi_schedule(&ring->napi);
	} else {
		rx_intr_handler(ring, 0);
		s2io_chk_rx_buffers(sp, ring);
	}

	return IRQ_HANDLED;
}
4288
/*
 * s2io_msix_fifo_handle - MSI-X interrupt handler shared by all Tx FIFOs.
 * @irq: interrupt number (unused).
 * @dev_id: pointer to the first element of the fifo_info array.
 *
 * Handles Tx-completion (TXTRAFFIC) and PIC interrupts for every
 * configured fifo.  Returns IRQ_NONE when the interrupt was not raised
 * by this device.
 */
static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
{
	int i;
	struct fifo_info *fifos = (struct fifo_info *)dev_id;
	struct s2io_nic *sp = fifos->nic;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	struct config_param *config  = &sp->config;
	u64 reason;

	if (unlikely(!is_s2io_card_up(sp)))
		return IRQ_NONE;

	reason = readq(&bar0->general_int_status);
	if (unlikely(reason == S2IO_MINUS_ONE))
		/* Nothing much can be done. Get out */
		return IRQ_HANDLED;

	if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
		/* Mask all interrupts while the Tx work is processed. */
		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

		if (reason & GEN_INTR_TXPIC)
			s2io_txpic_intr_handle(sp);

		if (reason & GEN_INTR_TXTRAFFIC)
			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

		for (i = 0; i < config->tx_fifo_num; i++)
			tx_intr_handler(&fifos[i]);

		/* Restore the saved interrupt mask and flush with a read. */
		writeq(sp->general_int_mask, &bar0->general_int_mask);
		readl(&bar0->general_int_status);
		return IRQ_HANDLED;
	}
	/* The interrupt was not raised by us */
	return IRQ_NONE;
}
4325
/*
 * s2io_txpic_intr_handle - handle TxPIC (GPIO / link state) interrupts.
 * @sp: device private structure.
 *
 * Evaluates the GPIO interrupt register to detect link-up/link-down
 * transitions, updates adapter control / LED state accordingly and
 * re-arms the GPIO interrupt masks so only the opposite transition
 * stays unmasked.
 */
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;

	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
		    (val64 & GPIO_INT_REG_LINK_UP)) {
			/*
			 * This is unstable state so clear both up/down
			 * interrupt and adapter to re-evaluate the link state.
			 */
			val64 |= GPIO_INT_REG_LINK_DOWN;
			val64 |= GPIO_INT_REG_LINK_UP;
			writeq(val64, &bar0->gpio_int_reg);
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~(GPIO_INT_MASK_LINK_UP |
				   GPIO_INT_MASK_LINK_DOWN);
			writeq(val64, &bar0->gpio_int_mask);
		} else if (val64 & GPIO_INT_REG_LINK_UP) {
			val64 = readq(&bar0->adapter_status);
			/* Enable Adapter */
			val64 = readq(&bar0->adapter_control);
			val64 |= ADAPTER_CNTL_EN;
			writeq(val64, &bar0->adapter_control);
			val64 |= ADAPTER_LED_ON;
			writeq(val64, &bar0->adapter_control);
			if (!sp->device_enabled_once)
				sp->device_enabled_once = 1;

			s2io_link(sp, LINK_UP);
			/*
			 * unmask link-down interrupt and mask link-up
			 * interrupt
			 */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
			val64 |= GPIO_INT_MASK_LINK_UP;
			writeq(val64, &bar0->gpio_int_mask);

		} else if (val64 & GPIO_INT_REG_LINK_DOWN) {
			val64 = readq(&bar0->adapter_status);
			s2io_link(sp, LINK_DOWN);
			/* Link is down so unmask link-up interrupt */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_UP;
			val64 |= GPIO_INT_MASK_LINK_DOWN;
			writeq(val64, &bar0->gpio_int_mask);

			/* turn off LED */
			val64 = readq(&bar0->adapter_control);
			val64 = val64 & (~ADAPTER_LED_ON);
			writeq(val64, &bar0->adapter_control);
		}
	}
	/* NOTE(review): trailing read presumably flushes the mask writes
	 * above — confirm against the device's register-access rules. */
	val64 = readq(&bar0->gpio_int_mask);
}
4385
4386/**
4387 *  do_s2io_chk_alarm_bit - Check for alarm and incrment the counter
4388 *  @value: alarm bits
4389 *  @addr: address value
4390 *  @cnt: counter variable
4391 *  Description: Check for alarm and increment the counter
4392 *  Return Value:
4393 *  1 - if alarm bit set
4394 *  0 - if alarm bit is not set
4395 */
4396static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr,
4397				 unsigned long long *cnt)
4398{
4399	u64 val64;
4400	val64 = readq(addr);
4401	if (val64 & value) {
4402		writeq(val64, addr);
4403		(*cnt)++;
4404		return 1;
4405	}
4406	return 0;
4407
4408}
4409
4410/**
4411 *  s2io_handle_errors - Xframe error indication handler
4412 *  @nic: device private variable
4413 *  Description: Handle alarms such as loss of link, single or
4414 *  double ECC errors, critical and serious errors.
4415 *  Return Value:
4416 *  NONE
4417 */
4418static void s2io_handle_errors(void *dev_id)
4419{
4420	struct net_device *dev = (struct net_device *)dev_id;
4421	struct s2io_nic *sp = netdev_priv(dev);
4422	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4423	u64 temp64 = 0, val64 = 0;
4424	int i = 0;
4425
4426	struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4427	struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4428
4429	if (!is_s2io_card_up(sp))
4430		return;
4431
4432	if (pci_channel_offline(sp->pdev))
4433		return;
4434
4435	memset(&sw_stat->ring_full_cnt, 0,
4436	       sizeof(sw_stat->ring_full_cnt));
4437
4438	/* Handling the XPAK counters update */
4439	if (stats->xpak_timer_count < 72000) {
4440		/* waiting for an hour */
4441		stats->xpak_timer_count++;
4442	} else {
4443		s2io_updt_xpak_counter(dev);
4444		/* reset the count to zero */
4445		stats->xpak_timer_count = 0;
4446	}
4447
4448	/* Handling link status change error Intr */
4449	if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4450		val64 = readq(&bar0->mac_rmac_err_reg);
4451		writeq(val64, &bar0->mac_rmac_err_reg);
4452		if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4453			schedule_work(&sp->set_link_task);
4454	}
4455
4456	/* In case of a serious error, the device will be Reset. */
4457	if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4458				  &sw_stat->serious_err_cnt))
4459		goto reset;
4460
4461	/* Check for data parity error */
4462	if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4463				  &sw_stat->parity_err_cnt))
4464		goto reset;
4465
4466	/* Check for ring full counter */
4467	if (sp->device_type == XFRAME_II_DEVICE) {
4468		val64 = readq(&bar0->ring_bump_counter1);
4469		for (i = 0; i < 4; i++) {
4470			temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4471			temp64 >>= 64 - ((i+1)*16);
4472			sw_stat->ring_full_cnt[i] += temp64;
4473		}
4474
4475		val64 = readq(&bar0->ring_bump_counter2);
4476		for (i = 0; i < 4; i++) {
4477			temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4478			temp64 >>= 64 - ((i+1)*16);
4479			sw_stat->ring_full_cnt[i+4] += temp64;
4480		}
4481	}
4482
4483	val64 = readq(&bar0->txdma_int_status);
4484	/*check for pfc_err*/
4485	if (val64 & TXDMA_PFC_INT) {
4486		if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
4487					  PFC_MISC_0_ERR | PFC_MISC_1_ERR |
4488					  PFC_PCIX_ERR,
4489					  &bar0->pfc_err_reg,
4490					  &sw_stat->pfc_err_cnt))
4491			goto reset;
4492		do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR,
4493				      &bar0->pfc_err_reg,
4494				      &sw_stat->pfc_err_cnt);
4495	}
4496
4497	/*check for tda_err*/
4498	if (val64 & TXDMA_TDA_INT) {
4499		if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR |
4500					  TDA_SM0_ERR_ALARM |
4501					  TDA_SM1_ERR_ALARM,
4502					  &bar0->tda_err_reg,
4503					  &sw_stat->tda_err_cnt))
4504			goto reset;
4505		do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4506				      &bar0->tda_err_reg,
4507				      &sw_stat->tda_err_cnt);
4508	}
4509	/*check for pcc_err*/
4510	if (val64 & TXDMA_PCC_INT) {
4511		if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
4512					  PCC_N_SERR | PCC_6_COF_OV_ERR |
4513					  PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
4514					  PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR |
4515					  PCC_TXB_ECC_DB_ERR,
4516					  &bar0->pcc_err_reg,
4517					  &sw_stat->pcc_err_cnt))
4518			goto reset;
4519		do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4520				      &bar0->pcc_err_reg,
4521				      &sw_stat->pcc_err_cnt);
4522	}
4523
4524	/*check for tti_err*/
4525	if (val64 & TXDMA_TTI_INT) {
4526		if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM,
4527					  &bar0->tti_err_reg,
4528					  &sw_stat->tti_err_cnt))
4529			goto reset;
4530		do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4531				      &bar0->tti_err_reg,
4532				      &sw_stat->tti_err_cnt);
4533	}
4534
4535	/*check for lso_err*/
4536	if (val64 & TXDMA_LSO_INT) {
4537		if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT |
4538					  LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4539					  &bar0->lso_err_reg,
4540					  &sw_stat->lso_err_cnt))
4541			goto reset;
4542		do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4543				      &bar0->lso_err_reg,
4544				      &sw_stat->lso_err_cnt);
4545	}
4546
4547	/*check for tpa_err*/
4548	if (val64 & TXDMA_TPA_INT) {
4549		if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM,
4550					  &bar0->tpa_err_reg,
4551					  &sw_stat->tpa_err_cnt))
4552			goto reset;
4553		do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP,
4554				      &bar0->tpa_err_reg,
4555				      &sw_stat->tpa_err_cnt);
4556	}
4557
4558	/*check for sm_err*/
4559	if (val64 & TXDMA_SM_INT) {
4560		if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM,
4561					  &bar0->sm_err_reg,
4562					  &sw_stat->sm_err_cnt))
4563			goto reset;
4564	}
4565
4566	val64 = readq(&bar0->mac_int_status);
4567	if (val64 & MAC_INT_STATUS_TMAC_INT) {
4568		if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4569					  &bar0->mac_tmac_err_reg,
4570					  &sw_stat->mac_tmac_err_cnt))
4571			goto reset;
4572		do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
4573				      TMAC_DESC_ECC_SG_ERR |
4574				      TMAC_DESC_ECC_DB_ERR,
4575				      &bar0->mac_tmac_err_reg,
4576				      &sw_stat->mac_tmac_err_cnt);
4577	}
4578
4579	val64 = readq(&bar0->xgxs_int_status);
4580	if (val64 & XGXS_INT_STATUS_TXGXS) {
4581		if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4582					  &bar0->xgxs_txgxs_err_reg,
4583					  &sw_stat->xgxs_txgxs_err_cnt))
4584			goto reset;
4585		do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4586				      &bar0->xgxs_txgxs_err_reg,
4587				      &sw_stat->xgxs_txgxs_err_cnt);
4588	}
4589
4590	val64 = readq(&bar0->rxdma_int_status);
4591	if (val64 & RXDMA_INT_RC_INT_M) {
4592		if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR |
4593					  RC_FTC_ECC_DB_ERR |
4594					  RC_PRCn_SM_ERR_ALARM |
4595					  RC_FTC_SM_ERR_ALARM,
4596					  &bar0->rc_err_reg,
4597					  &sw_stat->rc_err_cnt))
4598			goto reset;
4599		do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR |
4600				      RC_FTC_ECC_SG_ERR |
4601				      RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4602				      &sw_stat->rc_err_cnt);
4603		if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn |
4604					  PRC_PCI_AB_WR_Rn |
4605					  PRC_PCI_AB_F_WR_Rn,
4606					  &bar0->prc_pcix_err_reg,
4607					  &sw_stat->prc_pcix_err_cnt))
4608			goto reset;
4609		do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn |
4610				      PRC_PCI_DP_WR_Rn |
4611				      PRC_PCI_DP_F_WR_Rn,
4612				      &bar0->prc_pcix_err_reg,
4613				      &sw_stat->prc_pcix_err_cnt);
4614	}
4615
4616	if (val64 & RXDMA_INT_RPA_INT_M) {
4617		if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4618					  &bar0->rpa_err_reg,
4619					  &sw_stat->rpa_err_cnt))
4620			goto reset;
4621		do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4622				      &bar0->rpa_err_reg,
4623				      &sw_stat->rpa_err_cnt);
4624	}
4625
4626	if (val64 & RXDMA_INT_RDA_INT_M) {
4627		if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR |
4628					  RDA_FRM_ECC_DB_N_AERR |
4629					  RDA_SM1_ERR_ALARM |
4630					  RDA_SM0_ERR_ALARM |
4631					  RDA_RXD_ECC_DB_SERR,
4632					  &bar0->rda_err_reg,
4633					  &sw_stat->rda_err_cnt))
4634			goto reset;
4635		do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR |
4636				      RDA_FRM_ECC_SG_ERR |
4637				      RDA_MISC_ERR |
4638				      RDA_PCIX_ERR,
4639				      &bar0->rda_err_reg,
4640				      &sw_stat->rda_err_cnt);
4641	}
4642
4643	if (val64 & RXDMA_INT_RTI_INT_M) {
4644		if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM,
4645					  &bar0->rti_err_reg,
4646					  &sw_stat->rti_err_cnt))
4647			goto reset;
4648		do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4649				      &bar0->rti_err_reg,
4650				      &sw_stat->rti_err_cnt);
4651	}
4652
4653	val64 = readq(&bar0->mac_int_status);
4654	if (val64 & MAC_INT_STATUS_RMAC_INT) {
4655		if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4656					  &bar0->mac_rmac_err_reg,
4657					  &sw_stat->mac_rmac_err_cnt))
4658			goto reset;
4659		do_s2io_chk_alarm_bit(RMAC_UNUSED_INT |
4660				      RMAC_SINGLE_ECC_ERR |
4661				      RMAC_DOUBLE_ECC_ERR,
4662				      &bar0->mac_rmac_err_reg,
4663				      &sw_stat->mac_rmac_err_cnt);
4664	}
4665
4666	val64 = readq(&bar0->xgxs_int_status);
4667	if (val64 & XGXS_INT_STATUS_RXGXS) {
4668		if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4669					  &bar0->xgxs_rxgxs_err_reg,
4670					  &sw_stat->xgxs_rxgxs_err_cnt))
4671			goto reset;
4672	}
4673
4674	val64 = readq(&bar0->mc_int_status);
4675	if (val64 & MC_INT_STATUS_MC_INT) {
4676		if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR,
4677					  &bar0->mc_err_reg,
4678					  &sw_stat->mc_err_cnt))
4679			goto reset;
4680
4681		/* Handling Ecc errors */
4682		if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4683			writeq(val64, &bar0->mc_err_reg);
4684			if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4685				sw_stat->double_ecc_errs++;
4686				if (sp->device_type != XFRAME_II_DEVICE) {
4687					/*
4688					 * Reset XframeI only if critical error
4689					 */
4690					if (val64 &
4691					    (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4692					     MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4693						goto reset;
4694				}
4695			} else
4696				sw_stat->single_ecc_errs++;
4697		}
4698	}
4699	return;
4700
4701reset:
4702	s2io_stop_all_tx_queue(sp);
4703	schedule_work(&sp->rst_timer_task);
4704	sw_stat->soft_reset_cnt++;
4705}
4706
4707/**
4708 *  s2io_isr - ISR handler of the device .
4709 *  @irq: the irq of the device.
4710 *  @dev_id: a void pointer to the dev structure of the NIC.
4711 *  Description:  This function is the ISR handler of the device. It
4712 *  identifies the reason for the interrupt and calls the relevant
4713 *  service routines. As a contongency measure, this ISR allocates the
4714 *  recv buffers, if their numbers are below the panic value which is
4715 *  presently set to 25% of the original number of rcv buffers allocated.
4716 *  Return value:
4717 *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
4718 *   IRQ_NONE: will be returned if interrupt is not from our device
4719 */
4720static irqreturn_t s2io_isr(int irq, void *dev_id)
4721{
4722	struct net_device *dev = (struct net_device *)dev_id;
4723	struct s2io_nic *sp = netdev_priv(dev);
4724	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4725	int i;
4726	u64 reason = 0;
4727	struct mac_info *mac_control;
4728	struct config_param *config;
4729
4730	/* Pretend we handled any irq's from a disconnected card */
4731	if (pci_channel_offline(sp->pdev))
4732		return IRQ_NONE;
4733
4734	if (!is_s2io_card_up(sp))
4735		return IRQ_NONE;
4736
4737	config = &sp->config;
4738	mac_control = &sp->mac_control;
4739
4740	/*
4741	 * Identify the cause for interrupt and call the appropriate
4742	 * interrupt handler. Causes for the interrupt could be;
4743	 * 1. Rx of packet.
4744	 * 2. Tx complete.
4745	 * 3. Link down.
4746	 */
4747	reason = readq(&bar0->general_int_status);
4748
4749	if (unlikely(reason == S2IO_MINUS_ONE))
4750		return IRQ_HANDLED;	/* Nothing much can be done. Get out */
4751
4752	if (reason &
4753	    (GEN_INTR_RXTRAFFIC | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) {
4754		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4755
4756		if (config->napi) {
4757			if (reason & GEN_INTR_RXTRAFFIC) {
4758				napi_schedule(&sp->napi);
4759				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4760				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4761				readl(&bar0->rx_traffic_int);
4762			}
4763		} else {
4764			/*
4765			 * rx_traffic_int reg is an R1 register, writing all 1's
4766			 * will ensure that the actual interrupt causing bit
4767			 * get's cleared and hence a read can be avoided.
4768			 */
4769			if (reason & GEN_INTR_RXTRAFFIC)
4770				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4771
4772			for (i = 0; i < config->rx_ring_num; i++) {
4773				struct ring_info *ring = &mac_control->rings[i];
4774
4775				rx_intr_handler(ring, 0);
4776			}
4777		}
4778
4779		/*
4780		 * tx_traffic_int reg is an R1 register, writing all 1's
4781		 * will ensure that the actual interrupt causing bit get's
4782		 * cleared and hence a read can be avoided.
4783		 */
4784		if (reason & GEN_INTR_TXTRAFFIC)
4785			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4786
4787		for (i = 0; i < config->tx_fifo_num; i++)
4788			tx_intr_handler(&mac_control->fifos[i]);
4789
4790		if (reason & GEN_INTR_TXPIC)
4791			s2io_txpic_intr_handle(sp);
4792
4793		/*
4794		 * Reallocate the buffers from the interrupt handler itself.
4795		 */
4796		if (!config->napi) {
4797			for (i = 0; i < config->rx_ring_num; i++) {
4798				struct ring_info *ring = &mac_control->rings[i];
4799
4800				s2io_chk_rx_buffers(sp, ring);
4801			}
4802		}
4803		writeq(sp->general_int_mask, &bar0->general_int_mask);
4804		readl(&bar0->general_int_status);
4805
4806		return IRQ_HANDLED;
4807
4808	} else if (!reason) {
4809		/* The interrupt was not raised by us */
4810		return IRQ_NONE;
4811	}
4812
4813	return IRQ_HANDLED;
4814}
4815
4816/**
4817 * s2io_updt_stats -
4818 */
4819static void s2io_updt_stats(struct s2io_nic *sp)
4820{
4821	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4822	u64 val64;
4823	int cnt = 0;
4824
4825	if (is_s2io_card_up(sp)) {
4826		/* Apprx 30us on a 133 MHz bus */
4827		val64 = SET_UPDT_CLICKS(10) |
4828			STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4829		writeq(val64, &bar0->stat_cfg);
4830		do {
4831			udelay(100);
4832			val64 = readq(&bar0->stat_cfg);
4833			if (!(val64 & s2BIT(0)))
4834				break;
4835			cnt++;
4836			if (cnt == 5)
4837				break; /* Updt failed */
4838		} while (1);
4839	}
4840}
4841
4842/**
4843 *  s2io_get_stats - Updates the device statistics structure.
4844 *  @dev : pointer to the device structure.
4845 *  Description:
4846 *  This function updates the device statistics structure in the s2io_nic
4847 *  structure and returns a pointer to the same.
4848 *  Return value:
4849 *  pointer to the updated net_device_stats structure.
4850 */
4851static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4852{
4853	struct s2io_nic *sp = netdev_priv(dev);
4854	struct mac_info *mac_control = &sp->mac_control;
4855	struct stat_block *stats = mac_control->stats_info;
4856	u64 delta;
4857
4858	/* Configure Stats for immediate updt */
4859	s2io_updt_stats(sp);
4860
4861	/* A device reset will cause the on-adapter statistics to be zero'ed.
4862	 * This can be done while running by changing the MTU.  To prevent the
4863	 * system from having the stats zero'ed, the driver keeps a copy of the
4864	 * last update to the system (which is also zero'ed on reset).  This
4865	 * enables the driver to accurately know the delta between the last
4866	 * update and the current update.
4867	 */
4868	delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
4869		le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets;
4870	sp->stats.rx_packets += delta;
4871	dev->stats.rx_packets += delta;
4872
4873	delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 |
4874		le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets;
4875	sp->stats.tx_packets += delta;
4876	dev->stats.tx_packets += delta;
4877
4878	delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
4879		le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes;
4880	sp->stats.rx_bytes += delta;
4881	dev->stats.rx_bytes += delta;
4882
4883	delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
4884		le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes;
4885	sp->stats.tx_bytes += delta;
4886	dev->stats.tx_bytes += delta;
4887
4888	delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors;
4889	sp->stats.rx_errors += delta;
4890	dev->stats.rx_errors += delta;
4891
4892	delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
4893		le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors;
4894	sp->stats.tx_errors += delta;
4895	dev->stats.tx_errors += delta;
4896
4897	delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped;
4898	sp->stats.rx_dropped += delta;
4899	dev->stats.rx_dropped += delta;
4900
4901	delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped;
4902	sp->stats.tx_dropped += delta;
4903	dev->stats.tx_dropped += delta;
4904
4905	/* The adapter MAC interprets pause frames as multicast packets, but
4906	 * does not pass them up.  This erroneously increases the multicast
4907	 * packet count and needs to be deducted when the multicast frame count
4908	 * is queried.
4909	 */
4910	delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
4911		le32_to_cpu(stats->rmac_vld_mcst_frms);
4912	delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms);
4913	delta -= sp->stats.multicast;
4914	sp->stats.multicast += delta;
4915	dev->stats.multicast += delta;
4916
4917	delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
4918		le32_to_cpu(stats->rmac_usized_frms)) +
4919		le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors;
4920	sp->stats.rx_length_errors += delta;
4921	dev->stats.rx_length_errors += delta;
4922
4923	delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors;
4924	sp->stats.rx_crc_errors += delta;
4925	dev->stats.rx_crc_errors += delta;
4926
4927	return &dev->stats;
4928}
4929
4930/**
4931 *  s2io_set_multicast - entry point for multicast address enable/disable.
4932 *  @dev : pointer to the device structure
4933 *  Description:
4934 *  This function is a driver entry point which gets called by the kernel
4935 *  whenever multicast addresses must be enabled/disabled. This also gets
 *  called to set/reset promiscuous mode. Depending on the device flag, we
4937 *  determine, if multicast address must be enabled or if promiscuous mode
4938 *  is to be disabled etc.
4939 *  Return value:
4940 *  void.
4941 */
4942
static void s2io_set_multicast(struct net_device *dev)
{
	int i, j, prev_cnt;
	struct netdev_hw_addr *ha;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
		0xfeffffffffffULL;
	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
	void __iomem *add;
	struct config_param *config = &sp->config;

	if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
		/*  Enable all Multicast addresses */
		/* Program a catch-all multicast entry (multi_mac with a
		 * match mask) into the last CAM slot, following the usual
		 * data0/data1 + command-strobe + wait sequence.
		 */
		writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
		       &bar0->rmac_addr_data0_mem);
		writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
		       &bar0->rmac_addr_data1_mem);
		val64 = RMAC_ADDR_CMD_MEM_WE |
			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
			RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
		writeq(val64, &bar0->rmac_addr_cmd_mem);
		/* Wait till command completes */
		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
				      S2IO_BIT_RESET);

		/* Remember which CAM slot holds the all-multi entry so it
		 * can be torn down again when IFF_ALLMULTI is cleared.
		 */
		sp->m_cast_flg = 1;
		sp->all_multi_pos = config->max_mc_addr - 1;
	} else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
		/*  Disable all Multicast addresses */
		writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
		       &bar0->rmac_addr_data0_mem);
		writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
		       &bar0->rmac_addr_data1_mem);
		val64 = RMAC_ADDR_CMD_MEM_WE |
			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
			RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
		writeq(val64, &bar0->rmac_addr_cmd_mem);
		/* Wait till command completes */
		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
				      S2IO_BIT_RESET);

		sp->m_cast_flg = 0;
		sp->all_multi_pos = 0;
	}

	if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
		/*  Put the NIC into promiscuous mode */
		add = &bar0->mac_cfg;
		val64 = readq(&bar0->mac_cfg);
		val64 |= MAC_CFG_RMAC_PROM_ENABLE;

		/* mac_cfg is key-protected: each 32-bit half must be
		 * preceded by a write of the unlock key to rmac_cfg_key.
		 */
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32)val64, add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));

		/* Unless the module parameter forces stripping on, turn
		 * off VLAN tag stripping while in promiscuous mode.
		 */
		if (vlan_tag_strip != 1) {
			val64 = readq(&bar0->rx_pa_cfg);
			val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
			writeq(val64, &bar0->rx_pa_cfg);
			sp->vlan_strip_flag = 0;
		}

		val64 = readq(&bar0->mac_cfg);
		sp->promisc_flg = 1;
		DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
			  dev->name);
	} else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
		/*  Remove the NIC from promiscuous mode */
		add = &bar0->mac_cfg;
		val64 = readq(&bar0->mac_cfg);
		val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;

		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32)val64, add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));

		/* Restore VLAN tag stripping unless disabled by module
		 * parameter.
		 */
		if (vlan_tag_strip != 0) {
			val64 = readq(&bar0->rx_pa_cfg);
			val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
			writeq(val64, &bar0->rx_pa_cfg);
			sp->vlan_strip_flag = 1;
		}

		val64 = readq(&bar0->mac_cfg);
		sp->promisc_flg = 0;
		DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", dev->name);
	}

	/*  Update individual M_CAST address list */
	if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
		/* Multicast entries live in the CAM slots between
		 * max_mac_addr and max_mc_addr; refuse lists that do not fit.
		 */
		if (netdev_mc_count(dev) >
		    (config->max_mc_addr - config->max_mac_addr)) {
			DBG_PRINT(ERR_DBG,
				  "%s: No more Rx filters can be added - "
				  "please enable ALL_MULTI instead\n",
				  dev->name);
			return;
		}

		prev_cnt = sp->mc_addr_count;
		sp->mc_addr_count = netdev_mc_count(dev);

		/* Clear out the previous list of Mc in the H/W. */
		for (i = 0; i < prev_cnt; i++) {
			writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
			       &bar0->rmac_addr_data0_mem);
			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
			       &bar0->rmac_addr_data1_mem);
			val64 = RMAC_ADDR_CMD_MEM_WE |
				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
				RMAC_ADDR_CMD_MEM_OFFSET
				(config->mc_start_offset + i);
			writeq(val64, &bar0->rmac_addr_cmd_mem);

			/* Wait for command completes */
			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
						  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
						  S2IO_BIT_RESET)) {
				DBG_PRINT(ERR_DBG,
					  "%s: Adding Multicasts failed\n",
					  dev->name);
				return;
			}
		}

		/* Create the new Rx filter list and update the same in H/W. */
		i = 0;
		netdev_for_each_mc_addr(ha, dev) {
			/* Pack the six address bytes big-endian into the low
			 * 48 bits of mac_addr (final >>= 8 undoes the extra
			 * shift from the last loop iteration).
			 */
			mac_addr = 0;
			for (j = 0; j < ETH_ALEN; j++) {
				mac_addr |= ha->addr[j];
				mac_addr <<= 8;
			}
			mac_addr >>= 8;
			writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
			       &bar0->rmac_addr_data0_mem);
			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
			       &bar0->rmac_addr_data1_mem);
			val64 = RMAC_ADDR_CMD_MEM_WE |
				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
				RMAC_ADDR_CMD_MEM_OFFSET
				(i + config->mc_start_offset);
			writeq(val64, &bar0->rmac_addr_cmd_mem);

			/* Wait for command completes */
			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
						  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
						  S2IO_BIT_RESET)) {
				DBG_PRINT(ERR_DBG,
					  "%s: Adding Multicasts failed\n",
					  dev->name);
				return;
			}
			i++;
		}
	}
}
5105
5106/* read from CAM unicast & multicast addresses and store it in
5107 * def_mac_addr structure
5108 */
5109static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5110{
5111	int offset;
5112	u64 mac_addr = 0x0;
5113	struct config_param *config = &sp->config;
5114
5115	/* store unicast & multicast mac addresses */
5116	for (offset = 0; offset < config->max_mc_addr; offset++) {
5117		mac_addr = do_s2io_read_unicast_mc(sp, offset);
5118		/* if read fails disable the entry */
5119		if (mac_addr == FAILURE)
5120			mac_addr = S2IO_DISABLE_MAC_ENTRY;
5121		do_s2io_copy_mac_addr(sp, offset, mac_addr);
5122	}
5123}
5124
5125/* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5126static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5127{
5128	int offset;
5129	struct config_param *config = &sp->config;
5130	/* restore unicast mac address */
5131	for (offset = 0; offset < config->max_mac_addr; offset++)
5132		do_s2io_prog_unicast(sp->dev,
5133				     sp->def_mac_addr[offset].mac_addr);
5134
5135	/* restore multicast mac address */
5136	for (offset = config->mc_start_offset;
5137	     offset < config->max_mc_addr; offset++)
5138		do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5139}
5140
5141/* add a multicast MAC address to CAM */
5142static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5143{
5144	int i;
5145	u64 mac_addr = 0;
5146	struct config_param *config = &sp->config;
5147
5148	for (i = 0; i < ETH_ALEN; i++) {
5149		mac_addr <<= 8;
5150		mac_addr |= addr[i];
5151	}
5152	if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5153		return SUCCESS;
5154
5155	/* check if the multicast mac already preset in CAM */
5156	for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5157		u64 tmp64;
5158		tmp64 = do_s2io_read_unicast_mc(sp, i);
5159		if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5160			break;
5161
5162		if (tmp64 == mac_addr)
5163			return SUCCESS;
5164	}
5165	if (i == config->max_mc_addr) {
5166		DBG_PRINT(ERR_DBG,
5167			  "CAM full no space left for multicast MAC\n");
5168		return FAILURE;
5169	}
5170	/* Update the internal structure with this new mac address */
5171	do_s2io_copy_mac_addr(sp, i, mac_addr);
5172
5173	return do_s2io_add_mac(sp, mac_addr, i);
5174}
5175
5176/* add MAC address to CAM */
static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
{
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Stage the address in the CAM data register... */
	writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
	       &bar0->rmac_addr_data0_mem);

	/* ...then issue a write-enable strobe targeting CAM slot 'off'. */
	val64 =	RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		RMAC_ADDR_CMD_MEM_OFFSET(off);
	writeq(val64, &bar0->rmac_addr_cmd_mem);

	/* Wait till command completes */
	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
				  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
				  S2IO_BIT_RESET)) {
		DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
		return FAILURE;
	}
	return SUCCESS;
}
5198/* deletes a specified unicast/multicast mac entry from CAM */
5199static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5200{
5201	int offset;
5202	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5203	struct config_param *config = &sp->config;
5204
5205	for (offset = 1;
5206	     offset < config->max_mc_addr; offset++) {
5207		tmp64 = do_s2io_read_unicast_mc(sp, offset);
5208		if (tmp64 == addr) {
5209			/* disable the entry by writing  0xffffffffffffULL */
5210			if (do_s2io_add_mac(sp, dis_addr, offset) ==  FAILURE)
5211				return FAILURE;
5212			/* store the new mac list from CAM */
5213			do_s2io_store_unicast_mc(sp);
5214			return SUCCESS;
5215		}
5216	}
5217	DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5218		  (unsigned long long)addr);
5219	return FAILURE;
5220}
5221
5222/* read mac entries from CAM */
static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
{
	u64 tmp64 = 0xffffffffffff0000ULL, val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* read mac addr */
	/* Issue a read strobe for CAM slot 'offset'. */
	val64 =	RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		RMAC_ADDR_CMD_MEM_OFFSET(offset);
	writeq(val64, &bar0->rmac_addr_cmd_mem);

	/* Wait till command completes */
	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
				  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
				  S2IO_BIT_RESET)) {
		/* NOTE(review): FAILURE is returned as a u64 here; callers
		 * compare against it directly (see do_s2io_store_unicast_mc).
		 */
		DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
		return FAILURE;
	}
	tmp64 = readq(&bar0->rmac_addr_data0_mem);

	/* The data register holds the MAC in its upper 48 bits. */
	return tmp64 >> 16;
}
5244
5245/**
5246 * s2io_set_mac_addr - driver entry point
5247 */
5248
5249static int s2io_set_mac_addr(struct net_device *dev, void *p)
5250{
5251	struct sockaddr *addr = p;
5252
5253	if (!is_valid_ether_addr(addr->sa_data))
5254		return -EADDRNOTAVAIL;
5255
5256	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5257
5258	/* store the MAC address in CAM */
5259	return do_s2io_prog_unicast(dev, dev->dev_addr);
5260}
5261/**
5262 *  do_s2io_prog_unicast - Programs the Xframe mac address
5263 *  @dev : pointer to the device structure.
5264 *  @addr: a uchar pointer to the new mac address which is to be set.
5265 *  Description : This procedure will program the Xframe to receive
5266 *  frames with new Mac Address
5267 *  Return value: SUCCESS on success and an appropriate (-)ve integer
5268 *  as defined in errno.h file on failure.
5269 */
5270
static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
{
	struct s2io_nic *sp = netdev_priv(dev);
	register u64 mac_addr = 0, perm_addr = 0;
	int i;
	u64 tmp64;
	struct config_param *config = &sp->config;

	/*
	 * Set the new MAC address as the new unicast filter and reflect this
	 * change on the device address registered with the OS. It will be
	 * at offset 0.
	 */
	/* Pack both the requested address and the permanent (slot 0)
	 * address into u64s for comparison.
	 */
	for (i = 0; i < ETH_ALEN; i++) {
		mac_addr <<= 8;
		mac_addr |= addr[i];
		perm_addr <<= 8;
		perm_addr |= sp->def_mac_addr[0].mac_addr[i];
	}

	/* check if the dev_addr is different than perm_addr */
	if (mac_addr == perm_addr)
		return SUCCESS;

	/* check if the mac already preset in CAM */
	/* Slot 0 is the permanent address; scan the remaining unicast
	 * slots for a free entry or a duplicate.
	 */
	for (i = 1; i < config->max_mac_addr; i++) {
		tmp64 = do_s2io_read_unicast_mc(sp, i);
		if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
			break;

		if (tmp64 == mac_addr) {
			DBG_PRINT(INFO_DBG,
				  "MAC addr:0x%llx already present in CAM\n",
				  (unsigned long long)mac_addr);
			return SUCCESS;
		}
	}
	if (i == config->max_mac_addr) {
		DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
		return FAILURE;
	}
	/* Update the internal structure with this new mac address */
	do_s2io_copy_mac_addr(sp, i, mac_addr);

	return do_s2io_add_mac(sp, mac_addr, i);
}
5317
5318/**
5319 * s2io_ethtool_sset - Sets different link parameters.
5320 * @sp : private member of the device structure, which is a pointer to the  * s2io_nic structure.
5321 * @info: pointer to the structure with parameters given by ethtool to set
 
5322 * link information.
5323 * Description:
5324 * The function sets different link parameters provided by the user onto
5325 * the NIC.
5326 * Return value:
5327 * 0 on success.
5328 */
5329
5330static int s2io_ethtool_sset(struct net_device *dev,
5331			     struct ethtool_cmd *info)
 
5332{
5333	struct s2io_nic *sp = netdev_priv(dev);
5334	if ((info->autoneg == AUTONEG_ENABLE) ||
5335	    (ethtool_cmd_speed(info) != SPEED_10000) ||
5336	    (info->duplex != DUPLEX_FULL))
5337		return -EINVAL;
5338	else {
5339		s2io_close(sp->dev);
5340		s2io_open(sp->dev);
5341	}
5342
5343	return 0;
5344}
5345
5346/**
 * s2io_ethtool_gset - Return link specific information.
5348 * @sp : private member of the device structure, pointer to the
5349 *      s2io_nic structure.
5350 * @info : pointer to the structure with parameters given by ethtool
5351 * to return link information.
5352 * Description:
5353 * Returns link specific information like speed, duplex etc.. to ethtool.
5354 * Return value :
5355 * return 0 on success.
5356 */
5357
5358static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
 
 
5359{
5360	struct s2io_nic *sp = netdev_priv(dev);
5361	info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5362	info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5363	info->port = PORT_FIBRE;
5364
5365	/* info->transceiver */
5366	info->transceiver = XCVR_EXTERNAL;
 
 
 
 
 
 
 
5367
5368	if (netif_carrier_ok(sp->dev)) {
5369		ethtool_cmd_speed_set(info, SPEED_10000);
5370		info->duplex = DUPLEX_FULL;
5371	} else {
5372		ethtool_cmd_speed_set(info, -1);
5373		info->duplex = -1;
5374	}
5375
5376	info->autoneg = AUTONEG_DISABLE;
5377	return 0;
5378}
5379
5380/**
5381 * s2io_ethtool_gdrvinfo - Returns driver specific information.
5382 * @sp : private member of the device structure, which is a pointer to the
5383 * s2io_nic structure.
5384 * @info : pointer to the structure with parameters given by ethtool to
5385 * return driver information.
5386 * Description:
 * Returns driver specific information like name, version etc.. to ethtool.
5388 * Return value:
5389 *  void
5390 */
5391
static void s2io_ethtool_gdrvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *info)
{
	struct s2io_nic *sp = netdev_priv(dev);

	/* Report driver identity and the adapter's PCI location. */
	strlcpy(info->driver, s2io_driver_name, sizeof(info->driver));
	strlcpy(info->version, s2io_driver_version, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
	/* Buffer sizes ethtool should allocate for -d / -e dumps. */
	info->regdump_len = XENA_REG_SPACE;
	info->eedump_len = XENA_EEPROM_SPACE;
}
5403
5404/**
 *  s2io_ethtool_gregs - dumps the entire register space of Xframe into the buffer.
5406 *  @sp: private member of the device structure, which is a pointer to the
5407 *  s2io_nic structure.
5408 *  @regs : pointer to the structure with parameters given by ethtool for
5409 *  dumping the registers.
 *  @reg_space: The input argument into which all the registers are dumped.
5411 *  Description:
5412 *  Dumps the entire register space of xFrame NIC into the user given
5413 *  buffer area.
5414 * Return value :
5415 * void .
5416 */
5417
5418static void s2io_ethtool_gregs(struct net_device *dev,
5419			       struct ethtool_regs *regs, void *space)
5420{
5421	int i;
5422	u64 reg;
5423	u8 *reg_space = (u8 *)space;
5424	struct s2io_nic *sp = netdev_priv(dev);
5425
5426	regs->len = XENA_REG_SPACE;
5427	regs->version = sp->pdev->subsystem_device;
5428
5429	for (i = 0; i < regs->len; i += 8) {
5430		reg = readq(sp->bar0 + i);
5431		memcpy((reg_space + i), &reg, 8);
5432	}
5433}
5434
5435/*
5436 *  s2io_set_led - control NIC led
5437 */
5438static void s2io_set_led(struct s2io_nic *sp, bool on)
5439{
5440	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5441	u16 subid = sp->pdev->subsystem_device;
5442	u64 val64;
5443
5444	if ((sp->device_type == XFRAME_II_DEVICE) ||
5445	    ((subid & 0xFF) >= 0x07)) {
5446		val64 = readq(&bar0->gpio_control);
5447		if (on)
5448			val64 |= GPIO_CTRL_GPIO_0;
5449		else
5450			val64 &= ~GPIO_CTRL_GPIO_0;
5451
5452		writeq(val64, &bar0->gpio_control);
5453	} else {
5454		val64 = readq(&bar0->adapter_control);
5455		if (on)
5456			val64 |= ADAPTER_LED_ON;
5457		else
5458			val64 &= ~ADAPTER_LED_ON;
5459
5460		writeq(val64, &bar0->adapter_control);
5461	}
5462
5463}
5464
5465/**
5466 * s2io_ethtool_set_led - To physically identify the nic on the system.
5467 * @dev : network device
5468 * @state: led setting
5469 *
5470 * Description: Used to physically identify the NIC on the system.
5471 * The Link LED will blink for a time specified by the user for
5472 * identification.
5473 * NOTE: The Link has to be Up to be able to blink the LED. Hence
5474 * identification is possible only if it's link is up.
5475 */
5476
static int s2io_ethtool_set_led(struct net_device *dev,
				enum ethtool_phys_id_state state)
{
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u16 subid = sp->pdev->subsystem_device;

	/* Old Xframe I boards can only blink the LED while the adapter
	 * is enabled (link up).
	 */
	if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) {
		u64 val64 = readq(&bar0->adapter_control);
		if (!(val64 & ADAPTER_CNTL_EN)) {
			pr_err("Adapter Link down, cannot blink LED\n");
			return -EAGAIN;
		}
	}

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		/* Save the current GPIO state so it can be restored when
		 * identification ends.
		 */
		sp->adapt_ctrl_org = readq(&bar0->gpio_control);
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		s2io_set_led(sp, true);
		break;

	case ETHTOOL_ID_OFF:
		s2io_set_led(sp, false);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore the saved GPIO state on affected boards. */
		if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid))
			writeq(sp->adapt_ctrl_org, &bar0->gpio_control);
	}

	return 0;
}
5512
5513static void s2io_ethtool_gringparam(struct net_device *dev,
5514				    struct ethtool_ringparam *ering)
5515{
5516	struct s2io_nic *sp = netdev_priv(dev);
5517	int i, tx_desc_count = 0, rx_desc_count = 0;
5518
5519	if (sp->rxd_mode == RXD_MODE_1) {
5520		ering->rx_max_pending = MAX_RX_DESC_1;
5521		ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5522	} else {
5523		ering->rx_max_pending = MAX_RX_DESC_2;
5524		ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5525	}
5526
5527	ering->tx_max_pending = MAX_TX_DESC;
5528
5529	for (i = 0; i < sp->config.rx_ring_num; i++)
5530		rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5531	ering->rx_pending = rx_desc_count;
5532	ering->rx_jumbo_pending = rx_desc_count;
5533
5534	for (i = 0; i < sp->config.tx_fifo_num; i++)
5535		tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5536	ering->tx_pending = tx_desc_count;
5537	DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
5538}
5539
5540/**
5541 * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
5542 * @sp : private member of the device structure, which is a pointer to the
5543 *	s2io_nic structure.
5544 * @ep : pointer to the structure with pause parameters given by ethtool.
5545 * Description:
5546 * Returns the Pause frame generation and reception capability of the NIC.
5547 * Return value:
5548 *  void
5549 */
5550static void s2io_ethtool_getpause_data(struct net_device *dev,
5551				       struct ethtool_pauseparam *ep)
5552{
5553	u64 val64;
5554	struct s2io_nic *sp = netdev_priv(dev);
5555	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5556
5557	val64 = readq(&bar0->rmac_pause_cfg);
5558	if (val64 & RMAC_PAUSE_GEN_ENABLE)
5559		ep->tx_pause = true;
5560	if (val64 & RMAC_PAUSE_RX_ENABLE)
5561		ep->rx_pause = true;
5562	ep->autoneg = false;
5563}
5564
5565/**
5566 * s2io_ethtool_setpause_data -  set/reset pause frame generation.
5567 * @sp : private member of the device structure, which is a pointer to the
5568 *      s2io_nic structure.
5569 * @ep : pointer to the structure with pause parameters given by ethtool.
5570 * Description:
5571 * It can be used to set or reset Pause frame generation or reception
5572 * support of the NIC.
5573 * Return value:
5574 * int, returns 0 on Success
5575 */
5576
5577static int s2io_ethtool_setpause_data(struct net_device *dev,
5578				      struct ethtool_pauseparam *ep)
5579{
5580	u64 val64;
5581	struct s2io_nic *sp = netdev_priv(dev);
5582	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5583
5584	val64 = readq(&bar0->rmac_pause_cfg);
5585	if (ep->tx_pause)
5586		val64 |= RMAC_PAUSE_GEN_ENABLE;
5587	else
5588		val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5589	if (ep->rx_pause)
5590		val64 |= RMAC_PAUSE_RX_ENABLE;
5591	else
5592		val64 &= ~RMAC_PAUSE_RX_ENABLE;
5593	writeq(val64, &bar0->rmac_pause_cfg);
5594	return 0;
5595}
5596
5597/**
5598 * read_eeprom - reads 4 bytes of data from user given offset.
5599 * @sp : private member of the device structure, which is a pointer to the
5600 *      s2io_nic structure.
5601 * @off : offset at which the data must be written
5602 * @data : Its an output parameter where the data read at the given
5603 *	offset is stored.
5604 * Description:
5605 * Will read 4 bytes of data from the user given offset and return the
5606 * read data.
5607 * NOTE: Will allow to read only part of the EEPROM visible through the
5608 *   I2C bus.
5609 * Return value:
5610 *  -1 on failure and 0 on success.
5611 */
5612
#define S2IO_DEV_ID		5
static int read_eeprom(struct s2io_nic *sp, int off, u64 *data)
{
	int ret = -1;
	u32 exit_cnt = 0;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Xframe I: EEPROM sits behind the I2C controller. */
	if (sp->device_type == XFRAME_I_DEVICE) {
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
			I2C_CONTROL_ADDR(off) |
			I2C_CONTROL_BYTE_CNT(0x3) |
			I2C_CONTROL_READ |
			I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		/* Poll for completion, up to 5 x 50ms. */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				*data = I2C_CONTROL_GET_DATA(val64);
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	/* Xframe II: EEPROM sits behind the SPI controller instead. */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(0x3) |
			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		/* The request bit is set in a second write, after the
		 * command word has been programmed.
		 */
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				/* Only the low 3 bytes are valid data. */
				*data = readq(&bar0->spi_data);
				*data &= 0xffffff;
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
5665
5666/**
5667 *  write_eeprom - actually writes the relevant part of the data value.
5668 *  @sp : private member of the device structure, which is a pointer to the
5669 *       s2io_nic structure.
5670 *  @off : offset at which the data must be written
5671 *  @data : The data that is to be written
5672 *  @cnt : Number of bytes of the data that are actually to be written into
5673 *  the Eeprom. (max of 3)
5674 * Description:
5675 *  Actually writes the relevant part of the data value into the Eeprom
5676 *  through the I2C bus.
5677 * Return value:
5678 *  0 on success, -1 on failure.
5679 */
5680
static int write_eeprom(struct s2io_nic *sp, int off, u64 data, int cnt)
{
	int exit_cnt = 0, ret = -1;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Xframe I: write goes through the I2C controller. */
	if (sp->device_type == XFRAME_I_DEVICE) {
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
			I2C_CONTROL_ADDR(off) |
			I2C_CONTROL_BYTE_CNT(cnt) |
			I2C_CONTROL_SET_DATA((u32)data) |
			I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		/* Poll for completion, up to 5 x 50ms; NACK means failure. */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				if (!(val64 & I2C_CONTROL_NACK))
					ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	/* Xframe II: write goes through the SPI controller. */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* A cnt of 8 is encoded as byte-count 0 for the SPI engine. */
		int write_cnt = (cnt == 8) ? 0 : cnt;
		writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);

		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(write_cnt) |
			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		/* Kick off the transfer with a second write setting REQ. */
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
/* Read the adapter's VPD (product name and serial number) through the
 * PCI VPD capability registers and cache it in the nic structure.
 */
static void s2io_vpd_read(struct s2io_nic *nic)
{
	u8 *vpd_data;
	u8 data;
	int i = 0, cnt, len, fail = 0;
	int vpd_addr = 0x80;	/* config-space offset of the VPD capability */
	struct swStat *swstats = &nic->mac_control.stats_info->sw_stat;

	/* Default names in case the VPD read fails below. */
	if (nic->device_type == XFRAME_II_DEVICE) {
		strcpy(nic->product_name, "Xframe II 10GbE network adapter");
		vpd_addr = 0x80;
	} else {
		strcpy(nic->product_name, "Xframe I 10GbE network adapter");
		vpd_addr = 0x50;
	}
	strcpy(nic->serial_num, "NOT AVAILABLE");

	vpd_data = kmalloc(256, GFP_KERNEL);
	if (!vpd_data) {
		swstats->mem_alloc_fail_cnt++;
		return;
	}
	swstats->mem_allocated += 256;

	/* VPD capability layout: +2 = address, +3 = flag, +4 = 32-bit data.
	 * Write the address, then poll the flag until the read completes
	 * (flag == 0x80), up to 5 x 2ms per dword.
	 */
	for (i = 0; i < 256; i += 4) {
		pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
		pci_read_config_byte(nic->pdev,  (vpd_addr + 2), &data);
		pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
		for (cnt = 0; cnt < 5; cnt++) {
			msleep(2);
			pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
			if (data == 0x80)
				break;
		}
		if (cnt >= 5) {
			DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
			fail = 1;
			break;
		}
		pci_read_config_dword(nic->pdev,  (vpd_addr + 4),
				      (u32 *)&vpd_data[i]);
	}

	if (!fail) {
		/* read serial number of adapter */
		/* Scan for the "SN" VPD tag; the byte after it is the
		 * string length, followed by the string itself.
		 */
		for (cnt = 0; cnt < 252; cnt++) {
			if ((vpd_data[cnt] == 'S') &&
			    (vpd_data[cnt+1] == 'N')) {
				len = vpd_data[cnt+2];
				if (len < min(VPD_STRING_LEN, 256-cnt-2)) {
					memcpy(nic->serial_num,
					       &vpd_data[cnt + 3],
					       len);
					memset(nic->serial_num+len,
					       0,
					       VPD_STRING_LEN-len);
					break;
				}
			}
		}
	}

	/* Byte 1 of the VPD buffer holds the product-name length;
	 * the name itself starts at byte 3.
	 */
	if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
		len = vpd_data[1];
		memcpy(nic->product_name, &vpd_data[3], len);
		nic->product_name[len] = 0;
	}
	kfree(vpd_data);
	swstats->mem_freed += 256;
}
5802
5803/**
5804 *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
5805 *  @sp : private member of the device structure, which is a pointer to the *       s2io_nic structure.
 
5806 *  @eeprom : pointer to the user level structure provided by ethtool,
5807 *  containing all relevant information.
5808 *  @data_buf : user defined value to be written into Eeprom.
5809 *  Description: Reads the values stored in the Eeprom at given offset
5810 *  for a given length. Stores these values int the input argument data
5811 *  buffer 'data_buf' and returns these to the caller (ethtool.)
5812 *  Return value:
5813 *  int  0 on success
5814 */
5815
static int s2io_ethtool_geeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom, u8 * data_buf)
{
	u32 i, valid;
	u64 data;
	struct s2io_nic *sp = netdev_priv(dev);

	/* Magic identifies the device: PCI vendor in the low 16 bits,
	 * device id in the high 16.
	 */
	eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);

	/* Clamp the request to the EEPROM window. */
	if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
		eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;

	/* EEPROM is read 4 bytes at a time. */
	for (i = 0; i < eeprom->len; i += 4) {
		if (read_eeprom(sp, (eeprom->offset + i), &data)) {
			DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
			return -EFAULT;
		}
		/* INV() reorders the raw word before it is copied out --
		 * presumably a byte-order fix-up; see the macro definition.
		 */
		valid = INV(data);
		memcpy((data_buf + i), &valid, 4);
	}
	return 0;
}
5838
5839/**
5840 *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5841 *  @sp : private member of the device structure, which is a pointer to the
5842 *  s2io_nic structure.
5843 *  @eeprom : pointer to the user level structure provided by ethtool,
5844 *  containing all relevant information.
5845 *  @data_buf ; user defined value to be written into Eeprom.
5846 *  Description:
5847 *  Tries to write the user provided value in the Eeprom, at the offset
5848 *  given by the user.
5849 *  Return value:
5850 *  0 on success, -EFAULT on failure.
5851 */
5852
static int s2io_ethtool_seeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom,
				u8 *data_buf)
{
	int len = eeprom->len, cnt = 0;
	u64 valid = 0, data;
	struct s2io_nic *sp = netdev_priv(dev);

	/* The caller must echo back the magic from geeprom (vendor |
	 * device << 16) as a safety interlock.
	 */
	if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
		DBG_PRINT(ERR_DBG,
			  "ETHTOOL_WRITE_EEPROM Err: "
			  "Magic value is wrong, it is 0x%x should be 0x%x\n",
			  (sp->pdev->vendor | (sp->pdev->device << 16)),
			  eeprom->magic);
		return -EFAULT;
	}

	/* Write one byte per iteration.  Non-zero bytes are shifted into
	 * the top of the 32-bit word before being handed to write_eeprom
	 * with a byte count of 0 -- NOTE(review): this matches
	 * write_eeprom's encoding; verify against that function.
	 */
	while (len) {
		data = (u32)data_buf[cnt] & 0x000000FF;
		if (data)
			valid = (u32)(data << 24);
		else
			valid = data;

		if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
			DBG_PRINT(ERR_DBG,
				  "ETHTOOL_WRITE_EEPROM Err: "
				  "Cannot write into the specified offset\n");
			return -EFAULT;
		}
		cnt++;
		len--;
	}

	return 0;
}
5889
5890/**
5891 * s2io_register_test - reads and writes into all clock domains.
5892 * @sp : private member of the device structure, which is a pointer to the
5893 * s2io_nic structure.
5894 * @data : variable that returns the result of each of the test conducted b
5895 * by the driver.
5896 * Description:
5897 * Read and write into all clock domains. The NIC has 3 clock domains,
5898 * see that registers in all the three regions are accessible.
5899 * Return value:
5900 * 0 on success.
5901 */
5902
static int s2io_register_test(struct s2io_nic *sp, uint64_t *data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0, exp_val;
	int fail = 0;

	/* Read tests: each register is compared against the value this
	 * driver expects after reset/initialization.
	 */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x123456789abcdefULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 1);
	}

	val64 = readq(&bar0->rmac_pause_cfg);
	if (val64 != 0xc000ffff00000000ULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 2);
	}

	/* The expected rx_queue_cfg value differs between Xframe I and II. */
	val64 = readq(&bar0->rx_queue_cfg);
	if (sp->device_type == XFRAME_II_DEVICE)
		exp_val = 0x0404040404040404ULL;
	else
		exp_val = 0x0808080808080808ULL;
	if (val64 != exp_val) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 3);
	}

	val64 = readq(&bar0->xgxs_efifo_cfg);
	if (val64 != 0x000000001923141EULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 4);
	}

	/* Write tests: write alternating bit patterns to xmsi_data and
	 * read them back.
	 */
	val64 = 0x5A5A5A5A5A5A5A5AULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
		fail = 1;
		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 1);
	}

	val64 = 0xA5A5A5A5A5A5A5A5ULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
		fail = 1;
		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 2);
	}

	/* 0 = all tests passed, 1 = at least one failed. */
	*data = fail;
	return fail;
}
5956
5957/**
5958 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5959 * @sp : private member of the device structure, which is a pointer to the
5960 * s2io_nic structure.
5961 * @data:variable that returns the result of each of the test conducted by
5962 * the driver.
5963 * Description:
5964 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5965 * register.
5966 * Return value:
5967 * 0 on success.
5968 */
5969
static int s2io_eeprom_test(struct s2io_nic *sp, uint64_t *data)
{
	int fail = 0;
	u64 ret_data, org_4F0, org_7F0;
	u8 saved_4F0 = 0, saved_7F0 = 0;
	struct net_device *dev = sp->dev;

	/* write_eeprom()/read_eeprom() return 0 on success; the
	 * "negative" tests below therefore treat a *successful* write
	 * to a protected offset as a test failure.
	 */

	/* Test Write Error at offset 0 */
	/* Note that SPI interface allows write access to all areas
	 * of EEPROM. Hence doing all negative testing only for Xframe I.
	 */
	if (sp->device_type == XFRAME_I_DEVICE)
		if (!write_eeprom(sp, 0, 0, 3))
			fail = 1;

	/* Save current values at offsets 0x4F0 and 0x7F0 */
	if (!read_eeprom(sp, 0x4F0, &org_4F0))
		saved_4F0 = 1;
	if (!read_eeprom(sp, 0x7F0, &org_7F0))
		saved_7F0 = 1;

	/* Test Write at offset 4f0 */
	if (write_eeprom(sp, 0x4F0, 0x012345, 3))
		fail = 1;
	if (read_eeprom(sp, 0x4F0, &ret_data))
		fail = 1;

	/* Verify the pattern written above reads back unchanged. */
	if (ret_data != 0x012345) {
		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
			  "Data written %llx Data read %llx\n",
			  dev->name, (unsigned long long)0x12345,
			  (unsigned long long)ret_data);
		fail = 1;
	}

	/* Reset the EEPROM data go FFFF */
	write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);

	/* Test Write Request Error at offset 0x7c */
	if (sp->device_type == XFRAME_I_DEVICE)
		if (!write_eeprom(sp, 0x07C, 0, 3))
			fail = 1;

	/* Test Write Request at offset 0x7f0 */
	if (write_eeprom(sp, 0x7F0, 0x012345, 3))
		fail = 1;
	if (read_eeprom(sp, 0x7F0, &ret_data))
		fail = 1;

	if (ret_data != 0x012345) {
		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
			  "Data written %llx Data read %llx\n",
			  dev->name, (unsigned long long)0x12345,
			  (unsigned long long)ret_data);
		fail = 1;
	}

	/* Reset the EEPROM data go FFFF */
	write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);

	/* Remaining negative tests, Xframe I only (see note above). */
	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Test Write Error at offset 0x80 */
		if (!write_eeprom(sp, 0x080, 0, 3))
			fail = 1;

		/* Test Write Error at offset 0xfc */
		if (!write_eeprom(sp, 0x0FC, 0, 3))
			fail = 1;

		/* Test Write Error at offset 0x100 */
		if (!write_eeprom(sp, 0x100, 0, 3))
			fail = 1;

		/* Test Write Error at offset 4ec */
		if (!write_eeprom(sp, 0x4EC, 0, 3))
			fail = 1;
	}

	/* Restore values at offsets 0x4F0 and 0x7F0 */
	if (saved_4F0)
		write_eeprom(sp, 0x4F0, org_4F0, 3);
	if (saved_7F0)
		write_eeprom(sp, 0x7F0, org_7F0, 3);

	*data = fail;
	return fail;
}
6057
6058/**
6059 * s2io_bist_test - invokes the MemBist test of the card .
6060 * @sp : private member of the device structure, which is a pointer to the
6061 * s2io_nic structure.
6062 * @data:variable that returns the result of each of the test conducted by
6063 * the driver.
6064 * Description:
6065 * This invokes the MemBist test of the card. We give around
6066 * 2 secs time for the Test to complete. If it's still not complete
 * within this period, we consider that the test failed.
6068 * Return value:
6069 * 0 on success and -1 on failure.
6070 */
6071
6072static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
6073{
6074	u8 bist = 0;
6075	int cnt = 0, ret = -1;
6076
6077	pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6078	bist |= PCI_BIST_START;
6079	pci_write_config_word(sp->pdev, PCI_BIST, bist);
6080
6081	while (cnt < 20) {
6082		pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6083		if (!(bist & PCI_BIST_START)) {
6084			*data = (bist & PCI_BIST_CODE_MASK);
6085			ret = 0;
6086			break;
6087		}
6088		msleep(100);
6089		cnt++;
6090	}
6091
6092	return ret;
6093}
6094
6095/**
6096 * s2io_link_test - verifies the link state of the nic
 * @sp : private member of the device structure, which is a pointer to the
6098 * s2io_nic structure.
6099 * @data: variable that returns the result of each of the test conducted by
6100 * the driver.
6101 * Description:
6102 * The function verifies the link state of the NIC and updates the input
6103 * argument 'data' appropriately.
6104 * Return value:
6105 * 0 on success.
6106 */
6107
6108static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
6109{
6110	struct XENA_dev_config __iomem *bar0 = sp->bar0;
6111	u64 val64;
6112
6113	val64 = readq(&bar0->adapter_status);
6114	if (!(LINK_IS_UP(val64)))
6115		*data = 1;
6116	else
6117		*data = 0;
6118
6119	return *data;
6120}
6121
6122/**
6123 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6124 * @sp: private member of the device structure, which is a pointer to the
6125 * s2io_nic structure.
6126 * @data: variable that returns the result of each of the test
6127 * conducted by the driver.
6128 * Description:
6129 *  This is one of the offline test that tests the read and write
6130 *  access to the RldRam chip on the NIC.
6131 * Return value:
6132 *  0 on success.
6133 */
6134
static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	int cnt, iteration = 0, test_fail = 0;

	/* Disable ECC so the raw test patterns are not corrected. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/* Put the RLDRAM controller into its dedicated test mode. */
	val64 = readq(&bar0->mc_rldram_test_ctrl);
	val64 |= MC_RLDRAM_TEST_MODE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	val64 |= MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	/* Two passes: the second uses the bit-inverse of the first
	 * pass's patterns (upper 48 bits flipped).
	 */
	while (iteration < 2) {
		val64 = 0x55555555aaaa0000ULL;
		if (iteration == 1)
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d0);

		val64 = 0xaaaa5a5555550000ULL;
		if (iteration == 1)
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d1);

		val64 = 0x55aaaaaaaa5a0000ULL;
		if (iteration == 1)
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d2);

		/* Target address within RLDRAM for the test transfer. */
		val64 = (u64) (0x0000003ffffe0100ULL);
		writeq(val64, &bar0->mc_rldram_test_add);

		/* Start the write phase and poll (up to 1s) for DONE. */
		val64 = MC_RLDRAM_TEST_MODE |
			MC_RLDRAM_TEST_WRITE |
			MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(200);
		}

		if (cnt == 5)
			break;

		/* Start the read-back phase and poll (up to 2.5s). */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(500);
		}

		if (cnt == 5)
			break;

		/* Hardware compares what it read against what it wrote. */
		val64 = readq(&bar0->mc_rldram_test_ctrl);
		if (!(val64 & MC_RLDRAM_TEST_PASS))
			test_fail = 1;

		iteration++;
	}

	/* NOTE(review): a poll timeout (cnt == 5) breaks out with
	 * test_fail still 0, i.e. a hung test is reported as a pass -
	 * confirm whether that is intended.
	 */
	*data = test_fail;

	/* Bring the adapter out of test mode */
	SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);

	return test_fail;
}
6217
6218/**
 *  s2io_ethtool_test - conducts 6 tests to determine the health of card.
6220 *  @sp : private member of the device structure, which is a pointer to the
6221 *  s2io_nic structure.
6222 *  @ethtest : pointer to a ethtool command specific structure that will be
6223 *  returned to the user.
6224 *  @data : variable that returns the result of each of the test
6225 * conducted by the driver.
6226 * Description:
6227 *  This function conducts 6 tests ( 4 offline and 2 online) to determine
6228 *  the health of the card.
6229 * Return value:
6230 *  void
6231 */
6232
static void s2io_ethtool_test(struct net_device *dev,
			      struct ethtool_test *ethtest,
			      uint64_t *data)
{
	struct s2io_nic *sp = netdev_priv(dev);
	/* Remember whether the interface was up so it can be restored. */
	int orig_state = netif_running(sp->dev);

	if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline Tests. */
		/* The interface must be down while the destructive
		 * offline tests run.
		 */
		if (orig_state)
			s2io_close(sp->dev);

		/* data[0]: register read/write test */
		if (s2io_register_test(sp, &data[0]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		/* Reset the card between destructive tests. */
		s2io_reset(sp);

		/* data[3]: RLDRAM access test */
		if (s2io_rldram_test(sp, &data[3]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		s2io_reset(sp);

		/* data[1]: EEPROM programming test */
		if (s2io_eeprom_test(sp, &data[1]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		/* data[4]: PCI built-in self test */
		if (s2io_bist_test(sp, &data[4]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		if (orig_state)
			s2io_open(sp->dev);

		/* data[2] (link test) is not exercised offline. */
		data[2] = 0;
	} else {
		/* Online Tests. */
		if (!orig_state) {
			DBG_PRINT(ERR_DBG, "%s: is not up, cannot run test\n",
				  dev->name);
			/* NOTE(review): these -1 markers are overwritten
			 * by the unconditional assignments below - confirm
			 * whether an early return was intended here.
			 */
			data[0] = -1;
			data[1] = -1;
			data[2] = -1;
			data[3] = -1;
			data[4] = -1;
		}

		/* data[2]: link state check, the only online test */
		if (s2io_link_test(sp, &data[2]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		data[0] = 0;
		data[1] = 0;
		data[3] = 0;
		data[4] = 0;
	}
}
6286
/* Fill tmp_stats[] for ETHTOOL_GSTATS.  The order of the entries here
 * must match the string tables copied out by s2io_ethtool_get_strings()
 * (ethtool_xena_stats_keys, then the Hercules-only enhanced keys, then
 * ethtool_driver_stats_keys).
 */
static void s2io_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *estats,
				   u64 *tmp_stats)
{
	int i = 0, k;
	struct s2io_nic *sp = netdev_priv(dev);
	struct stat_block *stats = sp->mac_control.stats_info;
	struct swStat *swstats = &stats->sw_stat;
	struct xpakStat *xstats = &stats->xpak_stat;

	/* Refresh the DMA'd hardware statistics block first. */
	s2io_updt_stats(sp);
	/* Tx MAC counters.  Many hardware counters are split into a
	 * 32-bit value plus a 32-bit overflow register; they are
	 * recombined here into one 64-bit figure.
	 */
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_frms_oflow) << 32  |
		le32_to_cpu(stats->tmac_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
		le32_to_cpu(stats->tmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_mcst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_bcst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_bcst_frms);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_pause_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stats->tmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_ucst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_nucst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_any_err_frms);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_vld_ip_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_vld_ip_oflow) << 32 |
		le32_to_cpu(stats->tmac_vld_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stats->tmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_icmp_oflow) << 32 |
		le32_to_cpu(stats->tmac_icmp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_rst_tcp_oflow) << 32 |
		le32_to_cpu(stats->tmac_rst_tcp);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_tcp);
	tmp_stats[i++] = (u64)le32_to_cpu(stats->tmac_udp_oflow) << 32 |
		le32_to_cpu(stats->tmac_udp);
	/* Rx MAC counters. */
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_vld_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
		le32_to_cpu(stats->rmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_fcs_err_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_vld_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_vld_bcst_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_vld_bcst_frms);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_in_rng_len_err_frms);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_out_rng_len_err_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_long_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_pause_ctrl_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_unsup_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stats->rmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_accepted_ucst_frms_oflow) << 32
		| le32_to_cpu(stats->rmac_accepted_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_accepted_nucst_frms_oflow)
		<< 32 | le32_to_cpu(stats->rmac_accepted_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_discarded_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_discarded_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_drop_events_oflow)
		<< 32 | le32_to_cpu(stats->rmac_drop_events);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_usized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_osized_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_osized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_frag_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_frag_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_jabber_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_jabber_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_64_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_65_127_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_128_255_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_256_511_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_512_1023_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_1024_1518_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_ip_oflow) << 32 |
		le32_to_cpu(stats->rmac_ip);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ip_octets);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_hdr_err_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stats->rmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_icmp_oflow) << 32 |
		le32_to_cpu(stats->rmac_icmp);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_tcp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_udp_oflow) << 32 |
		le32_to_cpu(stats->rmac_udp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_err_drp_udp_oflow) << 32 |
		le32_to_cpu(stats->rmac_err_drp_udp);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_err_sym);
	/* Per-queue Rx frame and full counters. */
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q0);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q1);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q2);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q3);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q4);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q5);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q6);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q7);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q0);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q1);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q2);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q3);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q4);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q5);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q6);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q7);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_pause_cnt_oflow) << 32 |
		le32_to_cpu(stats->rmac_pause_cnt);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_data_err_cnt);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_ctrl_err_cnt);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_accepted_ip_oflow) << 32 |
		le32_to_cpu(stats->rmac_accepted_ip);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_err_tcp);
	/* PCI read/write request and retry counters. */
	tmp_stats[i++] = le32_to_cpu(stats->rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_rd_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_disc_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_wr_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txp_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rxd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rxd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txf_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rxf_wr_cnt);

	/* Enhanced statistics exist only for Hercules */
	if (sp->device_type == XFRAME_II_DEVICE) {
		tmp_stats[i++] =
			le64_to_cpu(stats->rmac_ttl_1519_4095_frms);
		tmp_stats[i++] =
			le64_to_cpu(stats->rmac_ttl_4096_8191_frms);
		tmp_stats[i++] =
			le64_to_cpu(stats->rmac_ttl_8192_max_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_gt_max_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_osized_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_jabber_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_gt_max_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_vlan_frms);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_len_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_fcs_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_pf_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_da_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_red_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_rts_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_ingm_full_discard);
		tmp_stats[i++] = le32_to_cpu(stats->link_fault_cnt);
	}

	/* Driver-maintained software statistics. */
	tmp_stats[i++] = 0;
	tmp_stats[i++] = swstats->single_ecc_errs;
	tmp_stats[i++] = swstats->double_ecc_errs;
	tmp_stats[i++] = swstats->parity_err_cnt;
	tmp_stats[i++] = swstats->serious_err_cnt;
	tmp_stats[i++] = swstats->soft_reset_cnt;
	tmp_stats[i++] = swstats->fifo_full_cnt;
	for (k = 0; k < MAX_RX_RINGS; k++)
		tmp_stats[i++] = swstats->ring_full_cnt[k];
	/* XPAK transceiver alarm and warning counters. */
	tmp_stats[i++] = xstats->alarm_transceiver_temp_high;
	tmp_stats[i++] = xstats->alarm_transceiver_temp_low;
	tmp_stats[i++] = xstats->alarm_laser_bias_current_high;
	tmp_stats[i++] = xstats->alarm_laser_bias_current_low;
	tmp_stats[i++] = xstats->alarm_laser_output_power_high;
	tmp_stats[i++] = xstats->alarm_laser_output_power_low;
	tmp_stats[i++] = xstats->warn_transceiver_temp_high;
	tmp_stats[i++] = xstats->warn_transceiver_temp_low;
	tmp_stats[i++] = xstats->warn_laser_bias_current_high;
	tmp_stats[i++] = xstats->warn_laser_bias_current_low;
	tmp_stats[i++] = xstats->warn_laser_output_power_high;
	tmp_stats[i++] = xstats->warn_laser_output_power_low;
	/* LRO (large receive offload) software counters. */
	tmp_stats[i++] = swstats->clubbed_frms_cnt;
	tmp_stats[i++] = swstats->sending_both;
	tmp_stats[i++] = swstats->outof_sequence_pkts;
	tmp_stats[i++] = swstats->flush_max_pkts;
	if (swstats->num_aggregations) {
		u64 tmp = swstats->sum_avg_pkts_aggregated;
		int count = 0;
		/*
		 * Since 64-bit divide does not work on all platforms,
		 * do repeated subtraction.
		 */
		while (tmp >= swstats->num_aggregations) {
			tmp -= swstats->num_aggregations;
			count++;
		}
		tmp_stats[i++] = count;
	} else
		tmp_stats[i++] = 0;
	tmp_stats[i++] = swstats->mem_alloc_fail_cnt;
	tmp_stats[i++] = swstats->pci_map_fail_cnt;
	tmp_stats[i++] = swstats->watchdog_timer_cnt;
	tmp_stats[i++] = swstats->mem_allocated;
	tmp_stats[i++] = swstats->mem_freed;
	tmp_stats[i++] = swstats->link_up_cnt;
	tmp_stats[i++] = swstats->link_down_cnt;
	tmp_stats[i++] = swstats->link_up_time;
	tmp_stats[i++] = swstats->link_down_time;

	/* Tx-side error counters. */
	tmp_stats[i++] = swstats->tx_buf_abort_cnt;
	tmp_stats[i++] = swstats->tx_desc_abort_cnt;
	tmp_stats[i++] = swstats->tx_parity_err_cnt;
	tmp_stats[i++] = swstats->tx_link_loss_cnt;
	tmp_stats[i++] = swstats->tx_list_proc_err_cnt;

	/* Rx-side and per-block error counters. */
	tmp_stats[i++] = swstats->rx_parity_err_cnt;
	tmp_stats[i++] = swstats->rx_abort_cnt;
	tmp_stats[i++] = swstats->rx_parity_abort_cnt;
	tmp_stats[i++] = swstats->rx_rda_fail_cnt;
	tmp_stats[i++] = swstats->rx_unkn_prot_cnt;
	tmp_stats[i++] = swstats->rx_fcs_err_cnt;
	tmp_stats[i++] = swstats->rx_buf_size_err_cnt;
	tmp_stats[i++] = swstats->rx_rxd_corrupt_cnt;
	tmp_stats[i++] = swstats->rx_unkn_err_cnt;
	tmp_stats[i++] = swstats->tda_err_cnt;
	tmp_stats[i++] = swstats->pfc_err_cnt;
	tmp_stats[i++] = swstats->pcc_err_cnt;
	tmp_stats[i++] = swstats->tti_err_cnt;
	tmp_stats[i++] = swstats->tpa_err_cnt;
	tmp_stats[i++] = swstats->sm_err_cnt;
	tmp_stats[i++] = swstats->lso_err_cnt;
	tmp_stats[i++] = swstats->mac_tmac_err_cnt;
	tmp_stats[i++] = swstats->mac_rmac_err_cnt;
	tmp_stats[i++] = swstats->xgxs_txgxs_err_cnt;
	tmp_stats[i++] = swstats->xgxs_rxgxs_err_cnt;
	tmp_stats[i++] = swstats->rc_err_cnt;
	tmp_stats[i++] = swstats->prc_pcix_err_cnt;
	tmp_stats[i++] = swstats->rpa_err_cnt;
	tmp_stats[i++] = swstats->rda_err_cnt;
	tmp_stats[i++] = swstats->rti_err_cnt;
	tmp_stats[i++] = swstats->mc_err_cnt;
}
6563
/* Size in bytes of the register dump returned via ethtool get_regs. */
static int s2io_ethtool_get_regs_len(struct net_device *dev)
{
	return XENA_REG_SPACE;
}
6568
6569
/* Size in bytes of the EEPROM exposed via ethtool get_eeprom. */
static int s2io_get_eeprom_len(struct net_device *dev)
{
	return XENA_EEPROM_SPACE;
}
6574
6575static int s2io_get_sset_count(struct net_device *dev, int sset)
6576{
6577	struct s2io_nic *sp = netdev_priv(dev);
6578
6579	switch (sset) {
6580	case ETH_SS_TEST:
6581		return S2IO_TEST_LEN;
6582	case ETH_SS_STATS:
6583		switch (sp->device_type) {
6584		case XFRAME_I_DEVICE:
6585			return XFRAME_I_STAT_LEN;
6586		case XFRAME_II_DEVICE:
6587			return XFRAME_II_STAT_LEN;
6588		default:
6589			return 0;
6590		}
6591	default:
6592		return -EOPNOTSUPP;
6593	}
6594}
6595
6596static void s2io_ethtool_get_strings(struct net_device *dev,
6597				     u32 stringset, u8 *data)
6598{
6599	int stat_size = 0;
6600	struct s2io_nic *sp = netdev_priv(dev);
6601
6602	switch (stringset) {
6603	case ETH_SS_TEST:
6604		memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6605		break;
6606	case ETH_SS_STATS:
6607		stat_size = sizeof(ethtool_xena_stats_keys);
6608		memcpy(data, &ethtool_xena_stats_keys, stat_size);
6609		if (sp->device_type == XFRAME_II_DEVICE) {
6610			memcpy(data + stat_size,
6611			       &ethtool_enhanced_stats_keys,
6612			       sizeof(ethtool_enhanced_stats_keys));
6613			stat_size += sizeof(ethtool_enhanced_stats_keys);
6614		}
6615
6616		memcpy(data + stat_size, &ethtool_driver_stats_keys,
6617		       sizeof(ethtool_driver_stats_keys));
6618	}
6619}
6620
6621static int s2io_set_features(struct net_device *dev, netdev_features_t features)
6622{
6623	struct s2io_nic *sp = netdev_priv(dev);
6624	netdev_features_t changed = (features ^ dev->features) & NETIF_F_LRO;
6625
6626	if (changed && netif_running(dev)) {
6627		int rc;
6628
6629		s2io_stop_all_tx_queue(sp);
6630		s2io_card_down(sp);
6631		dev->features = features;
6632		rc = s2io_card_up(sp);
6633		if (rc)
6634			s2io_reset(sp);
6635		else
6636			s2io_start_all_tx_queue(sp);
6637
6638		return rc ? rc : 1;
6639	}
6640
6641	return 0;
6642}
6643
/* ethtool entry points supported by this driver. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_settings = s2io_ethtool_gset,
	.set_settings = s2io_ethtool_sset,
	.get_drvinfo = s2io_ethtool_gdrvinfo,
	.get_regs_len = s2io_ethtool_get_regs_len,
	.get_regs = s2io_ethtool_gregs,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = s2io_get_eeprom_len,
	.get_eeprom = s2io_ethtool_geeprom,
	.set_eeprom = s2io_ethtool_seeprom,
	.get_ringparam = s2io_ethtool_gringparam,
	.get_pauseparam = s2io_ethtool_getpause_data,
	.set_pauseparam = s2io_ethtool_setpause_data,
	.self_test = s2io_ethtool_test,
	.get_strings = s2io_ethtool_get_strings,
	.set_phys_id = s2io_ethtool_set_led,
	.get_ethtool_stats = s2io_get_ethtool_stats,
	.get_sset_count = s2io_get_sset_count,


};
6663
6664/**
6665 *  s2io_ioctl - Entry point for the Ioctl
6666 *  @dev :  Device pointer.
 *  @ifr :  An IOCTL specific structure, that can contain a pointer to
6668 *  a proprietary structure used to pass information to the driver.
6669 *  @cmd :  This is used to distinguish between the different commands that
6670 *  can be passed to the IOCTL functions.
6671 *  Description:
 *  Currently there is no special functionality supported in IOCTL, hence
 *  this function always returns -EOPNOTSUPP.
6674 */
6675
static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	/* No private ioctls are implemented for this device. */
	return -EOPNOTSUPP;
}
6680
6681/**
6682 *  s2io_change_mtu - entry point to change MTU size for the device.
6683 *   @dev : device pointer.
6684 *   @new_mtu : the new MTU size for the device.
6685 *   Description: A driver entry point to change MTU size for the device.
6686 *   Before changing the MTU the device must be stopped.
6687 *  Return value:
6688 *   0 on success and an appropriate (-)ve integer as defined in errno.h
6689 *   file on failure.
6690 */
6691
6692static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6693{
6694	struct s2io_nic *sp = netdev_priv(dev);
6695	int ret = 0;
6696
6697	if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
6698		DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n", dev->name);
6699		return -EPERM;
6700	}
6701
6702	dev->mtu = new_mtu;
6703	if (netif_running(dev)) {
6704		s2io_stop_all_tx_queue(sp);
6705		s2io_card_down(sp);
6706		ret = s2io_card_up(sp);
6707		if (ret) {
6708			DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6709				  __func__);
6710			return ret;
6711		}
6712		s2io_wake_all_tx_queue(sp);
6713	} else { /* Device is down */
6714		struct XENA_dev_config __iomem *bar0 = sp->bar0;
6715		u64 val64 = new_mtu;
6716
6717		writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6718	}
6719
6720	return ret;
6721}
6722
6723/**
 * s2io_set_link - Set the Link status
 * @work: work_struct embedded in the s2io_nic whose link is to be updated
6726 * Description: Sets the link status for the adapter
6727 */
6728
static void s2io_set_link(struct work_struct *work)
{
	struct s2io_nic *nic = container_of(work, struct s2io_nic,
					    set_link_task);
	struct net_device *dev = nic->dev;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64;
	u16 subid;

	/* Runs from a workqueue: serialize against other net config
	 * changes under the RTNL lock.
	 */
	rtnl_lock();

	if (!netif_running(dev))
		goto out_unlock;

	if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
		/* The card is being reset, no point doing anything */
		goto out_unlock;
	}

	subid = nic->pdev->subsystem_device;
	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Allow a small delay for the NICs self initiated
		 * cleanup to complete.
		 */
		msleep(100);
	}

	val64 = readq(&bar0->adapter_status);
	if (LINK_IS_UP(val64)) {
		/* Link came up: enable the adapter on the first
		 * transition, drive the link LED (or GPIO on cards with
		 * faulty link indicators), then report LINK_UP.
		 */
		if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
			if (verify_xena_quiescence(nic)) {
				val64 = readq(&bar0->adapter_control);
				val64 |= ADAPTER_CNTL_EN;
				writeq(val64, &bar0->adapter_control);
				if (CARDS_WITH_FAULTY_LINK_INDICATORS(
					    nic->device_type, subid)) {
					val64 = readq(&bar0->gpio_control);
					val64 |= GPIO_CTRL_GPIO_0;
					writeq(val64, &bar0->gpio_control);
					/* read back to flush the write */
					val64 = readq(&bar0->gpio_control);
				} else {
					val64 |= ADAPTER_LED_ON;
					writeq(val64, &bar0->adapter_control);
				}
				nic->device_enabled_once = true;
			} else {
				DBG_PRINT(ERR_DBG,
					  "%s: Error: device is not Quiescent\n",
					  dev->name);
				s2io_stop_all_tx_queue(nic);
			}
		}
		val64 = readq(&bar0->adapter_control);
		val64 |= ADAPTER_LED_ON;
		writeq(val64, &bar0->adapter_control);
		s2io_link(nic, LINK_UP);
	} else {
		/* Link went down: clear the indicator and report it. */
		if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
						      subid)) {
			val64 = readq(&bar0->gpio_control);
			val64 &= ~GPIO_CTRL_GPIO_0;
			writeq(val64, &bar0->gpio_control);
			val64 = readq(&bar0->gpio_control);
		}
		/* turn off LED */
		val64 = readq(&bar0->adapter_control);
		val64 = val64 & (~ADAPTER_LED_ON);
		writeq(val64, &bar0->adapter_control);
		s2io_link(nic, LINK_DOWN);
	}
	clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));

out_unlock:
	rtnl_unlock();
}
6805
/*
 * set_rxd_buffer_pointer - reattach (or allocate) receive buffers to an RxD.
 * @sp:    device private structure
 * @rxdp:  receive descriptor to repopulate (only touched while its
 *         Host_Control is 0, i.e. no skb is attached yet)
 * @ba:    buffer-address pair for 2-buffer mode (ba_0/ba_1 copy areas)
 * @skb:   in/out - if *skb is non-NULL its cached DMA mappings are reused;
 *         otherwise a new skb is allocated and returned through this pointer
 * @temp0, @temp1, @temp2: in/out cache of the buffer 0/1/2 DMA addresses so
 *         that subsequent RxDs with a NULL Host_Control can share one mapping
 * @size:  skb allocation size computed by the caller for the ring mode
 *
 * Called from rxd_owner_bit_reset() while the card is being brought down:
 * the frames will never be processed, so one mapped buffer may legitimately
 * back many descriptors.
 *
 * Return: 0 on success, -ENOMEM on skb-allocation or DMA-mapping failure.
 */
static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
				  struct buffAdd *ba,
				  struct sk_buff **skb, u64 *temp0, u64 *temp1,
				  u64 *temp2, int size)
{
	struct net_device *dev = sp->dev;
	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

	if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
		struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
		/* allocate skb */
		if (*skb) {
			DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
			/*
			 * As Rx frame are not going to be processed,
			 * using same mapped address for the Rxd
			 * buffer pointer
			 */
			rxdp1->Buffer0_ptr = *temp0;
		} else {
			*skb = netdev_alloc_skb(dev, size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG,
					  "%s: Out of memory to allocate %s\n",
					  dev->name, "1 buf mode SKBs");
				stats->mem_alloc_fail_cnt++;
				return -ENOMEM ;
			}
			stats->mem_allocated += (*skb)->truesize;
			/* storing the mapped addr in a temp variable
			 * such it will be used for next rxd whose
			 * Host Control is NULL
			 */
			rxdp1->Buffer0_ptr = *temp0 =
				pci_map_single(sp->pdev, (*skb)->data,
					       size - NET_IP_ALIGN,
					       PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr))
				goto memalloc_failed;
			rxdp->Host_Control = (unsigned long) (*skb);
		}
	} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
		struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
		/* Two buffer Mode */
		if (*skb) {
			/* Reuse the mappings cached from a previous call. */
			rxdp3->Buffer2_ptr = *temp2;
			rxdp3->Buffer0_ptr = *temp0;
			rxdp3->Buffer1_ptr = *temp1;
		} else {
			*skb = netdev_alloc_skb(dev, size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG,
					  "%s: Out of memory to allocate %s\n",
					  dev->name,
					  "2 buf mode SKBs");
				stats->mem_alloc_fail_cnt++;
				return -ENOMEM;
			}
			stats->mem_allocated += (*skb)->truesize;
			/* Buffer 2 carries the payload (dev->mtu + 4 bytes). */
			rxdp3->Buffer2_ptr = *temp2 =
				pci_map_single(sp->pdev, (*skb)->data,
					       dev->mtu + 4,
					       PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr))
				goto memalloc_failed;
			/* Buffer 0 maps the header copy area (ba_0). */
			rxdp3->Buffer0_ptr = *temp0 =
				pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
					       PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(sp->pdev,
						  rxdp3->Buffer0_ptr)) {
				/* unwind the buffer 2 mapping before bailing */
				pci_unmap_single(sp->pdev,
						 (dma_addr_t)rxdp3->Buffer2_ptr,
						 dev->mtu + 4,
						 PCI_DMA_FROMDEVICE);
				goto memalloc_failed;
			}
			rxdp->Host_Control = (unsigned long) (*skb);

			/* Buffer-1 will be dummy buffer not used */
			rxdp3->Buffer1_ptr = *temp1 =
				pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
					       PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(sp->pdev,
						  rxdp3->Buffer1_ptr)) {
				/* unwind buffers 0 and 2 before bailing */
				pci_unmap_single(sp->pdev,
						 (dma_addr_t)rxdp3->Buffer0_ptr,
						 BUF0_LEN, PCI_DMA_FROMDEVICE);
				pci_unmap_single(sp->pdev,
						 (dma_addr_t)rxdp3->Buffer2_ptr,
						 dev->mtu + 4,
						 PCI_DMA_FROMDEVICE);
				goto memalloc_failed;
			}
		}
	}
	return 0;

memalloc_failed:
	/*
	 * NOTE(review): *skb is freed but not reset to NULL here; the only
	 * caller (rxd_owner_bit_reset) bails out on -ENOMEM so the stale
	 * pointer is never reused - confirm if new callers are added.
	 */
	stats->pci_map_fail_cnt++;
	stats->mem_freed += (*skb)->truesize;
	dev_kfree_skb(*skb);
	return -ENOMEM;
}
6909
6910static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6911				int size)
6912{
6913	struct net_device *dev = sp->dev;
6914	if (sp->rxd_mode == RXD_MODE_1) {
6915		rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
6916	} else if (sp->rxd_mode == RXD_MODE_3B) {
6917		rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6918		rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6919		rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4);
6920	}
6921}
6922
6923static  int rxd_owner_bit_reset(struct s2io_nic *sp)
6924{
6925	int i, j, k, blk_cnt = 0, size;
6926	struct config_param *config = &sp->config;
6927	struct mac_info *mac_control = &sp->mac_control;
6928	struct net_device *dev = sp->dev;
6929	struct RxD_t *rxdp = NULL;
6930	struct sk_buff *skb = NULL;
6931	struct buffAdd *ba = NULL;
6932	u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6933
6934	/* Calculate the size based on ring mode */
6935	size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6936		HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6937	if (sp->rxd_mode == RXD_MODE_1)
6938		size += NET_IP_ALIGN;
6939	else if (sp->rxd_mode == RXD_MODE_3B)
6940		size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6941
6942	for (i = 0; i < config->rx_ring_num; i++) {
6943		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
6944		struct ring_info *ring = &mac_control->rings[i];
6945
6946		blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1);
6947
6948		for (j = 0; j < blk_cnt; j++) {
6949			for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6950				rxdp = ring->rx_blocks[j].rxds[k].virt_addr;
6951				if (sp->rxd_mode == RXD_MODE_3B)
6952					ba = &ring->ba[j][k];
6953				if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
6954							   &temp0_64,
6955							   &temp1_64,
6956							   &temp2_64,
6957							   size) == -ENOMEM) {
6958					return 0;
6959				}
6960
6961				set_rxd_buffer_size(sp, rxdp, size);
6962				wmb();
6963				/* flip the Ownership bit to Hardware */
6964				rxdp->Control_1 |= RXD_OWN_XENA;
6965			}
6966		}
6967	}
6968	return 0;
6969
6970}
6971
/*
 * s2io_add_isr - enable the configured interrupt mode and register handlers.
 * @sp: device private structure
 *
 * Tries to enable MSI-X when configured, falling back to INTA if vector
 * enabling or any per-vector request_irq() fails.  In MSI-X mode each
 * ring vector is registered with s2io_msix_ring_handle() and the alarm
 * vector with s2io_msix_fifo_handle(); the MSI-X table is snapshotted
 * into the private structure via store_xmsi_data() first.
 *
 * Return: 0 on success, -1 if INTA registration fails.
 */
static int s2io_add_isr(struct s2io_nic *sp)
{
	int ret = 0;
	struct net_device *dev = sp->dev;
	int err = 0;

	if (sp->config.intr_type == MSI_X)
		ret = s2io_enable_msi_x(sp);
	if (ret) {
		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
		sp->config.intr_type = INTA;
	}

	/*
	 * Store the values of the MSIX table in
	 * the struct s2io_nic structure
	 */
	store_xmsi_data(sp);

	/* After proper initialization of H/W, register ISR */
	if (sp->config.intr_type == MSI_X) {
		int i, msix_rx_cnt = 0;

		for (i = 0; i < sp->num_entries; i++) {
			if (sp->s2io_entries[i].in_use == MSIX_FLG) {
				if (sp->s2io_entries[i].type ==
				    MSIX_RING_TYPE) {
					sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
						dev->name, i);
					err = request_irq(sp->entries[i].vector,
							  s2io_msix_ring_handle,
							  0,
							  sp->desc[i],
							  sp->s2io_entries[i].arg);
				} else if (sp->s2io_entries[i].type ==
					   MSIX_ALARM_TYPE) {
					sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
						dev->name, i);
					err = request_irq(sp->entries[i].vector,
							  s2io_msix_fifo_handle,
							  0,
							  sp->desc[i],
							  sp->s2io_entries[i].arg);

				}
				/* if either data or addr is zero print it. */
				if (!(sp->msix_info[i].addr &&
				      sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG,
						  "%s @Addr:0x%llx Data:0x%llx\n",
						  sp->desc[i],
						  (unsigned long long)
						  sp->msix_info[i].addr,
						  (unsigned long long)
						  ntohl(sp->msix_info[i].data));
				} else
					msix_rx_cnt++;
				if (err) {
					/* drop vectors registered so far */
					remove_msix_isr(sp);

					DBG_PRINT(ERR_DBG,
						  "%s:MSI-X-%d registration "
						  "failed\n", dev->name, i);

					DBG_PRINT(ERR_DBG,
						  "%s: Defaulting to INTA\n",
						  dev->name);
					sp->config.intr_type = INTA;
					break;
				}
				sp->s2io_entries[i].in_use =
					MSIX_REGISTERED_SUCCESS;
			}
		}
		if (!err) {
			/*
			 * NOTE(review): the pre-decrement appears to discount
			 * the alarm vector counted above - confirm.
			 */
			pr_info("MSI-X-RX %d entries enabled\n", --msix_rx_cnt);
			DBG_PRINT(INFO_DBG,
				  "MSI-X-TX entries enabled through alarm vector\n");
		}
	}
	if (sp->config.intr_type == INTA) {
		err = request_irq(sp->pdev->irq, s2io_isr, IRQF_SHARED,
				  sp->name, dev);
		if (err) {
			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
				  dev->name);
			return -1;
		}
	}
	return 0;
}
7063
7064static void s2io_rem_isr(struct s2io_nic *sp)
7065{
7066	if (sp->config.intr_type == MSI_X)
7067		remove_msix_isr(sp);
7068	else
7069		remove_inta_isr(sp);
7070}
7071
/*
 * do_s2io_card_down - common teardown path for the adapter.
 * @sp: device private structure
 * @do_io: when non-zero, device registers are touched (stop_nic(),
 *         quiescence polling and s2io_reset()); when zero those steps
 *         are skipped and only software state is torn down
 *
 * Stops the alarm timer, waits for a concurrent s2io_set_link task,
 * disables NAPI and traffic, unregisters the ISR, waits for the device
 * to become quiescent (replenishing RxDs as the hardware requires),
 * resets the NIC and frees all Tx/Rx buffers.
 */
static void do_s2io_card_down(struct s2io_nic *sp, int do_io)
{
	int cnt = 0;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	register u64 val64 = 0;
	struct config_param *config;
	config = &sp->config;

	if (!is_s2io_card_up(sp))
		return;

	del_timer_sync(&sp->alarm_timer);
	/* If s2io_set_link task is executing, wait till it completes. */
	while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state)))
		msleep(50);
	clear_bit(__S2IO_STATE_CARD_UP, &sp->state);

	/* Disable napi */
	if (sp->config.napi) {
		int off = 0;
		if (config->intr_type ==  MSI_X) {
			/* one NAPI context per rx ring in MSI-X mode */
			for (; off < sp->config.rx_ring_num; off++)
				napi_disable(&sp->mac_control.rings[off].napi);
		}
		else
			napi_disable(&sp->napi);
	}

	/* disable Tx and Rx traffic on the NIC */
	if (do_io)
		stop_nic(sp);

	s2io_rem_isr(sp);

	/* stop the tx queue, indicate link down */
	s2io_link(sp, LINK_DOWN);

	/* Check if the device is Quiescent and then Reset the NIC */
	while (do_io) {
		/* As per the HW requirement we need to replenish the
		 * receive buffer to avoid the ring bump. Since there is
		 * no intention of processing the Rx frame at this point we are
		 * just setting the ownership bit of rxd in Each Rx
		 * ring to HW and set the appropriate buffer size
		 * based on the ring mode
		 */
		rxd_owner_bit_reset(sp);

		val64 = readq(&bar0->adapter_status);
		if (verify_xena_quiescence(sp)) {
			if (verify_pcc_quiescent(sp, sp->device_enabled_once))
				break;
		}

		msleep(50);
		cnt++;
		/* give up after 10 tries (~500 ms) and reset anyway */
		if (cnt == 10) {
			DBG_PRINT(ERR_DBG, "Device not Quiescent - "
				  "adapter status reads 0x%llx\n",
				  (unsigned long long)val64);
			break;
		}
	}
	if (do_io)
		s2io_reset(sp);

	/* Free all Tx buffers */
	free_tx_buffers(sp);

	/* Free all Rx buffers */
	free_rx_buffers(sp);

	clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
}
7146
/*
 * s2io_card_down - bring the adapter down, including device register I/O.
 * @sp: device private structure
 */
static void s2io_card_down(struct s2io_nic *sp)
{
	do_s2io_card_down(sp, 1);
}
7151
/*
 * s2io_card_up - bring the adapter to a fully operational state.
 * @sp: device private structure
 *
 * Initializes the hardware registers, fills the receive rings, enables
 * NAPI, restores the receive mode, configures LRO limits, starts the
 * NIC, registers the ISR, arms the alarm timer and finally enables
 * interrupts.
 *
 * Return: 0 on success; negative errno on failure (the NIC is reset and
 * any receive buffers already filled are freed on failure paths).
 */
static int s2io_card_up(struct s2io_nic *sp)
{
	int i, ret = 0;
	struct config_param *config;
	struct mac_info *mac_control;
	struct net_device *dev = sp->dev;
	u16 interruptible;

	/* Initialize the H/W I/O registers */
	ret = init_nic(sp);
	if (ret != 0) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		if (ret != -EIO)
			s2io_reset(sp);
		return ret;
	}

	/*
	 * Initializing the Rx buffers. For now we are considering only 1
	 * Rx ring and initializing buffers into 30 Rx blocks
	 */
	config = &sp->config;
	mac_control = &sp->mac_control;

	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		ring->mtu = dev->mtu;
		ring->lro = !!(dev->features & NETIF_F_LRO);
		ret = fill_rx_buffers(sp, ring, 1);
		if (ret) {
			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
				  dev->name);
			s2io_reset(sp);
			free_rx_buffers(sp);
			return -ENOMEM;
		}
		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
			  ring->rx_bufs_left);
	}

	/* Initialise napi */
	if (config->napi) {
		if (config->intr_type ==  MSI_X) {
			/* one NAPI context per rx ring in MSI-X mode */
			for (i = 0; i < sp->config.rx_ring_num; i++)
				napi_enable(&sp->mac_control.rings[i].napi);
		} else {
			napi_enable(&sp->napi);
		}
	}

	/* Maintain the state prior to the open */
	if (sp->promisc_flg)
		sp->promisc_flg = 0;
	if (sp->m_cast_flg) {
		sp->m_cast_flg = 0;
		sp->all_multi_pos = 0;
	}

	/* Setting its receive mode */
	s2io_set_multicast(dev);

	if (dev->features & NETIF_F_LRO) {
		/* Initialize max aggregatable pkts per session based on MTU */
		sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
		/* Check if we can use (if specified) user provided value */
		if (lro_max_pkts < sp->lro_max_aggr_per_sess)
			sp->lro_max_aggr_per_sess = lro_max_pkts;
	}

	/* Enable Rx Traffic and interrupts on the NIC */
	if (start_nic(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Add interrupt service routine */
	if (s2io_add_isr(sp) != 0) {
		if (sp->config.intr_type == MSI_X)
			s2io_rem_isr(sp);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Poll the alarm handler every half second. */
	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));

	set_bit(__S2IO_STATE_CARD_UP, &sp->state);

	/*  Enable select interrupts */
	en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
	if (sp->config.intr_type != INTA) {
		interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	} else {
		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
		interruptible |= TX_PIC_INTR;
		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	}

	return 0;
}
7257
7258/**
7259 * s2io_restart_nic - Resets the NIC.
7260 * @data : long pointer to the device private structure
7261 * Description:
7262 * This function is scheduled to be run by the s2io_tx_watchdog
7263 * function after 0.5 secs to reset the NIC. The idea is to reduce
7264 * the run time of the watch dog routine which is run holding a
7265 * spin lock.
7266 */
7267
7268static void s2io_restart_nic(struct work_struct *work)
7269{
7270	struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
7271	struct net_device *dev = sp->dev;
7272
7273	rtnl_lock();
7274
7275	if (!netif_running(dev))
7276		goto out_unlock;
7277
7278	s2io_card_down(sp);
7279	if (s2io_card_up(sp)) {
7280		DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name);
7281	}
7282	s2io_wake_all_tx_queue(sp);
7283	DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name);
7284out_unlock:
7285	rtnl_unlock();
7286}
7287
7288/**
7289 *  s2io_tx_watchdog - Watchdog for transmit side.
7290 *  @dev : Pointer to net device structure
7291 *  Description:
7292 *  This function is triggered if the Tx Queue is stopped
7293 *  for a pre-defined amount of time when the Interface is still up.
7294 *  If the Interface is jammed in such a situation, the hardware is
7295 *  reset (by s2io_close) and restarted again (by s2io_open) to
7296 *  overcome any problem that might have been caused in the hardware.
7297 *  Return value:
7298 *  void
7299 */
7300
7301static void s2io_tx_watchdog(struct net_device *dev)
7302{
7303	struct s2io_nic *sp = netdev_priv(dev);
7304	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7305
7306	if (netif_carrier_ok(dev)) {
7307		swstats->watchdog_timer_cnt++;
7308		schedule_work(&sp->rst_timer_task);
7309		swstats->soft_reset_cnt++;
7310	}
7311}
7312
7313/**
7314 *   rx_osm_handler - To perform some OS related operations on SKB.
7315 *   @sp: private member of the device structure,pointer to s2io_nic structure.
7316 *   @skb : the socket buffer pointer.
7317 *   @len : length of the packet
7318 *   @cksum : FCS checksum of the frame.
7319 *   @ring_no : the ring from which this RxD was extracted.
7320 *   Description:
 *   This function is called by the Rx interrupt service routine to perform
7322 *   some OS related operations on the SKB before passing it to the upper
7323 *   layers. It mainly checks if the checksum is OK, if so adds it to the
7324 *   SKBs cksum variable, increments the Rx packet count and passes the SKB
7325 *   to the upper layer. If the checksum is wrong, it increments the Rx
7326 *   packet error count, frees the SKB and returns error.
7327 *   Return value:
7328 *   SUCCESS on success and -1 on failure.
7329 */
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
{
	struct s2io_nic *sp = ring_data->nic;
	struct net_device *dev = ring_data->dev;
	struct sk_buff *skb = (struct sk_buff *)
		((unsigned long)rxdp->Host_Control);
	int ring_no = ring_data->ring_no;
	u16 l3_csum, l4_csum;
	/* hardware transfer code extracted from the descriptor */
	unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
	struct lro *uninitialized_var(lro);
	u8 err_mask;
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

	skb->dev = dev;

	if (err) {
		/* Check for parity error */
		if (err & 0x1)
			swstats->parity_err_cnt++;

		/* account the specific transfer-code error */
		err_mask = err >> 48;
		switch (err_mask) {
		case 1:
			swstats->rx_parity_err_cnt++;
			break;

		case 2:
			swstats->rx_abort_cnt++;
			break;

		case 3:
			swstats->rx_parity_abort_cnt++;
			break;

		case 4:
			swstats->rx_rda_fail_cnt++;
			break;

		case 5:
			swstats->rx_unkn_prot_cnt++;
			break;

		case 6:
			swstats->rx_fcs_err_cnt++;
			break;

		case 7:
			swstats->rx_buf_size_err_cnt++;
			break;

		case 8:
			swstats->rx_rxd_corrupt_cnt++;
			break;

		case 15:
			swstats->rx_unkn_err_cnt++;
			break;
		}
		/*
		 * Drop the packet if bad transfer code. Exception being
		 * 0x5, which could be due to unsupported IPv6 extension header.
		 * In this case, we let stack handle the packet.
		 * Note that in this case, since checksum will be incorrect,
		 * stack will validate the same.
		 */
		if (err_mask != 0x5) {
			DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
				  dev->name, err_mask);
			dev->stats.rx_crc_errors++;
			swstats->mem_freed
				+= skb->truesize;
			dev_kfree_skb(skb);
			ring_data->rx_bufs_left -= 1;
			rxdp->Host_Control = 0;
			return 0;
		}
	}

	rxdp->Host_Control = 0;
	if (sp->rxd_mode == RXD_MODE_1) {
		/* single-buffer mode: the whole frame is in the skb data */
		int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);

		skb_put(skb, len);
	} else if (sp->rxd_mode == RXD_MODE_3B) {
		/*
		 * two-buffer mode: prepend the header from the ba_0 copy
		 * area (buffer 0) in front of the payload (buffer 2)
		 */
		int get_block = ring_data->rx_curr_get_info.block_index;
		int get_off = ring_data->rx_curr_get_info.offset;
		int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
		int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
		unsigned char *buff = skb_push(skb, buf0_len);

		struct buffAdd *ba = &ring_data->ba[get_block][get_off];
		memcpy(buff, ba->ba_0, buf0_len);
		skb_put(skb, buf2_len);
	}

	/* only trust the HW checksum for non-fragmented TCP/UDP frames */
	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
	    ((!ring_data->lro) ||
	     (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
	    (dev->features & NETIF_F_RXCSUM)) {
		l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
			/*
			 * NIC verifies if the Checksum of the received
			 * frame is Ok or not and accordingly returns
			 * a flag in the RxD.
			 */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			if (ring_data->lro) {
				u32 tcp_len = 0;
				u8 *tcp;
				int ret = 0;

				/* decide how this frame joins an LRO session */
				ret = s2io_club_tcp_session(ring_data,
							    skb->data, &tcp,
							    &tcp_len, &lro,
							    rxdp, sp);
				switch (ret) {
				case 3: /* Begin anew */
					lro->parent = skb;
					goto aggregate;
				case 1: /* Aggregate */
					lro_append_pkt(sp, lro, skb, tcp_len);
					goto aggregate;
				case 4: /* Flush session */
					lro_append_pkt(sp, lro, skb, tcp_len);
					queue_rx_frame(lro->parent,
						       lro->vlan_tag);
					clear_lro_session(lro);
					swstats->flush_max_pkts++;
					goto aggregate;
				case 2: /* Flush both */
					lro->parent->data_len = lro->frags_len;
					swstats->sending_both++;
					queue_rx_frame(lro->parent,
						       lro->vlan_tag);
					clear_lro_session(lro);
					goto send_up;
				case 0: /* sessions exceeded */
				case -1: /* non-TCP or not L2 aggregatable */
				case 5: /*
					 * First pkt in session not
					 * L3/L4 aggregatable
					 */
					break;
				default:
					DBG_PRINT(ERR_DBG,
						  "%s: Samadhana!!\n",
						  __func__);
					BUG();
				}
			}
		} else {
			/*
			 * Packet with erroneous checksum, let the
			 * upper layers deal with it.
			 */
			skb_checksum_none_assert(skb);
		}
	} else
		skb_checksum_none_assert(skb);

	swstats->mem_freed += skb->truesize;
send_up:
	skb_record_rx_queue(skb, ring_no);
	queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
aggregate:
	sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
	return SUCCESS;
}
7500
7501/**
7502 *  s2io_link - stops/starts the Tx queue.
7503 *  @sp : private member of the device structure, which is a pointer to the
7504 *  s2io_nic structure.
 *  @link : indicates whether link is UP/DOWN.
7506 *  Description:
7507 *  This function stops/starts the Tx queue depending on whether the link
 *  status of the NIC is down or up. This is called by the Alarm
7509 *  interrupt handler whenever a link change interrupt comes up.
7510 *  Return value:
7511 *  void.
7512 */
7513
7514static void s2io_link(struct s2io_nic *sp, int link)
7515{
7516	struct net_device *dev = sp->dev;
7517	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7518
7519	if (link != sp->last_link_state) {
7520		init_tti(sp, link);
7521		if (link == LINK_DOWN) {
7522			DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7523			s2io_stop_all_tx_queue(sp);
7524			netif_carrier_off(dev);
7525			if (swstats->link_up_cnt)
7526				swstats->link_up_time =
7527					jiffies - sp->start_time;
7528			swstats->link_down_cnt++;
7529		} else {
7530			DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7531			if (swstats->link_down_cnt)
7532				swstats->link_down_time =
7533					jiffies - sp->start_time;
7534			swstats->link_up_cnt++;
7535			netif_carrier_on(dev);
7536			s2io_wake_all_tx_queue(sp);
7537		}
7538	}
7539	sp->last_link_state = link;
7540	sp->start_time = jiffies;
7541}
7542
7543/**
7544 *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7545 *  @sp : private member of the device structure, which is a pointer to the
7546 *  s2io_nic structure.
7547 *  Description:
7548 *  This function initializes a few of the PCI and PCI-X configuration registers
7549 *  with recommended values.
7550 *  Return value:
7551 *  void
7552 */
7553
7554static void s2io_init_pci(struct s2io_nic *sp)
7555{
7556	u16 pci_cmd = 0, pcix_cmd = 0;
7557
7558	/* Enable Data Parity Error Recovery in PCI-X command register. */
7559	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7560			     &(pcix_cmd));
7561	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7562			      (pcix_cmd | 1));
7563	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7564			     &(pcix_cmd));
7565
7566	/* Set the PErr Response bit in PCI command register. */
7567	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7568	pci_write_config_word(sp->pdev, PCI_COMMAND,
7569			      (pci_cmd | PCI_COMMAND_PARITY));
7570	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7571}
7572
7573static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7574			    u8 *dev_multiq)
7575{
7576	int i;
7577
7578	if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
7579		DBG_PRINT(ERR_DBG, "Requested number of tx fifos "
7580			  "(%d) not supported\n", tx_fifo_num);
7581
7582		if (tx_fifo_num < 1)
7583			tx_fifo_num = 1;
7584		else
7585			tx_fifo_num = MAX_TX_FIFOS;
7586
7587		DBG_PRINT(ERR_DBG, "Default to %d tx fifos\n", tx_fifo_num);
7588	}
7589
7590	if (multiq)
7591		*dev_multiq = multiq;
7592
7593	if (tx_steering_type && (1 == tx_fifo_num)) {
7594		if (tx_steering_type != TX_DEFAULT_STEERING)
7595			DBG_PRINT(ERR_DBG,
7596				  "Tx steering is not supported with "
7597				  "one fifo. Disabling Tx steering.\n");
7598		tx_steering_type = NO_STEERING;
7599	}
7600
7601	if ((tx_steering_type < NO_STEERING) ||
7602	    (tx_steering_type > TX_DEFAULT_STEERING)) {
7603		DBG_PRINT(ERR_DBG,
7604			  "Requested transmit steering not supported\n");
7605		DBG_PRINT(ERR_DBG, "Disabling transmit steering\n");
7606		tx_steering_type = NO_STEERING;
7607	}
7608
7609	if (rx_ring_num > MAX_RX_RINGS) {
7610		DBG_PRINT(ERR_DBG,
7611			  "Requested number of rx rings not supported\n");
7612		DBG_PRINT(ERR_DBG, "Default to %d rx rings\n",
7613			  MAX_RX_RINGS);
7614		rx_ring_num = MAX_RX_RINGS;
7615	}
7616
7617	if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7618		DBG_PRINT(ERR_DBG, "Wrong intr_type requested. "
7619			  "Defaulting to INTA\n");
7620		*dev_intr_type = INTA;
7621	}
7622
7623	if ((*dev_intr_type == MSI_X) &&
7624	    ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7625	     (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7626		DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. "
7627			  "Defaulting to INTA\n");
7628		*dev_intr_type = INTA;
7629	}
7630
7631	if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7632		DBG_PRINT(ERR_DBG, "Requested ring mode not supported\n");
7633		DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
7634		rx_ring_mode = 1;
7635	}
7636
7637	for (i = 0; i < MAX_RX_RINGS; i++)
7638		if (rx_ring_sz[i] > MAX_RX_BLOCKS_PER_RING) {
7639			DBG_PRINT(ERR_DBG, "Requested rx ring size not "
7640				  "supported\nDefaulting to %d\n",
7641				  MAX_RX_BLOCKS_PER_RING);
7642			rx_ring_sz[i] = MAX_RX_BLOCKS_PER_RING;
7643		}
7644
7645	return SUCCESS;
7646}
7647
7648/**
7649 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7650 * or Traffic class respectively.
7651 * @nic: device private variable
7652 * Description: The function configures the receive steering to
7653 * desired receive ring.
7654 * Return Value:  SUCCESS on success and
7655 * '-1' on failure (endian settings incorrect).
7656 */
7657static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7658{
7659	struct XENA_dev_config __iomem *bar0 = nic->bar0;
7660	register u64 val64 = 0;
7661
7662	if (ds_codepoint > 63)
7663		return FAILURE;
7664
7665	val64 = RTS_DS_MEM_DATA(ring);
7666	writeq(val64, &bar0->rts_ds_mem_data);
7667
7668	val64 = RTS_DS_MEM_CTRL_WE |
7669		RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7670		RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7671
7672	writeq(val64, &bar0->rts_ds_mem_ctrl);
7673
7674	return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7675				     RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7676				     S2IO_BIT_RESET);
7677}
7678
/* net_device callbacks wired up for every s2io interface */
static const struct net_device_ops s2io_netdev_ops = {
	.ndo_open	        = s2io_open,
	.ndo_stop	        = s2io_close,
	.ndo_get_stats	        = s2io_get_stats,
	.ndo_start_xmit    	= s2io_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= s2io_set_multicast,
	.ndo_do_ioctl	   	= s2io_ioctl,
	.ndo_set_mac_address    = s2io_set_mac_addr,
	.ndo_change_mtu	   	= s2io_change_mtu,
	.ndo_set_features	= s2io_set_features,
	.ndo_tx_timeout	   	= s2io_tx_watchdog,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = s2io_netpoll,
#endif
};
7695
7696/**
7697 *  s2io_init_nic - Initialization of the adapter .
7698 *  @pdev : structure containing the PCI related information of the device.
7699 *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7700 *  Description:
 *  The function initializes an adapter identified by the pci_dev structure.
7702 *  All OS related initialization including memory and device structure and
 *  initialization of the device private variable is done. Also the swapper
7704 *  control register is initialized to enable read and write into the I/O
7705 *  registers of the device.
7706 *  Return value:
7707 *  returns 0 on success and negative on failure.
7708 */
7709
7710static int
7711s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7712{
7713	struct s2io_nic *sp;
7714	struct net_device *dev;
7715	int i, j, ret;
7716	int dma_flag = false;
7717	u32 mac_up, mac_down;
7718	u64 val64 = 0, tmp64 = 0;
7719	struct XENA_dev_config __iomem *bar0 = NULL;
7720	u16 subid;
7721	struct config_param *config;
7722	struct mac_info *mac_control;
7723	int mode;
7724	u8 dev_intr_type = intr_type;
7725	u8 dev_multiq = 0;
7726
7727	ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7728	if (ret)
7729		return ret;
7730
7731	ret = pci_enable_device(pdev);
7732	if (ret) {
7733		DBG_PRINT(ERR_DBG,
7734			  "%s: pci_enable_device failed\n", __func__);
7735		return ret;
7736	}
7737
7738	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
7739		DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__);
7740		dma_flag = true;
7741		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
7742			DBG_PRINT(ERR_DBG,
7743				  "Unable to obtain 64bit DMA "
7744				  "for consistent allocations\n");
7745			pci_disable_device(pdev);
7746			return -ENOMEM;
7747		}
7748	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
7749		DBG_PRINT(INIT_DBG, "%s: Using 32bit DMA\n", __func__);
7750	} else {
7751		pci_disable_device(pdev);
7752		return -ENOMEM;
7753	}
7754	ret = pci_request_regions(pdev, s2io_driver_name);
7755	if (ret) {
7756		DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x\n",
7757			  __func__, ret);
7758		pci_disable_device(pdev);
7759		return -ENODEV;
7760	}
7761	if (dev_multiq)
7762		dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
7763	else
7764		dev = alloc_etherdev(sizeof(struct s2io_nic));
7765	if (dev == NULL) {
7766		pci_disable_device(pdev);
7767		pci_release_regions(pdev);
7768		return -ENODEV;
7769	}
7770
7771	pci_set_master(pdev);
7772	pci_set_drvdata(pdev, dev);
7773	SET_NETDEV_DEV(dev, &pdev->dev);
7774
7775	/*  Private member variable initialized to s2io NIC structure */
7776	sp = netdev_priv(dev);
7777	sp->dev = dev;
7778	sp->pdev = pdev;
7779	sp->high_dma_flag = dma_flag;
7780	sp->device_enabled_once = false;
7781	if (rx_ring_mode == 1)
7782		sp->rxd_mode = RXD_MODE_1;
7783	if (rx_ring_mode == 2)
7784		sp->rxd_mode = RXD_MODE_3B;
7785
7786	sp->config.intr_type = dev_intr_type;
7787
7788	if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7789	    (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7790		sp->device_type = XFRAME_II_DEVICE;
7791	else
7792		sp->device_type = XFRAME_I_DEVICE;
7793
7794
7795	/* Initialize some PCI/PCI-X fields of the NIC. */
7796	s2io_init_pci(sp);
7797
7798	/*
7799	 * Setting the device configuration parameters.
7800	 * Most of these parameters can be specified by the user during
7801	 * module insertion as they are module loadable parameters. If
7802	 * these parameters are not not specified during load time, they
7803	 * are initialized with default values.
7804	 */
7805	config = &sp->config;
7806	mac_control = &sp->mac_control;
7807
7808	config->napi = napi;
7809	config->tx_steering_type = tx_steering_type;
7810
7811	/* Tx side parameters. */
7812	if (config->tx_steering_type == TX_PRIORITY_STEERING)
7813		config->tx_fifo_num = MAX_TX_FIFOS;
7814	else
7815		config->tx_fifo_num = tx_fifo_num;
7816
7817	/* Initialize the fifos used for tx steering */
7818	if (config->tx_fifo_num < 5) {
7819		if (config->tx_fifo_num  == 1)
7820			sp->total_tcp_fifos = 1;
7821		else
7822			sp->total_tcp_fifos = config->tx_fifo_num - 1;
7823		sp->udp_fifo_idx = config->tx_fifo_num - 1;
7824		sp->total_udp_fifos = 1;
7825		sp->other_fifo_idx = sp->total_tcp_fifos - 1;
7826	} else {
7827		sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
7828				       FIFO_OTHER_MAX_NUM);
7829		sp->udp_fifo_idx = sp->total_tcp_fifos;
7830		sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
7831		sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
7832	}
7833
7834	config->multiq = dev_multiq;
7835	for (i = 0; i < config->tx_fifo_num; i++) {
7836		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7837
7838		tx_cfg->fifo_len = tx_fifo_len[i];
7839		tx_cfg->fifo_priority = i;
7840	}
7841
7842	/* mapping the QoS priority to the configured fifos */
7843	for (i = 0; i < MAX_TX_FIFOS; i++)
7844		config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
7845
7846	/* map the hashing selector table to the configured fifos */
7847	for (i = 0; i < config->tx_fifo_num; i++)
7848		sp->fifo_selector[i] = fifo_selector[i];
7849
7850
7851	config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7852	for (i = 0; i < config->tx_fifo_num; i++) {
7853		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7854
7855		tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7856		if (tx_cfg->fifo_len < 65) {
7857			config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7858			break;
7859		}
7860	}
7861	/* + 2 because one Txd for skb->data and one Txd for UFO */
7862	config->max_txds = MAX_SKB_FRAGS + 2;
7863
7864	/* Rx side parameters. */
7865	config->rx_ring_num = rx_ring_num;
7866	for (i = 0; i < config->rx_ring_num; i++) {
7867		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7868		struct ring_info *ring = &mac_control->rings[i];
7869
7870		rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
7871		rx_cfg->ring_priority = i;
7872		ring->rx_bufs_left = 0;
7873		ring->rxd_mode = sp->rxd_mode;
7874		ring->rxd_count = rxd_count[sp->rxd_mode];
7875		ring->pdev = sp->pdev;
7876		ring->dev = sp->dev;
7877	}
7878
7879	for (i = 0; i < rx_ring_num; i++) {
7880		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7881
7882		rx_cfg->ring_org = RING_ORG_BUFF1;
7883		rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7884	}
7885
7886	/*  Setting Mac Control parameters */
7887	mac_control->rmac_pause_time = rmac_pause_time;
7888	mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7889	mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7890
7891
7892	/*  initialize the shared memory used by the NIC and the host */
7893	if (init_shared_mem(sp)) {
7894		DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name);
7895		ret = -ENOMEM;
7896		goto mem_alloc_failed;
7897	}
7898
7899	sp->bar0 = pci_ioremap_bar(pdev, 0);
7900	if (!sp->bar0) {
7901		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7902			  dev->name);
7903		ret = -ENOMEM;
7904		goto bar0_remap_failed;
7905	}
7906
7907	sp->bar1 = pci_ioremap_bar(pdev, 2);
7908	if (!sp->bar1) {
7909		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7910			  dev->name);
7911		ret = -ENOMEM;
7912		goto bar1_remap_failed;
7913	}
7914
7915	/* Initializing the BAR1 address as the start of the FIFO pointer. */
7916	for (j = 0; j < MAX_TX_FIFOS; j++) {
7917		mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000);
7918	}
7919
7920	/*  Driver entry points */
7921	dev->netdev_ops = &s2io_netdev_ops;
7922	SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
7923	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
7924		NETIF_F_TSO | NETIF_F_TSO6 |
7925		NETIF_F_RXCSUM | NETIF_F_LRO;
7926	dev->features |= dev->hw_features |
7927		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
7928	if (sp->device_type & XFRAME_II_DEVICE) {
7929		dev->hw_features |= NETIF_F_UFO;
7930		if (ufo)
7931			dev->features |= NETIF_F_UFO;
7932	}
7933	if (sp->high_dma_flag == true)
7934		dev->features |= NETIF_F_HIGHDMA;
7935	dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7936	INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7937	INIT_WORK(&sp->set_link_task, s2io_set_link);
7938
7939	pci_save_state(sp->pdev);
7940
7941	/* Setting swapper control on the NIC, for proper reset operation */
7942	if (s2io_set_swapper(sp)) {
7943		DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
7944			  dev->name);
7945		ret = -EAGAIN;
7946		goto set_swap_failed;
7947	}
7948
7949	/* Verify if the Herc works on the slot its placed into */
7950	if (sp->device_type & XFRAME_II_DEVICE) {
7951		mode = s2io_verify_pci_mode(sp);
7952		if (mode < 0) {
7953			DBG_PRINT(ERR_DBG, "%s: Unsupported PCI bus mode\n",
7954				  __func__);
7955			ret = -EBADSLT;
7956			goto set_swap_failed;
7957		}
7958	}
7959
7960	if (sp->config.intr_type == MSI_X) {
7961		sp->num_entries = config->rx_ring_num + 1;
7962		ret = s2io_enable_msi_x(sp);
7963
7964		if (!ret) {
7965			ret = s2io_test_msi(sp);
7966			/* rollback MSI-X, will re-enable during add_isr() */
7967			remove_msix_isr(sp);
7968		}
7969		if (ret) {
7970
7971			DBG_PRINT(ERR_DBG,
7972				  "MSI-X requested but failed to enable\n");
7973			sp->config.intr_type = INTA;
7974		}
7975	}
7976
7977	if (config->intr_type ==  MSI_X) {
7978		for (i = 0; i < config->rx_ring_num ; i++) {
7979			struct ring_info *ring = &mac_control->rings[i];
7980
7981			netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64);
7982		}
7983	} else {
7984		netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
7985	}
7986
7987	/* Not needed for Herc */
7988	if (sp->device_type & XFRAME_I_DEVICE) {
7989		/*
7990		 * Fix for all "FFs" MAC address problems observed on
7991		 * Alpha platforms
7992		 */
7993		fix_mac_address(sp);
7994		s2io_reset(sp);
7995	}
7996
7997	/*
7998	 * MAC address initialization.
7999	 * For now only one mac address will be read and used.
8000	 */
8001	bar0 = sp->bar0;
8002	val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
8003		RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
8004	writeq(val64, &bar0->rmac_addr_cmd_mem);
8005	wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
8006			      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
8007			      S2IO_BIT_RESET);
8008	tmp64 = readq(&bar0->rmac_addr_data0_mem);
8009	mac_down = (u32)tmp64;
8010	mac_up = (u32) (tmp64 >> 32);
8011
8012	sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
8013	sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
8014	sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
8015	sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
8016	sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
8017	sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
8018
8019	/*  Set the factory defined MAC address initially   */
8020	dev->addr_len = ETH_ALEN;
8021	memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
8022
8023	/* initialize number of multicast & unicast MAC entries variables */
8024	if (sp->device_type == XFRAME_I_DEVICE) {
8025		config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
8026		config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
8027		config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
8028	} else if (sp->device_type == XFRAME_II_DEVICE) {
8029		config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
8030		config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
8031		config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
8032	}
8033
 
 
 
 
8034	/* store mac addresses from CAM to s2io_nic structure */
8035	do_s2io_store_unicast_mc(sp);
8036
8037	/* Configure MSIX vector for number of rings configured plus one */
8038	if ((sp->device_type == XFRAME_II_DEVICE) &&
8039	    (config->intr_type == MSI_X))
8040		sp->num_entries = config->rx_ring_num + 1;
8041
8042	/* Store the values of the MSIX table in the s2io_nic structure */
8043	store_xmsi_data(sp);
8044	/* reset Nic and bring it to known state */
8045	s2io_reset(sp);
8046
8047	/*
8048	 * Initialize link state flags
8049	 * and the card state parameter
8050	 */
8051	sp->state = 0;
8052
8053	/* Initialize spinlocks */
8054	for (i = 0; i < sp->config.tx_fifo_num; i++) {
8055		struct fifo_info *fifo = &mac_control->fifos[i];
8056
8057		spin_lock_init(&fifo->tx_lock);
8058	}
8059
8060	/*
8061	 * SXE-002: Configure link and activity LED to init state
8062	 * on driver load.
8063	 */
8064	subid = sp->pdev->subsystem_device;
8065	if ((subid & 0xFF) >= 0x07) {
8066		val64 = readq(&bar0->gpio_control);
8067		val64 |= 0x0000800000000000ULL;
8068		writeq(val64, &bar0->gpio_control);
8069		val64 = 0x0411040400000000ULL;
8070		writeq(val64, (void __iomem *)bar0 + 0x2700);
8071		val64 = readq(&bar0->gpio_control);
8072	}
8073
8074	sp->rx_csum = 1;	/* Rx chksum verify enabled by default */
8075
8076	if (register_netdev(dev)) {
8077		DBG_PRINT(ERR_DBG, "Device registration failed\n");
8078		ret = -ENODEV;
8079		goto register_failed;
8080	}
8081	s2io_vpd_read(sp);
8082	DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n");
8083	DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name,
8084		  sp->product_name, pdev->revision);
8085	DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
8086		  s2io_driver_version);
8087	DBG_PRINT(ERR_DBG, "%s: MAC Address: %pM\n", dev->name, dev->dev_addr);
8088	DBG_PRINT(ERR_DBG, "Serial number: %s\n", sp->serial_num);
8089	if (sp->device_type & XFRAME_II_DEVICE) {
8090		mode = s2io_print_pci_mode(sp);
8091		if (mode < 0) {
8092			ret = -EBADSLT;
8093			unregister_netdev(dev);
8094			goto set_swap_failed;
8095		}
8096	}
8097	switch (sp->rxd_mode) {
8098	case RXD_MODE_1:
8099		DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
8100			  dev->name);
8101		break;
8102	case RXD_MODE_3B:
8103		DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
8104			  dev->name);
8105		break;
8106	}
8107
8108	switch (sp->config.napi) {
8109	case 0:
8110		DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
8111		break;
8112	case 1:
8113		DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
8114		break;
8115	}
8116
8117	DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
8118		  sp->config.tx_fifo_num);
8119
8120	DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
8121		  sp->config.rx_ring_num);
8122
8123	switch (sp->config.intr_type) {
8124	case INTA:
8125		DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
8126		break;
8127	case MSI_X:
8128		DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
8129		break;
8130	}
8131	if (sp->config.multiq) {
8132		for (i = 0; i < sp->config.tx_fifo_num; i++) {
8133			struct fifo_info *fifo = &mac_control->fifos[i];
8134
8135			fifo->multiq = config->multiq;
8136		}
8137		DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
8138			  dev->name);
8139	} else
8140		DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
8141			  dev->name);
8142
8143	switch (sp->config.tx_steering_type) {
8144	case NO_STEERING:
8145		DBG_PRINT(ERR_DBG, "%s: No steering enabled for transmit\n",
8146			  dev->name);
8147		break;
8148	case TX_PRIORITY_STEERING:
8149		DBG_PRINT(ERR_DBG,
8150			  "%s: Priority steering enabled for transmit\n",
8151			  dev->name);
8152		break;
8153	case TX_DEFAULT_STEERING:
8154		DBG_PRINT(ERR_DBG,
8155			  "%s: Default steering enabled for transmit\n",
8156			  dev->name);
8157	}
8158
8159	DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
8160		  dev->name);
8161	if (ufo)
8162		DBG_PRINT(ERR_DBG,
8163			  "%s: UDP Fragmentation Offload(UFO) enabled\n",
8164			  dev->name);
8165	/* Initialize device name */
8166	sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
 
8167
8168	if (vlan_tag_strip)
8169		sp->vlan_strip_flag = 1;
8170	else
8171		sp->vlan_strip_flag = 0;
8172
8173	/*
8174	 * Make Link state as off at this point, when the Link change
8175	 * interrupt comes the state will be automatically changed to
8176	 * the right state.
8177	 */
8178	netif_carrier_off(dev);
8179
8180	return 0;
8181
8182register_failed:
8183set_swap_failed:
8184	iounmap(sp->bar1);
8185bar1_remap_failed:
8186	iounmap(sp->bar0);
8187bar0_remap_failed:
8188mem_alloc_failed:
8189	free_shared_mem(sp);
8190	pci_disable_device(pdev);
8191	pci_release_regions(pdev);
8192	free_netdev(dev);
8193
8194	return ret;
8195}
8196
8197/**
8198 * s2io_rem_nic - Free the PCI device
8199 * @pdev: structure containing the PCI related information of the device.
8200 * Description: This function is called by the Pci subsystem to release a
8201 * PCI device and free up all resource held up by the device. This could
8202 * be in response to a Hot plug event or when the driver is to be removed
8203 * from memory.
8204 */
8205
static void s2io_rem_nic(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct s2io_nic *sp;

	if (dev == NULL) {
		DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
		return;
	}

	sp = netdev_priv(dev);

	/* Stop deferred work first so the handlers cannot run against a
	 * device that is being torn down below. */
	cancel_work_sync(&sp->rst_timer_task);
	cancel_work_sync(&sp->set_link_task);

	unregister_netdev(dev);

	/* Release resources in reverse order of acquisition in the probe. */
	free_shared_mem(sp);
	iounmap(sp->bar0);
	iounmap(sp->bar1);
	pci_release_regions(pdev);
	free_netdev(dev);
	pci_disable_device(pdev);
}
8230
8231/**
8232 * s2io_starter - Entry point for the driver
8233 * Description: This function is the entry point for the driver. It verifies
8234 * the module loadable parameters and initializes PCI configuration space.
8235 */
8236
static int __init s2io_starter(void)
{
	/* Per-device initialization happens in the PCI probe callback. */
	return pci_register_driver(&s2io_driver);
}
8241
8242/**
8243 * s2io_closer - Cleanup routine for the driver
8244 * Description: This function is the cleanup routine for the driver. It
8245 * unregisters the driver.
8246 */
8247
static __exit void s2io_closer(void)
{
	/* Unregistering triggers s2io_rem_nic() for every bound device. */
	pci_unregister_driver(&s2io_driver);
	DBG_PRINT(INIT_DBG, "cleanup done\n");
}
8253
/* Module entry/exit points. */
module_init(s2io_starter);
module_exit(s2io_closer);
8256
/**
 * check_L2_lro_capable - validate the L2 encapsulation for LRO
 * @buffer: start of the received frame (L2 header)
 * @ip: out - set to the IP header inside @buffer on success
 * @tcp: out - set to the TCP header following the IP header
 * @rxdp: receive descriptor carrying the HW-parsed frame attributes
 * @sp: device private structure
 *
 * Returns 0 and fills @ip/@tcp if the frame is a TCP frame in a DIX
 * style encapsulation (plain or VLAN tagged); returns -1 otherwise.
 */
static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
				struct tcphdr **tcp, struct RxD_t *rxdp,
				struct s2io_nic *sp)
{
	int ip_off;
	/* Bits 37..39 of Control_1 hold the hardware-decoded L2 type. */
	u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;

	if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
		DBG_PRINT(INIT_DBG,
			  "%s: Non-TCP frames not supported for LRO\n",
			  __func__);
		return -1;
	}

	/* Checking for DIX type or DIX type with VLAN */
	if ((l2_type == 0) || (l2_type == 4)) {
		ip_off = HEADER_ETHERNET_II_802_3_SIZE;
		/*
		 * If vlan stripping is disabled and the frame is VLAN tagged,
		 * shift the offset by the VLAN header size bytes.
		 */
		if ((!sp->vlan_strip_flag) &&
		    (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
			ip_off += HEADER_VLAN_SIZE;
	} else {
		/* LLC, SNAP etc are considered non-mergeable */
		return -1;
	}

	*ip = (struct iphdr *)(buffer + ip_off);
	/* IHL is in 32-bit words; shift converts it to a byte count. */
	ip_len = (u8)((*ip)->ihl);
	ip_len <<= 2;
	*tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);

	return 0;
}
8293
8294static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8295				  struct tcphdr *tcp)
8296{
8297	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8298	if ((lro->iph->saddr != ip->saddr) ||
8299	    (lro->iph->daddr != ip->daddr) ||
8300	    (lro->tcph->source != tcp->source) ||
8301	    (lro->tcph->dest != tcp->dest))
8302		return -1;
8303	return 0;
8304}
8305
8306static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8307{
8308	return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
8309}
8310
/**
 * initiate_new_session - set up a free LRO slot for a new flow
 * @lro: unused LRO session slot to initialize
 * @l2h: L2 header of the first frame of the flow
 * @ip: IP header of the first frame
 * @tcp: TCP header of the first frame
 * @tcp_pyld_len: TCP payload length of the first frame
 * @vlan_tag: VLAN tag extracted from the receive descriptor
 */
static void initiate_new_session(struct lro *lro, u8 *l2h,
				 struct iphdr *ip, struct tcphdr *tcp,
				 u32 tcp_pyld_len, u16 vlan_tag)
{
	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
	lro->l2h = l2h;
	lro->iph = ip;
	lro->tcph = tcp;
	/* Next sequence number we expect for in-order aggregation. */
	lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
	lro->tcp_ack = tcp->ack_seq;
	lro->sg_num = 1;
	lro->total_len = ntohs(ip->tot_len);
	lro->frags_len = 0;
	lro->vlan_tag = vlan_tag;
	/*
	 * Check if we saw TCP timestamp.
	 * Other consistency checks have already been done.
	 */
	if (tcp->doff == 8) {
		__be32 *ptr;
		/* The timestamp option directly follows the TCP header. */
		ptr = (__be32 *)(tcp+1);
		lro->saw_ts = 1;
		lro->cur_tsval = ntohl(*(ptr+1));
		lro->cur_tsecr = *(ptr+2);
	}
	lro->in_use = 1;
}
8338
/**
 * update_L3L4_header - finalize headers of an aggregated LRO packet
 * @sp: device private structure (for statistics)
 * @lro: session whose first-segment headers are patched in place
 *
 * Rewrites the IP total length (with incremental checksum fixup) and
 * the TCP ack/window (and tsecr if present) to reflect all segments
 * merged into this session, then updates aggregation statistics.
 */
static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
{
	struct iphdr *ip = lro->iph;
	struct tcphdr *tcp = lro->tcph;
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);

	/* Update L3 header */
	csum_replace2(&ip->check, ip->tot_len, htons(lro->total_len));
	ip->tot_len = htons(lro->total_len);

	/* Update L4 header */
	tcp->ack_seq = lro->tcp_ack;
	tcp->window = lro->window;

	/* Update tsecr field if this session has timestamps enabled */
	if (lro->saw_ts) {
		__be32 *ptr = (__be32 *)(tcp + 1);
		*(ptr+2) = lro->cur_tsecr;
	}

	/* Update counters required for calculation of
	 * average no. of packets aggregated.
	 */
	swstats->sum_avg_pkts_aggregated += lro->sg_num;
	swstats->num_aggregations++;
}
8367
/**
 * aggregate_new_rx - fold one more TCP segment into an LRO session
 * @lro: session the segment belongs to (already socket-matched)
 * @ip: IP header of the new segment (unused here; kept for symmetry)
 * @tcp: TCP header of the new segment
 * @l4_pyld: TCP payload length of the new segment
 */
static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
			     struct tcphdr *tcp, u32 l4_pyld)
{
	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
	lro->total_len += l4_pyld;
	lro->frags_len += l4_pyld;
	lro->tcp_next_seq += l4_pyld;
	lro->sg_num++;

	/* Update ack seq no. and window ad(from this pkt) in LRO object */
	lro->tcp_ack = tcp->ack_seq;
	lro->window = tcp->window;

	if (lro->saw_ts) {
		__be32 *ptr;
		/* Update tsecr and tsval from this packet */
		ptr = (__be32 *)(tcp+1);
		lro->cur_tsval = ntohl(*(ptr+1));
		lro->cur_tsecr = *(ptr + 2);
	}
}
8389
/**
 * verify_l3_l4_lro_capable - check if a segment may be LRO-merged
 * @l_lro: existing session to validate against, or NULL for a new one
 * @ip: IP header of the segment
 * @tcp: TCP header of the segment
 * @tcp_pyld_len: TCP payload length of the segment
 *
 * Returns 0 if the segment is mergeable, -1 if it must be flushed
 * or sent up unaggregated (pure ack, IP options, ECN CE mark, TCP
 * control flags other than ACK, unexpected TCP options, or a
 * non-monotonic/invalid timestamp).
 */
static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
				    struct tcphdr *tcp, u32 tcp_pyld_len)
{
	u8 *ptr;

	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);

	if (!tcp_pyld_len) {
		/* Runt frame or a pure ack */
		return -1;
	}

	if (ip->ihl != 5) /* IP has options */
		return -1;

	/* If we see CE codepoint in IP header, packet is not mergeable */
	if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
		return -1;

	/* If we see ECE or CWR flags in TCP header, packet is not mergeable */
	if (tcp->urg || tcp->psh || tcp->rst ||
	    tcp->syn || tcp->fin ||
	    tcp->ece || tcp->cwr || !tcp->ack) {
		/*
		 * Currently recognize only the ack control word and
		 * any other control field being set would result in
		 * flushing the LRO session
		 */
		return -1;
	}

	/*
	 * Allow only one TCP timestamp option. Don't aggregate if
	 * any other options are detected.
	 */
	if (tcp->doff != 5 && tcp->doff != 8)
		return -1;

	if (tcp->doff == 8) {
		ptr = (u8 *)(tcp + 1);
		/* Skip leading NOP padding before the timestamp option. */
		while (*ptr == TCPOPT_NOP)
			ptr++;
		if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
			return -1;

		/* Ensure timestamp value increases monotonically */
		if (l_lro)
			if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
				return -1;

		/* timestamp echo reply should be non-zero */
		if (*((__be32 *)(ptr+6)) == 0)
			return -1;
	}

	return 0;
}
8447
/**
 * s2io_club_tcp_session - LRO dispatch for one received TCP segment
 * @ring_data: ring holding the per-ring LRO session array
 * @buffer: start of the received frame
 * @tcp: out - set to the TCP header within @buffer
 * @tcp_len: out - set to the TCP payload length
 * @lro: out - the session chosen for this segment (NULL if none)
 * @rxdp: receive descriptor for the frame
 * @sp: device private structure
 *
 * Return values steer the caller:
 *   0 - all LRO sessions in use, send packet up unaggregated
 *   1 - segment aggregated into *@lro
 *   2 - out of sequence or not mergeable: flush session and packet
 *   3 - new session started in *@lro
 *   4 - aggregated and session reached max size: flush the LRO
 *   5 - not L3/L4 capable, send this packet up as-is
 *  <0 - L2 check failed (propagated from check_L2_lro_capable())
 */
static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
				 u8 **tcp, u32 *tcp_len, struct lro **lro,
				 struct RxD_t *rxdp, struct s2io_nic *sp)
{
	struct iphdr *ip;
	struct tcphdr *tcph;
	int ret = 0, i;
	u16 vlan_tag = 0;
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

	ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
				   rxdp, sp);
	if (ret)
		return ret;

	DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr);

	vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
	tcph = (struct tcphdr *)*tcp;
	*tcp_len = get_l4_pyld_length(ip, tcph);
	/* First pass: look for an in-use session matching this socket. */
	for (i = 0; i < MAX_LRO_SESSIONS; i++) {
		struct lro *l_lro = &ring_data->lro0_n[i];
		if (l_lro->in_use) {
			if (check_for_socket_match(l_lro, ip, tcph))
				continue;
			/* Sock pair matched */
			*lro = l_lro;

			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
				DBG_PRINT(INFO_DBG, "%s: Out of sequence. "
					  "expected 0x%x, actual 0x%x\n",
					  __func__,
					  (*lro)->tcp_next_seq,
					  ntohl(tcph->seq));

				swstats->outof_sequence_pkts++;
				ret = 2;
				break;
			}

			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,
						      *tcp_len))
				ret = 1; /* Aggregate */
			else
				ret = 2; /* Flush both */
			break;
		}
	}

	if (ret == 0) {
		/* Before searching for available LRO objects,
		 * check if the pkt is L3/L4 aggregatable. If not
		 * don't create new LRO session. Just send this
		 * packet up.
		 */
		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len))
			return 5;

		/* Second pass: grab a free slot for a new session. */
		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
			struct lro *l_lro = &ring_data->lro0_n[i];
			if (!(l_lro->in_use)) {
				*lro = l_lro;
				ret = 3; /* Begin anew */
				break;
			}
		}
	}

	if (ret == 0) { /* sessions exceeded */
		DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
			  __func__);
		*lro = NULL;
		return ret;
	}

	switch (ret) {
	case 3:
		initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
				     vlan_tag);
		break;
	case 2:
		/* Finalize headers before the session is flushed. */
		update_L3L4_header(sp, *lro);
		break;
	case 1:
		aggregate_new_rx(*lro, ip, tcph, *tcp_len);
		if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
			update_L3L4_header(sp, *lro);
			ret = 4; /* Flush the LRO */
		}
		break;
	default:
		DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n", __func__);
		break;
	}

	return ret;
}
8545
8546static void clear_lro_session(struct lro *lro)
8547{
8548	static u16 lro_struct_size = sizeof(struct lro);
8549
8550	memset(lro, 0, lro_struct_size);
8551}
8552
/**
 * queue_rx_frame - hand a completed receive skb to the network stack
 * @skb: the received packet (skb->dev must already be set)
 * @vlan_tag: VLAN tag to attach when stripping is enabled (0 = none)
 */
static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
{
	struct net_device *dev = skb->dev;
	struct s2io_nic *sp = netdev_priv(dev);

	skb->protocol = eth_type_trans(skb, dev);
	if (vlan_tag && sp->vlan_strip_flag)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
	/* NAPI context delivers directly; otherwise go via the backlog. */
	if (sp->config.napi)
		netif_receive_skb(skb);
	else
		netif_rx(skb);
}
8566
/**
 * lro_append_pkt - chain a segment's payload onto the LRO parent skb
 * @sp: device private structure (for statistics)
 * @lro: session owning the parent skb
 * @skb: the new segment; its headers are stripped, payload kept
 * @tcp_len: TCP payload length of @skb
 */
static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
			   struct sk_buff *skb, u32 tcp_len)
{
	struct sk_buff *first = lro->parent;
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

	first->len += tcp_len;
	first->data_len = lro->frags_len;
	/* Drop everything before the TCP payload of this segment. */
	skb_pull(skb, (skb->len - tcp_len));
	/* Append to the parent's frag_list, tracking the tail in lro. */
	if (skb_shinfo(first)->frag_list)
		lro->last_frag->next = skb;
	else
		skb_shinfo(first)->frag_list = skb;
	first->truesize += skb->truesize;
	lro->last_frag = skb;
	swstats->clubbed_frms_cnt++;
}
8584
8585/**
8586 * s2io_io_error_detected - called when PCI error is detected
8587 * @pdev: Pointer to PCI device
8588 * @state: The current pci connection state
8589 *
8590 * This function is called after a PCI bus error affecting
8591 * this device has been detected.
8592 */
static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct s2io_nic *sp = netdev_priv(netdev);

	netif_device_detach(netdev);

	/* Permanent failure: tell the core to give up on this device. */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev)) {
		/* Bring down the card, while avoiding PCI I/O */
		do_s2io_card_down(sp, 0);
	}
	pci_disable_device(pdev);

	/* Ask the core for a slot reset; recovery continues in
	 * s2io_io_slot_reset() and s2io_io_resume(). */
	return PCI_ERS_RESULT_NEED_RESET;
}
8612
8613/**
8614 * s2io_io_slot_reset - called after the pci bus has been reset.
8615 * @pdev: Pointer to PCI device
8616 *
8617 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
8619 * followed by fixups by BIOS, and has its config space
8620 * set up identically to what it was at cold boot.
8621 */
static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct s2io_nic *sp = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		pr_err("Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Restore bus mastering and put the NIC into a known state. */
	pci_set_master(pdev);
	s2io_reset(sp);

	return PCI_ERS_RESULT_RECOVERED;
}
8637
8638/**
8639 * s2io_io_resume - called when traffic can start flowing again.
8640 * @pdev: Pointer to PCI device
8641 *
8642 * This callback is called when the error recovery driver tells
8643 * us that its OK to resume normal operation.
8644 */
static void s2io_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct s2io_nic *sp = netdev_priv(netdev);

	if (netif_running(netdev)) {
		/* Re-initialize the card; abort recovery on failure. */
		if (s2io_card_up(sp)) {
			pr_err("Can't bring device back up after reset.\n");
			return;
		}

		/* Reprogram the MAC address lost across the reset. */
		if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
			s2io_card_down(sp);
			pr_err("Can't restore mac addr after reset.\n");
			return;
		}
	}

	netif_device_attach(netdev);
	netif_tx_wake_all_queues(netdev);
}
v5.9
   1/************************************************************************
   2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
   3 * Copyright(c) 2002-2010 Exar Corp.
   4 *
   5 * This software may be used and distributed according to the terms of
   6 * the GNU General Public License (GPL), incorporated herein by reference.
   7 * Drivers based on or derived from this code fall under the GPL and must
   8 * retain the authorship, copyright and license notice.  This file is not
   9 * a complete program and may only be used when the entire operating
  10 * system is licensed under the GPL.
  11 * See the file COPYING in this distribution for more information.
  12 *
  13 * Credits:
  14 * Jeff Garzik		: For pointing out the improper error condition
  15 *			  check in the s2io_xmit routine and also some
  16 *			  issues in the Tx watch dog function. Also for
  17 *			  patiently answering all those innumerable
 *			  questions regarding the 2.6 porting issues.
  19 * Stephen Hemminger	: Providing proper 2.6 porting mechanism for some
  20 *			  macros available only in 2.6 Kernel.
  21 * Francois Romieu	: For pointing out all code part that were
  22 *			  deprecated and also styling related comments.
  23 * Grant Grundler	: For helping me get rid of some Architecture
  24 *			  dependent code.
  25 * Christopher Hellwig	: Some more 2.6 specific issues in the driver.
  26 *
  27 * The module loadable parameters that are supported by the driver and a brief
  28 * explanation of all the variables.
  29 *
  30 * rx_ring_num : This can be used to program the number of receive rings used
  31 * in the driver.
  32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
  33 *     This is also an array of size 8.
  34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
  35 *		values are 1, 2.
 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
  37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
  38 * Tx descriptors that can be associated with each corresponding FIFO.
  39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
  40 *     2(MSI_X). Default value is '2(MSI_X)'
  41 * lro_max_pkts: This parameter defines maximum number of packets can be
  42 *     aggregated as a single large packet
  43 * napi: This parameter used to enable/disable NAPI (polling Rx)
  44 *     Possible values '1' for enable and '0' for disable. Default is '1'
 
 
  45 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
  46 *                 Possible values '1' for enable , '0' for disable.
  47 *                 Default is '2' - which means disable in promisc mode
  48 *                 and enable in non-promiscuous mode.
  49 * multiq: This parameter used to enable/disable MULTIQUEUE support.
  50 *      Possible values '1' for enable and '0' for disable. Default is '0'
  51 ************************************************************************/
  52
  53#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  54
  55#include <linux/module.h>
  56#include <linux/types.h>
  57#include <linux/errno.h>
  58#include <linux/ioport.h>
  59#include <linux/pci.h>
  60#include <linux/dma-mapping.h>
  61#include <linux/kernel.h>
  62#include <linux/netdevice.h>
  63#include <linux/etherdevice.h>
  64#include <linux/mdio.h>
  65#include <linux/skbuff.h>
  66#include <linux/init.h>
  67#include <linux/delay.h>
  68#include <linux/stddef.h>
  69#include <linux/ioctl.h>
  70#include <linux/timex.h>
  71#include <linux/ethtool.h>
  72#include <linux/workqueue.h>
  73#include <linux/if_vlan.h>
  74#include <linux/ip.h>
  75#include <linux/tcp.h>
  76#include <linux/uaccess.h>
  77#include <linux/io.h>
  78#include <linux/io-64-nonatomic-lo-hi.h>
  79#include <linux/slab.h>
  80#include <linux/prefetch.h>
  81#include <net/tcp.h>
  82#include <net/checksum.h>
  83
  84#include <asm/div64.h>
  85#include <asm/irq.h>
  86
  87/* local include */
  88#include "s2io.h"
  89#include "s2io-regs.h"
  90
#define DRV_VERSION "2.0.26.28"

/* S2io Driver name & version. */
static const char s2io_driver_name[] = "Neterion";
static const char s2io_driver_version[] = DRV_VERSION;

/* RxD size in bytes and RxDs per block, both indexed by sp->rxd_mode. */
static const int rxd_size[2] = {32, 48};
static const int rxd_count[2] = {127, 85};
  99
 100static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
 101{
 102	int ret;
 103
 104	ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
 105	       (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
 106
 107	return ret;
 108}
 109
/*
 * Cards with following subsystem_id have a link state indication
 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
 * macro below identifies these cards given the subsystem_id.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)		\
	(dev_type == XFRAME_I_DEVICE) ?					\
	((((subid >= 0x600B) && (subid <= 0x600D)) ||			\
	  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0

/* Link is up when neither a remote nor a local RMAC fault is flagged. */
#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
				      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
 122
/* True once the __S2IO_STATE_CARD_UP bit is set in sp->state. */
static inline int is_s2io_card_up(const struct s2io_nic *sp)
{
	return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
}
 127
/* Ethtool related variables and Macros. */
/* Names reported for the ethtool self-test results, in test order. */
static const char s2io_gstrings[][ETH_GSTRING_LEN] = {
	"Register test\t(offline)",
	"Eeprom test\t(offline)",
	"Link test\t(online)",
	"RLDRAM test\t(offline)",
	"BIST Test\t(offline)"
};
 136
/* ethtool statistics key strings common to all Xframe devices. */
static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
	{"tmac_frms"},
	{"tmac_data_octets"},
	{"tmac_drop_frms"},
	{"tmac_mcst_frms"},
	{"tmac_bcst_frms"},
	{"tmac_pause_ctrl_frms"},
	{"tmac_ttl_octets"},
	{"tmac_ucst_frms"},
	{"tmac_nucst_frms"},
	{"tmac_any_err_frms"},
	{"tmac_ttl_less_fb_octets"},
	{"tmac_vld_ip_octets"},
	{"tmac_vld_ip"},
	{"tmac_drop_ip"},
	{"tmac_icmp"},
	{"tmac_rst_tcp"},
	{"tmac_tcp"},
	{"tmac_udp"},
	{"rmac_vld_frms"},
	{"rmac_data_octets"},
	{"rmac_fcs_err_frms"},
	{"rmac_drop_frms"},
	{"rmac_vld_mcst_frms"},
	{"rmac_vld_bcst_frms"},
	{"rmac_in_rng_len_err_frms"},
	{"rmac_out_rng_len_err_frms"},
	{"rmac_long_frms"},
	{"rmac_pause_ctrl_frms"},
	{"rmac_unsup_ctrl_frms"},
	{"rmac_ttl_octets"},
	{"rmac_accepted_ucst_frms"},
	{"rmac_accepted_nucst_frms"},
	{"rmac_discarded_frms"},
	{"rmac_drop_events"},
	{"rmac_ttl_less_fb_octets"},
	{"rmac_ttl_frms"},
	{"rmac_usized_frms"},
	{"rmac_osized_frms"},
	{"rmac_frag_frms"},
	{"rmac_jabber_frms"},
	{"rmac_ttl_64_frms"},
	{"rmac_ttl_65_127_frms"},
	{"rmac_ttl_128_255_frms"},
	{"rmac_ttl_256_511_frms"},
	{"rmac_ttl_512_1023_frms"},
	{"rmac_ttl_1024_1518_frms"},
	{"rmac_ip"},
	{"rmac_ip_octets"},
	{"rmac_hdr_err_ip"},
	{"rmac_drop_ip"},
	{"rmac_icmp"},
	{"rmac_tcp"},
	{"rmac_udp"},
	{"rmac_err_drp_udp"},
	{"rmac_xgmii_err_sym"},
	{"rmac_frms_q0"},
	{"rmac_frms_q1"},
	{"rmac_frms_q2"},
	{"rmac_frms_q3"},
	{"rmac_frms_q4"},
	{"rmac_frms_q5"},
	{"rmac_frms_q6"},
	{"rmac_frms_q7"},
	{"rmac_full_q0"},
	{"rmac_full_q1"},
	{"rmac_full_q2"},
	{"rmac_full_q3"},
	{"rmac_full_q4"},
	{"rmac_full_q5"},
	{"rmac_full_q6"},
	{"rmac_full_q7"},
	{"rmac_pause_cnt"},
	{"rmac_xgmii_data_err_cnt"},
	{"rmac_xgmii_ctrl_err_cnt"},
	{"rmac_accepted_ip"},
	{"rmac_err_tcp"},
	{"rd_req_cnt"},
	{"new_rd_req_cnt"},
	{"new_rd_req_rtry_cnt"},
	{"rd_rtry_cnt"},
	{"wr_rtry_rd_ack_cnt"},
	{"wr_req_cnt"},
	{"new_wr_req_cnt"},
	{"new_wr_req_rtry_cnt"},
	{"wr_rtry_cnt"},
	{"wr_disc_cnt"},
	{"rd_rtry_wr_ack_cnt"},
	{"txp_wr_cnt"},
	{"txd_rd_cnt"},
	{"txd_wr_cnt"},
	{"rxd_rd_cnt"},
	{"rxd_wr_cnt"},
	{"txf_rd_cnt"},
	{"rxf_wr_cnt"}
};
 233
/*
 * Additional hardware statistic names available only on Xframe II
 * devices; appended after the common keys in the ethtool -S output.
 * These strings are user-visible ABI, keep order and spelling stable.
 */
static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
	{"rmac_ttl_1519_4095_frms"},
	{"rmac_ttl_4096_8191_frms"},
	{"rmac_ttl_8192_max_frms"},
	{"rmac_ttl_gt_max_frms"},
	{"rmac_osized_alt_frms"},
	{"rmac_jabber_alt_frms"},
	{"rmac_gt_max_alt_frms"},
	{"rmac_vlan_frms"},
	{"rmac_len_discard"},
	{"rmac_fcs_discard"},
	{"rmac_pf_discard"},
	{"rmac_da_discard"},
	{"rmac_red_discard"},
	{"rmac_rts_discard"},
	{"rmac_ingm_full_discard"},
	{"link_fault_cnt"}
};
 252
/*
 * Names of software (driver-maintained) statistics, appended last in the
 * ethtool -S output.  The first entry is a section header that prints a
 * blank line before the driver counters.  User-visible ABI: keep order
 * and spelling stable.
 */
static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
	{"\n DRIVER STATISTICS"},
	{"single_bit_ecc_errs"},
	{"double_bit_ecc_errs"},
	{"parity_err_cnt"},
	{"serious_err_cnt"},
	{"soft_reset_cnt"},
	{"fifo_full_cnt"},
	{"ring_0_full_cnt"},
	{"ring_1_full_cnt"},
	{"ring_2_full_cnt"},
	{"ring_3_full_cnt"},
	{"ring_4_full_cnt"},
	{"ring_5_full_cnt"},
	{"ring_6_full_cnt"},
	{"ring_7_full_cnt"},
	{"alarm_transceiver_temp_high"},
	{"alarm_transceiver_temp_low"},
	{"alarm_laser_bias_current_high"},
	{"alarm_laser_bias_current_low"},
	{"alarm_laser_output_power_high"},
	{"alarm_laser_output_power_low"},
	{"warn_transceiver_temp_high"},
	{"warn_transceiver_temp_low"},
	{"warn_laser_bias_current_high"},
	{"warn_laser_bias_current_low"},
	{"warn_laser_output_power_high"},
	{"warn_laser_output_power_low"},
	{"lro_aggregated_pkts"},
	{"lro_flush_both_count"},
	{"lro_out_of_sequence_pkts"},
	{"lro_flush_due_to_max_pkts"},
	{"lro_avg_aggr_pkts"},
	{"mem_alloc_fail_cnt"},
	{"pci_map_fail_cnt"},
	{"watchdog_timer_cnt"},
	{"mem_allocated"},
	{"mem_freed"},
	{"link_up_cnt"},
	{"link_down_cnt"},
	{"link_up_time"},
	{"link_down_time"},
	{"tx_tcode_buf_abort_cnt"},
	{"tx_tcode_desc_abort_cnt"},
	{"tx_tcode_parity_err_cnt"},
	{"tx_tcode_link_loss_cnt"},
	{"tx_tcode_list_proc_err_cnt"},
	{"rx_tcode_parity_err_cnt"},
	{"rx_tcode_abort_cnt"},
	{"rx_tcode_parity_abort_cnt"},
	{"rx_tcode_rda_fail_cnt"},
	{"rx_tcode_unkn_prot_cnt"},
	{"rx_tcode_fcs_err_cnt"},
	{"rx_tcode_buf_size_err_cnt"},
	{"rx_tcode_rxd_corrupt_cnt"},
	{"rx_tcode_unkn_err_cnt"},
	{"tda_err_cnt"},
	{"pfc_err_cnt"},
	{"pcc_err_cnt"},
	{"tti_err_cnt"},
	{"tpa_err_cnt"},
	{"sm_err_cnt"},
	{"lso_err_cnt"},
	{"mac_tmac_err_cnt"},
	{"mac_rmac_err_cnt"},
	{"xgxs_txgxs_err_cnt"},
	{"xgxs_rxgxs_err_cnt"},
	{"rc_err_cnt"},
	{"prc_pcix_err_cnt"},
	{"rpa_err_cnt"},
	{"rda_err_cnt"},
	{"rti_err_cnt"},
	{"mc_err_cnt"}
};
 327
/* Entry counts of the three key tables above. */
#define S2IO_XENA_STAT_LEN	ARRAY_SIZE(ethtool_xena_stats_keys)
#define S2IO_ENHANCED_STAT_LEN	ARRAY_SIZE(ethtool_enhanced_stats_keys)
#define S2IO_DRIVER_STAT_LEN	ARRAY_SIZE(ethtool_driver_stats_keys)

/* Total stats per device type: Xframe II adds the enhanced counters. */
#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)

/* Byte sizes of the corresponding flattened string tables. */
#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)

/* Self-test count and byte size of the self-test string table. */
#define S2IO_TEST_LEN	ARRAY_SIZE(s2io_gstrings)
#define S2IO_STRINGS_LEN	(S2IO_TEST_LEN * ETH_GSTRING_LEN)
 340
 
 
 
 
 
 
 341/* copy mac addr to def_mac_addr array */
 342static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
 343{
 344	sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
 345	sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
 346	sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
 347	sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
 348	sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
 349	sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
 350}
 351
/*
 * Constants to be programmed into the Xena's registers, to configure
 * the XAUI.
 */

/* Terminator for the dtx_control command tables below. */
#define	END_SIGN	0x0

/* XAUI/DTX configuration sequence for Xframe II (Herc) devices. */
static const u64 herc_act_dtx_cfg[] = {
	/* Set address */
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	/* Write data */
	0x8000051536750004ULL, 0x80000515367500E4ULL,
	/* Set address */
	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	/* Write data */
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
	/* Set address */
	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	/* Write data */
	0x801205150D440004ULL, 0x801205150D4400E4ULL,
	/* Set address */
	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	/* Done */
	END_SIGN
};
 378
/* XAUI/DTX configuration sequence for Xframe I (Xena) devices. */
static const u64 xena_dtx_cfg[] = {
	/* Set address */
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	/* Write data */
	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
	/* Set address */
	0x8001051500000000ULL, 0x80010515000000E0ULL,
	/* Write data */
	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
	/* Set address */
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	END_SIGN
};
 394
/*
 * Constants for Fixing the MacAddress problem seen mostly on
 * Alpha machines.  Written sequentially to the hardware as a
 * workaround; END_SIGN terminates the sequence.
 */
static const u64 fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
	END_SIGN
};
 416
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/* Module Loadable parameters. */
S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
S2IO_PARM_INT(rx_ring_num, 1);
S2IO_PARM_INT(multiq, 0);
S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* 0 is no steering, 1 is Priority steering, 2 is Default steering */
S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 2);
/* Large receive offload feature */

/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size(64K)
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);

/* NAPI polling enabled by default. */
S2IO_PARM_INT(napi, 1);

S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);

/* Per-FIFO Tx lengths: FIFO 0 has its own default, FIFOs 1..7 share one. */
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
{DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
/* Per-ring Rx block counts, all rings default to SMALL_BLK_CNT. */
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
{[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
/* Per-ring receive frame length limits; 0 means use the default. */
static unsigned int rts_frm_len[MAX_RX_RINGS] =
{[0 ...(MAX_RX_RINGS - 1)] = 0 };

/* Array-valued module parameters (not visible in sysfs: perm == 0). */
module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
 461
/*
 * S2IO device table.
 * This table lists all the devices that this driver supports.
 */
static const struct pci_device_id s2io_tbl[] = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{0,}	/* terminator */
};

MODULE_DEVICE_TABLE(pci, s2io_tbl);
 479
/* PCI error-recovery callbacks (AER): detect, slot reset, resume. */
static const struct pci_error_handlers s2io_err_handler = {
	.error_detected = s2io_io_error_detected,
	.slot_reset = s2io_io_slot_reset,
	.resume = s2io_io_resume,
};

/* PCI driver registration structure. */
static struct pci_driver s2io_driver = {
	.name = "S2IO",
	.id_table = s2io_tbl,
	.probe = s2io_init_nic,
	.remove = s2io_rem_nic,
	.err_handler = &s2io_err_handler,
};
 493
/* A simplifier macro used both by init and free shared_mem Fns().
 * Number of pages needed to hold `len` items at `per_each` items/page.
 */
#define TXD_MEM_PAGE_CNT(len, per_each) DIV_ROUND_UP(len, per_each)
 496
 497/* netqueue manipulation helper functions */
 498static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
 499{
 500	if (!sp->config.multiq) {
 501		int i;
 502
 503		for (i = 0; i < sp->config.tx_fifo_num; i++)
 504			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
 505	}
 506	netif_tx_stop_all_queues(sp->dev);
 507}
 508
 509static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
 510{
 511	if (!sp->config.multiq)
 512		sp->mac_control.fifos[fifo_no].queue_state =
 513			FIFO_QUEUE_STOP;
 514
 515	netif_tx_stop_all_queues(sp->dev);
 516}
 517
 518static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
 519{
 520	if (!sp->config.multiq) {
 521		int i;
 522
 523		for (i = 0; i < sp->config.tx_fifo_num; i++)
 524			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
 525	}
 526	netif_tx_start_all_queues(sp->dev);
 527}
 528
 
 
 
 
 
 
 
 
 
 529static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
 530{
 531	if (!sp->config.multiq) {
 532		int i;
 533
 534		for (i = 0; i < sp->config.tx_fifo_num; i++)
 535			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
 536	}
 537	netif_tx_wake_all_queues(sp->dev);
 538}
 539
 540static inline void s2io_wake_tx_queue(
 541	struct fifo_info *fifo, int cnt, u8 multiq)
 542{
 543
 544	if (multiq) {
 545		if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
 546			netif_wake_subqueue(fifo->dev, fifo->fifo_no);
 547	} else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
 548		if (netif_queue_stopped(fifo->dev)) {
 549			fifo->queue_state = FIFO_QUEUE_START;
 550			netif_wake_queue(fifo->dev);
 551		}
 552	}
 553}
 554
/**
 * init_shared_mem - Allocation and Initialization of Memory
 * @nic: Device private variable.
 * Description: The function allocates all the memory areas shared
 * between the NIC and the driver. This includes Tx descriptors,
 * Rx descriptors and the statistics block.
 * Return: SUCCESS on success; -EINVAL for bad FIFO configuration,
 * -ENOMEM on allocation failure, FAILURE for a bad RxD count.  On any
 * failure the caller is expected to invoke free_shared_mem() to release
 * everything allocated up to that point (nothing is freed here).
 */

static int init_shared_mem(struct s2io_nic *nic)
{
	u32 size;
	void *tmp_v_addr, *tmp_v_addr_next;
	dma_addr_t tmp_p_addr, tmp_p_addr_next;
	struct RxD_block *pre_rxd_blk = NULL;
	int i, j, blk_cnt;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;
	unsigned long tmp;
	struct buffAdd *ba;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;
	unsigned long long mem_allocated = 0;	/* running byte total, for sw stats */

	/* Allocation and initialization of TXDLs in FIFOs */
	/* First pass: validate the total TxD count across all FIFOs. */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size += tx_cfg->fifo_len;
	}
	if (size > MAX_AVAILABLE_TXDS) {
		DBG_PRINT(ERR_DBG,
			  "Too many TxDs requested: %d, max supported: %d\n",
			  size, MAX_AVAILABLE_TXDS);
		return -EINVAL;
	}

	/* Second pass: validate each FIFO's individual length. */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size = tx_cfg->fifo_len;
		/*
		 * Legal values are from 2 to 8192
		 */
		if (size < 2) {
			DBG_PRINT(ERR_DBG, "Fifo %d: Invalid length (%d) - "
				  "Valid lengths are 2 through 8192\n",
				  i, size);
			return -EINVAL;
		}
	}

	/* Size of one TxD list and how many lists fit in one page. */
	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	/* Per-FIFO bookkeeping array, one list_info_hold per TxD list. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
		int fifo_len = tx_cfg->fifo_len;
		int list_holder_size = fifo_len * sizeof(struct list_info_hold);

		fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
		if (!fifo->list_info) {
			DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n");
			return -ENOMEM;
		}
		mem_allocated += list_holder_size;
	}
	/* Allocate DMA-coherent pages for the TxD lists of each FIFO. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		fifo->tx_curr_put_info.offset = 0;
		fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
		fifo->tx_curr_get_info.offset = 0;
		fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
		fifo->fifo_no = i;
		fifo->nic = nic;
		/* +2: one TxD for the header and one as list terminator */
		fifo->max_txds = MAX_SKB_FRAGS + 2;
		fifo->dev = dev;

		for (j = 0; j < page_num; j++) {
			int k = 0;
			dma_addr_t tmp_p;
			void *tmp_v;
			tmp_v = dma_alloc_coherent(&nic->pdev->dev, PAGE_SIZE,
						   &tmp_p, GFP_KERNEL);
			if (!tmp_v) {
				DBG_PRINT(INFO_DBG,
					  "dma_alloc_coherent failed for TxDL\n");
				return -ENOMEM;
			}
			/* If we got a zero DMA address(can happen on
			 * certain platforms like PPC), reallocate.
			 * Store virtual address of page we don't want,
			 * to be freed later.
			 */
			if (!tmp_p) {
				mac_control->zerodma_virt_addr = tmp_v;
				DBG_PRINT(INIT_DBG,
					  "%s: Zero DMA address for TxDL. "
					  "Virtual address %p\n",
					  dev->name, tmp_v);
				tmp_v = dma_alloc_coherent(&nic->pdev->dev,
							   PAGE_SIZE, &tmp_p,
							   GFP_KERNEL);
				if (!tmp_v) {
					DBG_PRINT(INFO_DBG,
						  "dma_alloc_coherent failed for TxDL\n");
					return -ENOMEM;
				}
				mem_allocated += PAGE_SIZE;
			}
			/* Carve the page into TxD lists and record each
			 * list's virtual and bus address.
			 */
			while (k < lst_per_page) {
				int l = (j * lst_per_page) + k;
				if (l == tx_cfg->fifo_len)
					break;
				fifo->list_info[l].list_virt_addr =
					tmp_v + (k * lst_size);
				fifo->list_info[l].list_phy_addr =
					tmp_p + (k * lst_size);
				k++;
			}
		}
	}

	/* Per-FIFO scratch array used by the UFO (UDP fragmentation
	 * offload) path, one u64 per TxD.
	 */
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size = tx_cfg->fifo_len;
		fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
		if (!fifo->ufo_in_band_v)
			return -ENOMEM;
		mem_allocated += (size * sizeof(u64));
	}

	/* Allocation and initialization of RXDs in Rings */
	size = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		/* Each Rx block holds rxd_count[] RxDs plus one link RxD. */
		if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
			DBG_PRINT(ERR_DBG, "%s: Ring%d RxD count is not a "
				  "multiple of RxDs per Block\n",
				  dev->name, i);
			return FAILURE;
		}
		size += rx_cfg->num_rxd;
		ring->block_count = rx_cfg->num_rxd /
			(rxd_count[nic->rxd_mode] + 1);
		/* Usable packet slots: total minus one link RxD per block. */
		ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
	}
	if (nic->rxd_mode == RXD_MODE_1)
		size = (size * (sizeof(struct RxD1)));
	else
		size = (size * (sizeof(struct RxD3)));

	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		ring->rx_curr_get_info.block_index = 0;
		ring->rx_curr_get_info.offset = 0;
		ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
		ring->rx_curr_put_info.block_index = 0;
		ring->rx_curr_put_info.offset = 0;
		ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
		ring->nic = nic;
		ring->ring_no = i;

		blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
		/*  Allocating all the Rx blocks */
		for (j = 0; j < blk_cnt; j++) {
			struct rx_block_info *rx_blocks;
			int l;

			rx_blocks = &ring->rx_blocks[j];
			size = SIZE_OF_BLOCK;	/* size is always page size */
			tmp_v_addr = dma_alloc_coherent(&nic->pdev->dev, size,
							&tmp_p_addr, GFP_KERNEL);
			if (tmp_v_addr == NULL) {
				/*
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was alloced till the
				 * failure happened.
				 */
				rx_blocks->block_virt_addr = tmp_v_addr;
				return -ENOMEM;
			}
			mem_allocated += size;

			/* Shadow array with per-RxD virtual/bus addresses. */
			size = sizeof(struct rxd_info) *
				rxd_count[nic->rxd_mode];
			rx_blocks->block_virt_addr = tmp_v_addr;
			rx_blocks->block_dma_addr = tmp_p_addr;
			rx_blocks->rxds = kmalloc(size,  GFP_KERNEL);
			if (!rx_blocks->rxds)
				return -ENOMEM;
			mem_allocated += size;
			for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
				rx_blocks->rxds[l].virt_addr =
					rx_blocks->block_virt_addr +
					(rxd_size[nic->rxd_mode] * l);
				rx_blocks->rxds[l].dma_addr =
					rx_blocks->block_dma_addr +
					(rxd_size[nic->rxd_mode] * l);
			}
		}
		/* Interlinking all Rx Blocks into a circular list so the
		 * hardware can chase the next-block pointers.
		 */
		for (j = 0; j < blk_cnt; j++) {
			int next = (j + 1) % blk_cnt;
			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
			tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
			tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;

			pre_rxd_blk = tmp_v_addr;
			pre_rxd_blk->reserved_2_pNext_RxD_block =
				(unsigned long)tmp_v_addr_next;
			pre_rxd_blk->pNext_RxD_Blk_physical =
				(u64)tmp_p_addr_next;
		}
	}
	if (nic->rxd_mode == RXD_MODE_3B) {
		/*
		 * Allocation of Storages for buffer addresses in 2BUFF mode
		 * and the buffers as well.
		 */
		for (i = 0; i < config->rx_ring_num; i++) {
			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
			struct ring_info *ring = &mac_control->rings[i];

			blk_cnt = rx_cfg->num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			size = sizeof(struct buffAdd *) * blk_cnt;
			ring->ba = kmalloc(size, GFP_KERNEL);
			if (!ring->ba)
				return -ENOMEM;
			mem_allocated += size;
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;

				size = sizeof(struct buffAdd) *
					(rxd_count[nic->rxd_mode] + 1);
				ring->ba[j] = kmalloc(size, GFP_KERNEL);
				if (!ring->ba[j])
					return -ENOMEM;
				mem_allocated += size;
				while (k != rxd_count[nic->rxd_mode]) {
					ba = &ring->ba[j][k];
					/* Over-allocate by ALIGN_SIZE so the
					 * usable pointer can be rounded up to
					 * an ALIGN_SIZE boundary.
					 */
					size = BUF0_LEN + ALIGN_SIZE;
					ba->ba_0_org = kmalloc(size, GFP_KERNEL);
					if (!ba->ba_0_org)
						return -ENOMEM;
					mem_allocated += size;
					tmp = (unsigned long)ba->ba_0_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long)ALIGN_SIZE);
					ba->ba_0 = (void *)tmp;

					size = BUF1_LEN + ALIGN_SIZE;
					ba->ba_1_org = kmalloc(size, GFP_KERNEL);
					if (!ba->ba_1_org)
						return -ENOMEM;
					mem_allocated += size;
					tmp = (unsigned long)ba->ba_1_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long)ALIGN_SIZE);
					ba->ba_1 = (void *)tmp;
					k++;
				}
			}
		}
	}

	/* Allocation and initialization of Statistics block */
	size = sizeof(struct stat_block);
	mac_control->stats_mem =
		dma_alloc_coherent(&nic->pdev->dev, size,
				   &mac_control->stats_mem_phy, GFP_KERNEL);

	if (!mac_control->stats_mem) {
		/*
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was alloced till the
		 * failure happened.
		 */
		return -ENOMEM;
	}
	mem_allocated += size;
	mac_control->stats_mem_sz = size;

	tmp_v_addr = mac_control->stats_mem;
	mac_control->stats_info = tmp_v_addr;
	memset(tmp_v_addr, 0, size);
	/* NOTE(review): tmp_p_addr here is whatever the last Rx-block
	 * allocation left behind, not the stats block's bus address --
	 * debug print only, but potentially misleading.
	 */
	DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n",
		dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr);
	mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
	return SUCCESS;
}
 861
/**
 * free_shared_mem - Free the allocated Memory
 * @nic:  Device private variable.
 * Description: This function is to free all memory locations allocated by
 * the init_shared_mem() function and return it to the kernel.
 * Tolerates a partially-initialized state (it is the cleanup path for
 * init_shared_mem() failures): NULL pointers terminate each loop early.
 */

static void free_shared_mem(struct s2io_nic *nic)
{
	int i, j, blk_cnt, size;
	void *tmp_v_addr;
	dma_addr_t tmp_p_addr;
	int lst_size, lst_per_page;
	struct net_device *dev;
	int page_num = 0;
	struct config_param *config;
	struct mac_info *mac_control;
	struct stat_block *stats;
	struct swStat *swstats;

	if (!nic)
		return;

	dev = nic->dev;

	config = &nic->config;
	mac_control = &nic->mac_control;
	stats = mac_control->stats_info;
	swstats = &stats->sw_stat;

	/* Mirror of init_shared_mem()'s TxD list layout math. */
	lst_size = sizeof(struct TxD) * config->max_txds;
	lst_per_page = PAGE_SIZE / lst_size;

	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
		for (j = 0; j < page_num; j++) {
			int mem_blks = (j * lst_per_page);
			struct list_info_hold *fli;

			/* list_info never allocated: nothing further exists */
			if (!fifo->list_info)
				return;

			fli = &fifo->list_info[mem_blks];
			if (!fli->list_virt_addr)
				break;
			dma_free_coherent(&nic->pdev->dev, PAGE_SIZE,
					  fli->list_virt_addr,
					  fli->list_phy_addr);
			swstats->mem_freed += PAGE_SIZE;
		}
		/* If we got a zero DMA address during allocation,
		 * free the page now
		 */
		/* NOTE(review): zerodma_virt_addr is checked but never
		 * cleared, and this sits inside the per-FIFO loop -- if
		 * more than one FIFO iterates here the same page would be
		 * freed repeatedly.  Verify against upstream history.
		 */
		if (mac_control->zerodma_virt_addr) {
			dma_free_coherent(&nic->pdev->dev, PAGE_SIZE,
					  mac_control->zerodma_virt_addr,
					  (dma_addr_t)0);
			DBG_PRINT(INIT_DBG,
				  "%s: Freeing TxDL with zero DMA address. "
				  "Virtual address %p\n",
				  dev->name, mac_control->zerodma_virt_addr);
			swstats->mem_freed += PAGE_SIZE;
		}
		kfree(fifo->list_info);
		swstats->mem_freed += tx_cfg->fifo_len *
			sizeof(struct list_info_hold);
	}

	/* Free the DMA-coherent Rx blocks and their rxd shadow arrays. */
	size = SIZE_OF_BLOCK;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		blk_cnt = ring->block_count;
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
			if (tmp_v_addr == NULL)
				break;
			dma_free_coherent(&nic->pdev->dev, size, tmp_v_addr,
					  tmp_p_addr);
			swstats->mem_freed += size;
			kfree(ring->rx_blocks[j].rxds);
			swstats->mem_freed += sizeof(struct rxd_info) *
				rxd_count[nic->rxd_mode];
		}
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Freeing buffer storage addresses in 2BUFF mode. */
		for (i = 0; i < config->rx_ring_num; i++) {
			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
			struct ring_info *ring = &mac_control->rings[i];

			blk_cnt = rx_cfg->num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				if (!ring->ba[j])
					continue;
				while (k != rxd_count[nic->rxd_mode]) {
					struct buffAdd *ba = &ring->ba[j][k];
					/* free the *_org pointers; ba_0/ba_1
					 * are aligned aliases into them
					 */
					kfree(ba->ba_0_org);
					swstats->mem_freed +=
						BUF0_LEN + ALIGN_SIZE;
					kfree(ba->ba_1_org);
					swstats->mem_freed +=
						BUF1_LEN + ALIGN_SIZE;
					k++;
				}
				kfree(ring->ba[j]);
				swstats->mem_freed += sizeof(struct buffAdd) *
					(rxd_count[nic->rxd_mode] + 1);
			}
			kfree(ring->ba);
			swstats->mem_freed += sizeof(struct buffAdd *) *
				blk_cnt;
		}
	}

	/* Free the per-FIFO UFO scratch arrays. */
	for (i = 0; i < nic->config.tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		if (fifo->ufo_in_band_v) {
			swstats->mem_freed += tx_cfg->fifo_len *
				sizeof(u64);
			kfree(fifo->ufo_in_band_v);
		}
	}

	/* Statistics block last: swstats itself lives inside it. */
	if (mac_control->stats_mem) {
		swstats->mem_freed += mac_control->stats_mem_sz;
		dma_free_coherent(&nic->pdev->dev, mac_control->stats_mem_sz,
				  mac_control->stats_mem,
				  mac_control->stats_mem_phy);
	}
}
1002
1003/**
1004 * s2io_verify_pci_mode -
1005 */
1006
1007static int s2io_verify_pci_mode(struct s2io_nic *nic)
1008{
1009	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1010	register u64 val64 = 0;
1011	int     mode;
1012
1013	val64 = readq(&bar0->pci_mode);
1014	mode = (u8)GET_PCI_MODE(val64);
1015
1016	if (val64 & PCI_MODE_UNKNOWN_MODE)
1017		return -1;      /* Unknown PCI mode */
1018	return mode;
1019}
1020
1021#define NEC_VENID   0x1033
1022#define NEC_DEVID   0x0125
1023static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1024{
1025	struct pci_dev *tdev = NULL;
1026	for_each_pci_dev(tdev) {
1027		if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
1028			if (tdev->bus == s2io_pdev->bus->parent) {
1029				pci_dev_put(tdev);
1030				return 1;
1031			}
1032		}
1033	}
1034	return 0;
1035}
1036
/* Bus clock in MHz, indexed by the mode value from GET_PCI_MODE(). */
static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
/**
 * s2io_print_pci_mode - log the bus the adapter is plugged into
 * @nic: device private variable
 * Description: reads the pci_mode register, records the bus speed in
 * nic->config.bus_speed, and prints the bus width/type.  Devices behind
 * a NEC bridge are reported as PCI-E without further decoding.
 * Return: the PCI mode on success, -1 for unknown/unsupported modes.
 */
static int s2io_print_pci_mode(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int	mode;
	struct config_param *config = &nic->config;
	const char *pcimode;

	val64 = readq(&bar0->pci_mode);
	mode = (u8)GET_PCI_MODE(val64);

	if (val64 & PCI_MODE_UNKNOWN_MODE)
		return -1;	/* Unknown PCI mode */

	config->bus_speed = bus_speed[mode];

	/* Behind a NEC bridge the link is PCI-E; skip PCI-X decoding. */
	if (s2io_on_nec_bridge(nic->pdev)) {
		DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
			  nic->dev->name);
		return mode;
	}

	switch (mode) {
	case PCI_MODE_PCI_33:
		pcimode = "33MHz PCI bus";
		break;
	case PCI_MODE_PCI_66:
		pcimode = "66MHz PCI bus";
		break;
	case PCI_MODE_PCIX_M1_66:
		pcimode = "66MHz PCIX(M1) bus";
		break;
	case PCI_MODE_PCIX_M1_100:
		pcimode = "100MHz PCIX(M1) bus";
		break;
	case PCI_MODE_PCIX_M1_133:
		pcimode = "133MHz PCIX(M1) bus";
		break;
	case PCI_MODE_PCIX_M2_66:
		pcimode = "133MHz PCIX(M2) bus";
		break;
	case PCI_MODE_PCIX_M2_100:
		pcimode = "200MHz PCIX(M2) bus";
		break;
	case PCI_MODE_PCIX_M2_133:
		pcimode = "266MHz PCIX(M2) bus";
		break;
	default:
		pcimode = "unsupported bus!";
		mode = -1;
	}

	DBG_PRINT(ERR_DBG, "%s: Device is on %d bit %s\n",
		  nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64, pcimode);

	return mode;
}
1098
/**
 *  init_tti - Initialization transmit traffic interrupt scheme
 *  @nic: device private variable
 *  @link: link status (UP/DOWN) used to enable/disable continuous
 *  transmit interrupts
 *  Description: The function configures transmit traffic interrupts
 *  by programming the TTI data1/data2 memories for every Tx FIFO and
 *  committing each entry through the TTI command memory.
 *  Return Value:  SUCCESS on success and
 *  '-1' on failure
 */

static int init_tti(struct s2io_nic *nic, int link)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int i;
	struct config_param *config = &nic->config;

	for (i = 0; i < config->tx_fifo_num; i++) {
		/*
		 * TTI Initialization. Default Tx timer gets us about
		 * 250 interrupts per sec. Continuous interrupts are enabled
		 * by default.
		 */
		if (nic->device_type == XFRAME_II_DEVICE) {
			/* Scale the timer with the measured bus speed. */
			int count = (nic->config.bus_speed * 125)/2;
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
		} else
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);

		/* Utilization ranges A/B/C plus auto-cancel of the timer. */
		val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
			TTI_DATA1_MEM_TX_URNG_B(0x10) |
			TTI_DATA1_MEM_TX_URNG_C(0x30) |
			TTI_DATA1_MEM_TX_TIMER_AC_EN;
		/* Continuous interrupts only on FIFO 0, and only when the
		 * module parameter allows it and the link is up.
		 */
		if (i == 0)
			if (use_continuous_tx_intrs && (link == LINK_UP))
				val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
		writeq(val64, &bar0->tti_data1_mem);

		if (nic->config.intr_type == MSI_X) {
			val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
				TTI_DATA2_MEM_TX_UFC_B(0x100) |
				TTI_DATA2_MEM_TX_UFC_C(0x200) |
				TTI_DATA2_MEM_TX_UFC_D(0x300);
		} else {
			/* With default steering, UDP FIFOs get a different
			 * (larger) set of utilization frame counts.
			 */
			if ((nic->config.tx_steering_type ==
			     TX_DEFAULT_STEERING) &&
			    (config->tx_fifo_num > 1) &&
			    (i >= nic->udp_fifo_idx) &&
			    (i < (nic->udp_fifo_idx +
				  nic->total_udp_fifos)))
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
					TTI_DATA2_MEM_TX_UFC_B(0x80) |
					TTI_DATA2_MEM_TX_UFC_C(0x100) |
					TTI_DATA2_MEM_TX_UFC_D(0x120);
			else
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
					TTI_DATA2_MEM_TX_UFC_B(0x20) |
					TTI_DATA2_MEM_TX_UFC_C(0x40) |
					TTI_DATA2_MEM_TX_UFC_D(0x80);
		}

		writeq(val64, &bar0->tti_data2_mem);

		/* Commit entry i and wait for the strobe bit to clear. */
		val64 = TTI_CMD_MEM_WE |
			TTI_CMD_MEM_STROBE_NEW_CMD |
			TTI_CMD_MEM_OFFSET(i);
		writeq(val64, &bar0->tti_command_mem);

		if (wait_for_cmd_complete(&bar0->tti_command_mem,
					  TTI_CMD_MEM_STROBE_NEW_CMD,
					  S2IO_BIT_RESET) != SUCCESS)
			return FAILURE;
	}

	return SUCCESS;
}
1175
1176/**
1177 *  init_nic - Initialization of hardware
1178 *  @nic: device private variable
1179 *  Description: The function sequentially configures every block
1180 *  of the H/W from their reset values.
1181 *  Return Value:  SUCCESS on success and
1182 *  '-1' on failure (endian settings incorrect).
1183 */
1184
static int init_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	void __iomem *add;
	u32 time;
	int i, j;
	int dtx_cnt = 0;
	unsigned long long mem_share;
	int mem_size;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	/* Set the endian/byte-swapper control on the card */
	if (s2io_set_swapper(nic)) {
		DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
		return -EIO;
	}

	/*
	 * Herc requires EOI to be removed from reset before XGXS, so..
	 */
	if (nic->device_type & XFRAME_II_DEVICE) {
		val64 = 0xA500000000ULL;
		writeq(val64, &bar0->sw_reset);
		msleep(500);
		val64 = readq(&bar0->sw_reset);
	}

	/* Remove XGXS from reset state */
	val64 = 0;
	writeq(val64, &bar0->sw_reset);
	msleep(500);
	val64 = readq(&bar0->sw_reset);

	/* Ensure that it's safe to access registers by checking
	 * RIC_RUNNING bit is reset. Check is valid only for XframeII.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		/* Poll up to ~500ms (50 x 10ms) for RIC to go idle */
		for (i = 0; i < 50; i++) {
			val64 = readq(&bar0->adapter_status);
			if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
				break;
			msleep(10);
		}
		if (i == 50)
			return -ENODEV;
	}

	/*  Enable Receiving broadcasts */
	/* mac_cfg is key-protected: rmac_cfg_key must be rewritten before
	 * each 32-bit half of the 64-bit value is written.
	 */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_RMAC_BCAST_ENABLE;
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32)val64, add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));

	/* Read registers in all blocks */
	val64 = readq(&bar0->mac_int_mask);
	val64 = readq(&bar0->mc_int_mask);
	val64 = readq(&bar0->xgxs_int_mask);

	/*  Set MTU */
	val64 = dev->mtu;
	writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);

	/* Load the serdes (DTX) configuration table; END_SIGN terminates it */
	if (nic->device_type & XFRAME_II_DEVICE) {
		while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			if (dtx_cnt & 0x1)
				msleep(1); /* Necessary!! */
			dtx_cnt++;
		}
	} else {
		while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			val64 = readq(&bar0->dtx_control);
			dtx_cnt++;
		}
	}

	/*  Tx DMA Initialization */
	val64 = 0;
	writeq(val64, &bar0->tx_fifo_partition_0);
	writeq(val64, &bar0->tx_fifo_partition_1);
	writeq(val64, &bar0->tx_fifo_partition_2);
	writeq(val64, &bar0->tx_fifo_partition_3);

	/* Each partition register describes two FIFOs; j selects which half
	 * of the register the current FIFO's len/priority bits land in.
	 * A full register is flushed every second FIFO (cases 1/3/5/7).
	 */
	for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
			vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);

		/* On the last FIFO, bump an even index to the next odd value
		 * so the switch below still writes the half-filled register.
		 */
		if (i == (config->tx_fifo_num - 1)) {
			if (i % 2 == 0)
				i++;
		}

		switch (i) {
		case 1:
			writeq(val64, &bar0->tx_fifo_partition_0);
			val64 = 0;
			j = 0;
			break;
		case 3:
			writeq(val64, &bar0->tx_fifo_partition_1);
			val64 = 0;
			j = 0;
			break;
		case 5:
			writeq(val64, &bar0->tx_fifo_partition_2);
			val64 = 0;
			j = 0;
			break;
		case 7:
			writeq(val64, &bar0->tx_fifo_partition_3);
			val64 = 0;
			j = 0;
			break;
		default:
			j++;
			break;
		}
	}

	/*
	 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
	 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
	 */
	if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4))
		writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);

	val64 = readq(&bar0->tx_fifo_partition_0);
	DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
		  &bar0->tx_fifo_partition_0, (unsigned long long)val64);

	/*
	 * Initialization of Tx_PA_CONFIG register to ignore packet
	 * integrity checking.
	 */
	val64 = readq(&bar0->tx_pa_cfg);
	val64 |= TX_PA_CFG_IGNORE_FRM_ERR |
		TX_PA_CFG_IGNORE_SNAP_OUI |
		TX_PA_CFG_IGNORE_LLC_CTRL |
		TX_PA_CFG_IGNORE_L2_ERR;
	writeq(val64, &bar0->tx_pa_cfg);

	/* Rx DMA initialization. */
	val64 = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];

		val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
	}
	writeq(val64, &bar0->rx_queue_priority);

	/*
	 * Allocating equal share of memory to all the
	 * configured Rings.
	 */
	val64 = 0;
	/* Total RX queue memory units to split across rings: 32 on
	 * Xframe II, 64 on Xena — NOTE(review): confirm the unit size
	 * against the H/W spec.
	 */
	if (nic->device_type & XFRAME_II_DEVICE)
		mem_size = 32;
	else
		mem_size = 64;

	/* Ring 0 additionally absorbs the remainder of the division */
	for (i = 0; i < config->rx_ring_num; i++) {
		switch (i) {
		case 0:
			mem_share = (mem_size / config->rx_ring_num +
				     mem_size % config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
			continue;
		case 1:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
			continue;
		case 2:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
			continue;
		case 3:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
			continue;
		case 4:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
			continue;
		case 5:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
			continue;
		case 6:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
			continue;
		case 7:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
			continue;
		}
	}
	writeq(val64, &bar0->rx_queue_cfg);

	/*
	 * Filling Tx round robin registers
	 * as per the number of FIFOs for equal scheduling priority
	 */
	switch (config->tx_fifo_num) {
	case 1:
		val64 = 0x0;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 2:
		val64 = 0x0001000100010001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001000100000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 3:
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0200010200010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0102000102000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0200010200000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 4:
		val64 = 0x0001020300010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 5:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0304000102030400ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0400010203040001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0203040000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 6:
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0203040500010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0405000102030405ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0203040500000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 7:
		val64 = 0x0001020304050600ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0102030405060001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0203040506000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304050600010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0405060000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 8:
		val64 = 0x0001020304050607ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		/* NOTE(review): only slots 0-3 are programmed in the last
		 * register here, unlike the other cases which continue the
		 * pattern — confirm against the H/W round-robin spec.
		 */
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	}

	/* Enable all configured Tx FIFO partitions */
	val64 = readq(&bar0->tx_fifo_partition_0);
	val64 |= (TX_FIFO_PARTITION_EN);
	writeq(val64, &bar0->tx_fifo_partition_0);

	/* Filling the Rx round robin registers as per the
	 * number of Rings and steering based on QoS with
	 * equal priority.
	 */
	switch (config->rx_ring_num) {
	case 1:
		val64 = 0x0;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080808080808080ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 2:
		val64 = 0x0001000100010001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001000100000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080808040404040ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 3:
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0200010200010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0102000102000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0200010200000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080804040402020ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 4:
		val64 = 0x0001020300010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201010ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 5:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0304000102030400ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0400010203040001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0203040000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201008ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 6:
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0203040500010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0405000102030405ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0203040500000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020100804ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 7:
		val64 = 0x0001020304050600ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0102030405060001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0203040506000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304050600010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0405060000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080402010080402ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 8:
		val64 = 0x0001020304050607ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		/* NOTE(review): same 4-slot anomaly as the Tx case 8 above */
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8040201008040201ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	}

	/* UDP Fix */
	val64 = 0;
	for (i = 0; i < 8; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the default rts frame length for the rings configured */
	/* MTU + 22: NOTE(review): presumably Ethernet header + FCS/VLAN
	 * overhead — confirm the exact breakdown.
	 */
	val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
	for (i = 0 ; i < config->rx_ring_num ; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the frame length for the configured rings
	 * desired by the user
	 */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* If rts_frm_len[i] == 0 then it is assumed that user not
		 * specified frame length steering.
		 * If the user provides the frame length then program
		 * the rts_frm_len register for those values or else
		 * leave it as it is.
		 */
		if (rts_frm_len[i] != 0) {
			writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
			       &bar0->rts_frm_len_n[i]);
		}
	}

	/* Disable differentiated services steering logic */
	for (i = 0; i < 64; i++) {
		if (rts_ds_steer(nic, i, 0) == FAILURE) {
			DBG_PRINT(ERR_DBG,
				  "%s: rts_ds_steer failed on codepoint %d\n",
				  dev->name, i);
			return -ENODEV;
		}
	}

	/* Program statistics memory */
	writeq(mac_control->stats_mem_phy, &bar0->stat_addr);

	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = STAT_BC(0x320);
		writeq(val64, &bar0->stat_byte_cnt);
	}

	/*
	 * Initializing the sampling rate for the device to calculate the
	 * bandwidth utilization.
	 */
	val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
		MAC_RX_LINK_UTIL_VAL(rmac_util_period);
	writeq(val64, &bar0->mac_link_util);

	/*
	 * Initializing the Transmit and Receive Traffic Interrupt
	 * Scheme.
	 */

	/* Initialize TTI */
	if (SUCCESS != init_tti(nic, nic->last_link_state))
		return -ENODEV;

	/* RTI Initialization */
	if (nic->device_type == XFRAME_II_DEVICE) {
		/*
		 * Programmed to generate Apprx 500 Intrs per
		 * second
		 */
		int count = (nic->config.bus_speed * 125)/4;
		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
	} else
		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
	val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
		RTI_DATA1_MEM_RX_URNG_B(0x10) |
		RTI_DATA1_MEM_RX_URNG_C(0x30) |
		RTI_DATA1_MEM_RX_TIMER_AC_EN;

	writeq(val64, &bar0->rti_data1_mem);

	/* MSI-X gets larger utilization-based frame counts than INTA */
	val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
		RTI_DATA2_MEM_RX_UFC_B(0x2) ;
	if (nic->config.intr_type == MSI_X)
		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
			  RTI_DATA2_MEM_RX_UFC_D(0x40));
	else
		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
			  RTI_DATA2_MEM_RX_UFC_D(0x80));
	writeq(val64, &bar0->rti_data2_mem);

	/* Commit the RTI data registers to each ring's context */
	for (i = 0; i < config->rx_ring_num; i++) {
		val64 = RTI_CMD_MEM_WE |
			RTI_CMD_MEM_STROBE_NEW_CMD |
			RTI_CMD_MEM_OFFSET(i);
		writeq(val64, &bar0->rti_command_mem);

		/*
		 * Once the operation completes, the Strobe bit of the
		 * command register will be reset. We poll for this
		 * particular condition. We wait for a maximum of 500ms
		 * for the operation to complete, if it's not complete
		 * by then we return error.
		 */
		time = 0;
		while (true) {
			val64 = readq(&bar0->rti_command_mem);
			if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
				break;

			if (time > 10) {
				DBG_PRINT(ERR_DBG, "%s: RTI init failed\n",
					  dev->name);
				return -ENODEV;
			}
			time++;
			msleep(50);
		}
	}

	/*
	 * Initializing proper values as Pause threshold into all
	 * the 8 Queues on Rx side.
	 */
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);

	/* Disable RMAC PAD STRIPPING */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64), add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));
	val64 = readq(&bar0->mac_cfg);

	/* Enable FCS stripping by adapter */
	/* Xframe II takes a direct 64-bit write; Xena needs the keyed
	 * two-halves sequence.
	 */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_CFG_RMAC_STRIP_FCS;
	if (nic->device_type == XFRAME_II_DEVICE)
		writeq(val64, &bar0->mac_cfg);
	else {
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64), add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));
	}

	/*
	 * Set the time value to be inserted in the pause frame
	 * generated by xena.
	 */
	val64 = readq(&bar0->rmac_pause_cfg);
	val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
	val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
	writeq(val64, &bar0->rmac_pause_cfg);

	/*
	 * Set the Threshold Limit for Generating the pause frame
	 * If the amount of data in any Queue exceeds ratio of
	 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
	 * pause frame is generated
	 */
	val64 = 0;
	for (i = 0; i < 4; i++) {
		val64 |= (((u64)0xFF00 |
			   nic->mac_control.mc_pause_threshold_q0q3)
			  << (i * 2 * 8));
	}
	writeq(val64, &bar0->mc_pause_thresh_q0q3);

	val64 = 0;
	for (i = 0; i < 4; i++) {
		val64 |= (((u64)0xFF00 |
			   nic->mac_control.mc_pause_threshold_q4q7)
			  << (i * 2 * 8));
	}
	writeq(val64, &bar0->mc_pause_thresh_q4q7);

	/*
	 * TxDMA will stop Read request if the number of read split has
	 * exceeded the limit pointed by shared_splits
	 */
	val64 = readq(&bar0->pic_control);
	val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
	writeq(val64, &bar0->pic_control);

	if (nic->config.bus_speed == 266) {
		writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
		writeq(0x0, &bar0->read_retry_delay);
		writeq(0x0, &bar0->write_retry_delay);
	}

	/*
	 * Programming the Herc to split every write transaction
	 * that does not start on an ADB to reduce disconnects.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
			MISC_LINK_STABILITY_PRD(3);
		writeq(val64, &bar0->misc_control);
		val64 = readq(&bar0->pic_control2);
		val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
		writeq(val64, &bar0->pic_control2);
	}
	/* CX4 boards need a larger average inter-packet gap on the TMAC */
	if (strstr(nic->product_name, "CX4")) {
		val64 = TMAC_AVG_IPG(0x17);
		writeq(val64, &bar0->tmac_avg_ipg);
	}

	return SUCCESS;
}
1815#define LINK_UP_DOWN_INTERRUPT		1
1816#define MAC_RMAC_ERR_TIMER		2
1817
1818static int s2io_link_fault_indication(struct s2io_nic *nic)
1819{
1820	if (nic->device_type == XFRAME_II_DEVICE)
1821		return LINK_UP_DOWN_INTERRUPT;
1822	else
1823		return MAC_RMAC_ERR_TIMER;
1824}
1825
1826/**
1827 *  do_s2io_write_bits -  update alarm bits in alarm register
1828 *  @value: alarm bits
1829 *  @flag: interrupt status
1830 *  @addr: address value
1831 *  Description: update alarm bits in alarm register
1832 *  Return Value:
1833 *  NONE.
1834 */
1835static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1836{
1837	u64 temp64;
1838
1839	temp64 = readq(addr);
1840
1841	if (flag == ENABLE_INTRS)
1842		temp64 &= ~((u64)value);
1843	else
1844		temp64 |= ((u64)value);
1845	writeq(temp64, addr);
1846}
1847
/**
 *  en_dis_err_alarms - Enable or disable error alarm interrupt sources
 *  @nic: device private variable
 *  @mask: bitmask selecting which alarm blocks to modify (TX_DMA_INTR,
 *	   TX_MAC_INTR, TX_XGXS_INTR, RX_DMA_INTR, RX_MAC_INTR,
 *	   RX_XGXS_INTR, MC_INTR)
 *  @flag: ENABLE_INTRS or DISABLE_INTRS
 *  Description: For each block selected in @mask, program that block's
 *  error mask registers via do_s2io_write_bits() and accumulate the
 *  block's top-level bit into nic->general_int_mask.
 *  Return Value: NONE.
 */
static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 gen_int_mask = 0;
	u64 interruptible;

	/* Mask everything at the top level while reprogramming the blocks */
	writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
	if (mask & TX_DMA_INTR) {
		gen_int_mask |= TXDMA_INT_M;

		do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
				   TXDMA_PCC_INT | TXDMA_TTI_INT |
				   TXDMA_LSO_INT | TXDMA_TPA_INT |
				   TXDMA_SM_INT, flag, &bar0->txdma_int_mask);

		do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
				   PFC_MISC_0_ERR | PFC_MISC_1_ERR |
				   PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
				   &bar0->pfc_err_mask);

		do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
				   TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
				   TDA_PCIX_ERR, flag, &bar0->tda_err_mask);

		do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
				   PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
				   PCC_N_SERR | PCC_6_COF_OV_ERR |
				   PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
				   PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
				   PCC_TXB_ECC_SG_ERR,
				   flag, &bar0->pcc_err_mask);

		do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
				   TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);

		do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
				   LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
				   LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
				   flag, &bar0->lso_err_mask);

		do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
				   flag, &bar0->tpa_err_mask);

		do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
	}

	if (mask & TX_MAC_INTR) {
		gen_int_mask |= TXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
				   &bar0->mac_int_mask);
		do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
				   TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
				   TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
				   flag, &bar0->mac_tmac_err_mask);
	}

	if (mask & TX_XGXS_INTR) {
		gen_int_mask |= TXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
				   &bar0->xgxs_int_mask);
		do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
				   TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
				   flag, &bar0->xgxs_txgxs_err_mask);
	}

	if (mask & RX_DMA_INTR) {
		gen_int_mask |= RXDMA_INT_M;
		do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
				   RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
				   flag, &bar0->rxdma_int_mask);
		do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
				   RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
				   RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
				   RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
		do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
				   PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
				   PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
				   &bar0->prc_pcix_err_mask);
		do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
				   RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
				   &bar0->rpa_err_mask);
		do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
				   RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
				   RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
				   RDA_FRM_ECC_SG_ERR |
				   RDA_MISC_ERR|RDA_PCIX_ERR,
				   flag, &bar0->rda_err_mask);
		do_s2io_write_bits(RTI_SM_ERR_ALARM |
				   RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
				   flag, &bar0->rti_err_mask);
	}

	if (mask & RX_MAC_INTR) {
		gen_int_mask |= RXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
				   &bar0->mac_int_mask);
		interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
				 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
				 RMAC_DOUBLE_ECC_ERR);
		/* Link-state change is routed through the RMAC error block
		 * only when the RMAC error timer scheme is used (Xframe I).
		 */
		if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
			interruptible |= RMAC_LINK_STATE_CHANGE_INT;
		do_s2io_write_bits(interruptible,
				   flag, &bar0->mac_rmac_err_mask);
	}

	if (mask & RX_XGXS_INTR) {
		gen_int_mask |= RXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
				   &bar0->xgxs_int_mask);
		do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
				   &bar0->xgxs_rxgxs_err_mask);
	}

	if (mask & MC_INTR) {
		gen_int_mask |= MC_INT_M;
		do_s2io_write_bits(MC_INT_MASK_MC_INT,
				   flag, &bar0->mc_int_mask);
		do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
				   MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
				   &bar0->mc_err_mask);
	}
	nic->general_int_mask = gen_int_mask;

	/* Remove this line when alarm interrupts are enabled */
	nic->general_int_mask = 0;
}
1974
1975/**
1976 *  en_dis_able_nic_intrs - Enable or Disable the interrupts
1977 *  @nic: device private variable,
1978 *  @mask: A mask indicating which Intr block must be modified and,
1979 *  @flag: A flag indicating whether to enable or disable the Intrs.
1980 *  Description: This function will either disable or enable the interrupts
1981 *  depending on the flag argument. The mask argument can be used to
1982 *  enable/disable any Intr block.
1983 *  Return Value: NONE.
1984 */
1985
static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 temp64 = 0, intr_mask = 0;

	/* Start from the alarm mask prepared by en_dis_err_alarms() */
	intr_mask = nic->general_int_mask;

	/*  Top level interrupt classification */
	/*  PIC Interrupts */
	if (mask & TX_PIC_INTR) {
		/*  Enable PIC Intrs in the general intr mask register */
		intr_mask |= TXPIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/*
			 * If Hercules adapter enable GPIO otherwise
			 * disable all PCIX, Flash, MDIO, IIC and GPIO
			 * interrupts for now.
			 * TODO
			 */
			if (s2io_link_fault_indication(nic) ==
			    LINK_UP_DOWN_INTERRUPT) {
				do_s2io_write_bits(PIC_INT_GPIO, flag,
						   &bar0->pic_int_mask);
				do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
						   &bar0->gpio_int_mask);
			} else
				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable PIC Intrs in the general
			 * intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
		}
	}

	/*  Tx traffic interrupts */
	if (mask & TX_TRAFFIC_INTR) {
		intr_mask |= TXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/*
			 * Enable all the Tx side interrupts
			 * writing 0 Enables all 64 TX interrupt levels
			 */
			writeq(0x0, &bar0->tx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Tx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
		}
	}

	/*  Rx traffic interrupts */
	if (mask & RX_TRAFFIC_INTR) {
		intr_mask |= RXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/* writing 0 Enables all 8 RX interrupt levels */
			writeq(0x0, &bar0->rx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Rx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
		}
	}

	/* Commit the top-level mask. Note: the disable path masks ALL
	 * top-level interrupt sources, not only the blocks in @mask.
	 */
	temp64 = readq(&bar0->general_int_mask);
	if (flag == ENABLE_INTRS)
		temp64 &= ~((u64)intr_mask);
	else
		temp64 = DISABLE_ALL_INTRS;
	writeq(temp64, &bar0->general_int_mask);

	/* Cache the hardware's view of the mask for later use */
	nic->general_int_mask = readq(&bar0->general_int_mask);
}
2064
2065/**
2066 *  verify_pcc_quiescent- Checks for PCC quiescent state
2067 *  Return: 1 If PCC is quiescence
2068 *          0 If PCC is not quiescence
2069 */
2070static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2071{
2072	int ret = 0, herc;
2073	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2074	u64 val64 = readq(&bar0->adapter_status);
2075
2076	herc = (sp->device_type == XFRAME_II_DEVICE);
2077
2078	if (flag == false) {
2079		if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2080			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2081				ret = 1;
2082		} else {
2083			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2084				ret = 1;
2085		}
2086	} else {
2087		if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2088			if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2089			     ADAPTER_STATUS_RMAC_PCC_IDLE))
2090				ret = 1;
2091		} else {
2092			if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2093			     ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2094				ret = 1;
2095		}
2096	}
2097
2098	return ret;
2099}
2100/**
2101 *  verify_xena_quiescence - Checks whether the H/W is ready
2102 *  Description: Returns whether the H/W is ready to go or not. Depending
2103 *  on whether adapter enable bit was written or not the comparison
2104 *  differs and the calling function passes the input argument flag to
2105 *  indicate this.
2106 *  Return: 1 If xena is quiescence
2107 *          0 If Xena is not quiescence
2108 */
2109
2110static int verify_xena_quiescence(struct s2io_nic *sp)
2111{
2112	int  mode;
2113	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2114	u64 val64 = readq(&bar0->adapter_status);
2115	mode = s2io_verify_pci_mode(sp);
2116
2117	if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2118		DBG_PRINT(ERR_DBG, "TDMA is not ready!\n");
2119		return 0;
2120	}
2121	if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2122		DBG_PRINT(ERR_DBG, "RDMA is not ready!\n");
2123		return 0;
2124	}
2125	if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2126		DBG_PRINT(ERR_DBG, "PFC is not ready!\n");
2127		return 0;
2128	}
2129	if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2130		DBG_PRINT(ERR_DBG, "TMAC BUF is not empty!\n");
2131		return 0;
2132	}
2133	if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2134		DBG_PRINT(ERR_DBG, "PIC is not QUIESCENT!\n");
2135		return 0;
2136	}
2137	if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2138		DBG_PRINT(ERR_DBG, "MC_DRAM is not ready!\n");
2139		return 0;
2140	}
2141	if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2142		DBG_PRINT(ERR_DBG, "MC_QUEUES is not ready!\n");
2143		return 0;
2144	}
2145	if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2146		DBG_PRINT(ERR_DBG, "M_PLL is not locked!\n");
2147		return 0;
2148	}
2149
2150	/*
2151	 * In PCI 33 mode, the P_PLL is not used, and therefore,
2152	 * the the P_PLL_LOCK bit in the adapter_status register will
2153	 * not be asserted.
2154	 */
2155	if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2156	    sp->device_type == XFRAME_II_DEVICE &&
2157	    mode != PCI_MODE_PCI_33) {
2158		DBG_PRINT(ERR_DBG, "P_PLL is not locked!\n");
2159		return 0;
2160	}
2161	if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2162	      ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2163		DBG_PRINT(ERR_DBG, "RC_PRC is not QUIESCENT!\n");
2164		return 0;
2165	}
2166	return 1;
2167}
2168
2169/**
2170 * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
2171 * @sp: Pointer to device specifc structure
2172 * Description :
2173 * New procedure to clear mac address reading  problems on Alpha platforms
2174 *
2175 */
2176
2177static void fix_mac_address(struct s2io_nic *sp)
2178{
2179	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2180	int i = 0;
2181
2182	while (fix_mac[i] != END_SIGN) {
2183		writeq(fix_mac[i++], &bar0->gpio_control);
2184		udelay(10);
2185		(void) readq(&bar0->gpio_control);
2186	}
2187}
2188
/**
 *  start_nic - Turns the device on
 *  @nic : device private variable.
 *  Description:
 *  This function actually turns the device on. Before this function is
 *  called, all registers are configured from their reset states
 *  and shared memory is allocated but the NIC is still quiescent. On
 *  calling this function, the device interrupts are cleared and the NIC is
 *  literally switched on by writing into the adapter control register.
 *  Return Value:
 *  SUCCESS on success and FAILURE (-1) on failure.
 */

static int start_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	/*  PRC Initialization and configuration */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		/* Tell the PRC where the first Rx block of this ring lives */
		writeq((u64)ring->rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		/* Program the RxD backoff interval: clear field, then set */
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	/* Honour the module-level request to keep VLAN tags in frames */
	if (vlan_tag_strip == 0) {
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		nic->vlan_strip_flag = 0;
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/*
	 * NOTE(review): the historical comment here said "Enabling ECC
	 * Protection" but the code clears ADAPTER_ECC_EN.  The bit
	 * semantics appear inverted relative to the name - confirm
	 * against the Xframe register spec before changing.
	 */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, "
			  "Adapter status reads: 0x%llx\n",
			  dev->name, (unsigned long long)val64);
		return FAILURE;
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Dont see link state interrupts initially on some switches,
		 * so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		/* Magic LED programming value at offset 0x2700 -
		 * per the SXE-002 errata workaround; do not alter.
		 */
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
/**
 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
 * @fifo_data: fifo the TxD list belongs to
 * @txdlp: first TxD of the descriptor list for one transmitted skb
 * @get_off: get-side offset of the list (not used in the body)
 * Description:
 * Unmaps every DMA-mapped buffer referenced by the TxD list (the
 * linear part plus one TxD per page fragment), wipes the descriptors
 * for reuse and returns the skb recorded in Host_Control.
 * Returns NULL if no skb was attached to the list.
 */
static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
					struct TxD *txdlp, int get_off)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct sk_buff *skb;
	struct TxD *txds;
	u16 j, frg_cnt;

	txds = txdlp;
	/* First TxD may carry the UFO in-band buffer; unmap and skip it */
	if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
		dma_unmap_single(&nic->pdev->dev,
				 (dma_addr_t)txds->Buffer_Pointer,
				 sizeof(u64), DMA_TO_DEVICE);
		txds++;
	}

	skb = (struct sk_buff *)((unsigned long)txds->Host_Control);
	if (!skb) {
		/* Nothing attached: just clear the whole list */
		memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
		return NULL;
	}
	/* Unmap the linear (header) part of the skb */
	dma_unmap_single(&nic->pdev->dev, (dma_addr_t)txds->Buffer_Pointer,
			 skb_headlen(skb), DMA_TO_DEVICE);
	frg_cnt = skb_shinfo(skb)->nr_frags;
	if (frg_cnt) {
		txds++;
		/* One TxD per fragment; stop at the first unused one */
		for (j = 0; j < frg_cnt; j++, txds++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			if (!txds->Buffer_Pointer)
				break;
			dma_unmap_page(&nic->pdev->dev,
				       (dma_addr_t)txds->Buffer_Pointer,
				       skb_frag_size(frag), DMA_TO_DEVICE);
		}
	}
	/* Clear the descriptor list so it can be reused */
	memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
	return skb;
}
2347
2348/**
2349 *  free_tx_buffers - Free all queued Tx buffers
2350 *  @nic : device private variable.
2351 *  Description:
2352 *  Free all queued Tx buffers.
2353 *  Return Value: void
2354 */
2355
2356static void free_tx_buffers(struct s2io_nic *nic)
2357{
2358	struct net_device *dev = nic->dev;
2359	struct sk_buff *skb;
2360	struct TxD *txdp;
2361	int i, j;
2362	int cnt = 0;
2363	struct config_param *config = &nic->config;
2364	struct mac_info *mac_control = &nic->mac_control;
2365	struct stat_block *stats = mac_control->stats_info;
2366	struct swStat *swstats = &stats->sw_stat;
2367
2368	for (i = 0; i < config->tx_fifo_num; i++) {
2369		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
2370		struct fifo_info *fifo = &mac_control->fifos[i];
2371		unsigned long flags;
2372
2373		spin_lock_irqsave(&fifo->tx_lock, flags);
2374		for (j = 0; j < tx_cfg->fifo_len; j++) {
2375			txdp = fifo->list_info[j].list_virt_addr;
2376			skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2377			if (skb) {
2378				swstats->mem_freed += skb->truesize;
2379				dev_kfree_skb(skb);
2380				cnt++;
2381			}
2382		}
2383		DBG_PRINT(INTR_DBG,
2384			  "%s: forcibly freeing %d skbs on FIFO%d\n",
2385			  dev->name, cnt, i);
2386		fifo->tx_curr_get_info.offset = 0;
2387		fifo->tx_curr_put_info.offset = 0;
2388		spin_unlock_irqrestore(&fifo->tx_lock, flags);
2389	}
2390}
2391
2392/**
2393 *   stop_nic -  To stop the nic
2394 *   @nic ; device private variable.
2395 *   Description:
2396 *   This function does exactly the opposite of what the start_nic()
2397 *   function does. This function is called to stop the device.
2398 *   Return Value:
2399 *   void.
2400 */
2401
2402static void stop_nic(struct s2io_nic *nic)
2403{
2404	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2405	register u64 val64 = 0;
2406	u16 interruptible;
2407
2408	/*  Disable all interrupts */
2409	en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2410	interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2411	interruptible |= TX_PIC_INTR;
2412	en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2413
2414	/* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2415	val64 = readq(&bar0->adapter_control);
2416	val64 &= ~(ADAPTER_CNTL_EN);
2417	writeq(val64, &bar0->adapter_control);
2418}
2419
/**
 *  fill_rx_buffers - Allocates the Rx side skbs
 *  @nic: device private variable.
 *  @ring: per ring structure
 *  @from_card_up: If this is true, we will map the buffer to get
 *     the dma address for buf0 and buf1 to give it to the card.
 *     Else we will sync the already mapped buffer to give it to the card.
 *  Description:
 *  The function allocates Rx side skbs and puts the physical
 *  address of these buffers into the RxD buffer pointers, so that the NIC
 *  can DMA the received frame into these locations.
 *  The NIC supports 3 receive modes, viz
 *  1. single buffer,
 *  2. three buffer and
 *  3. Five buffer modes.
 *  Each mode defines how many fragments the received frame will be split
 *  up into by the NIC. The frame is split into L3 header, L4 Header,
 *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
 *  is split into 3 fragments. As of now only single buffer mode is
 *  supported.
 *   Return Value:
 *  SUCCESS on success or an appropriate -ve value on failure.
 */
static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
			   int from_card_up)
{
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	int off, size, block_no, block_no1;
	u32 alloc_tab = 0;
	u32 alloc_cnt;
	u64 tmp;
	struct buffAdd *ba;
	struct RxD_t *first_rxdp = NULL;
	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;
	struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat;

	/* Number of empty descriptor slots we can refill this pass */
	alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;

	block_no1 = ring->rx_curr_get_info.block_index;
	while (alloc_tab < alloc_cnt) {
		block_no = ring->rx_curr_put_info.block_index;

		off = ring->rx_curr_put_info.offset;

		rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;

		/* Put index has caught up with the get index and the slot
		 * still holds an skb: the ring is full.
		 */
		if ((block_no == block_no1) &&
		    (off == ring->rx_curr_get_info.offset) &&
		    (rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put info equated\n",
				  ring->dev->name);
			goto end;
		}
		/* End of an Rx block: wrap to the next block (circularly) */
		if (off && (off == ring->rxd_count)) {
			ring->rx_curr_put_info.block_index++;
			if (ring->rx_curr_put_info.block_index ==
			    ring->block_count)
				ring->rx_curr_put_info.block_index = 0;
			block_no = ring->rx_curr_put_info.block_index;
			off = 0;
			ring->rx_curr_put_info.offset = off;
			rxdp = ring->rx_blocks[block_no].block_virt_addr;
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
				  ring->dev->name, rxdp);

		}

		/* Descriptor still owned by the adapter: stop refilling */
		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
		    ((ring->rxd_mode == RXD_MODE_3B) &&
		     (rxdp->Control_2 & s2BIT(0)))) {
			ring->rx_curr_put_info.offset = off;
			goto end;
		}
		/* calculate size of skb based on ring mode */
		size = ring->mtu +
			HEADER_ETHERNET_II_802_3_SIZE +
			HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
		if (ring->rxd_mode == RXD_MODE_1)
			size += NET_IP_ALIGN;
		else
			size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;

		/* allocate skb */
		skb = netdev_alloc_skb(nic->dev, size);
		if (!skb) {
			DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
				  ring->dev->name);
			/* Hand the descriptors filled so far to the NIC
			 * before bailing out.
			 */
			if (first_rxdp) {
				dma_wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			swstats->mem_alloc_fail_cnt++;

			return -ENOMEM ;
		}
		swstats->mem_allocated += skb->truesize;

		if (ring->rxd_mode == RXD_MODE_1) {
			/* 1 buffer mode - normal operation mode */
			rxdp1 = (struct RxD1 *)rxdp;
			memset(rxdp, 0, sizeof(struct RxD1));
			skb_reserve(skb, NET_IP_ALIGN);
			rxdp1->Buffer0_ptr =
				dma_map_single(&ring->pdev->dev, skb->data,
					       size - NET_IP_ALIGN,
					       DMA_FROM_DEVICE);
			if (dma_mapping_error(&nic->pdev->dev, rxdp1->Buffer0_ptr))
				goto pci_map_failed;

			rxdp->Control_2 =
				SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
			rxdp->Host_Control = (unsigned long)skb;
		} else if (ring->rxd_mode == RXD_MODE_3B) {
			/*
			 * 2 buffer mode -
			 * 2 buffer mode provides 128
			 * byte aligned receive buffers.
			 */

			rxdp3 = (struct RxD3 *)rxdp;
			/* save buffer pointers to avoid frequent dma mapping */
			Buffer0_ptr = rxdp3->Buffer0_ptr;
			Buffer1_ptr = rxdp3->Buffer1_ptr;
			memset(rxdp, 0, sizeof(struct RxD3));
			/* restore the buffer pointers for dma sync*/
			rxdp3->Buffer0_ptr = Buffer0_ptr;
			rxdp3->Buffer1_ptr = Buffer1_ptr;

			ba = &ring->ba[block_no][off];
			skb_reserve(skb, BUF0_LEN);
			/* Round skb->data up to the next ALIGN_SIZE boundary */
			tmp = (u64)(unsigned long)skb->data;
			tmp += ALIGN_SIZE;
			tmp &= ~ALIGN_SIZE;
			skb->data = (void *) (unsigned long)tmp;
			skb_reset_tail_pointer(skb);

			if (from_card_up) {
				rxdp3->Buffer0_ptr =
					dma_map_single(&ring->pdev->dev,
						       ba->ba_0, BUF0_LEN,
						       DMA_FROM_DEVICE);
				if (dma_mapping_error(&nic->pdev->dev, rxdp3->Buffer0_ptr))
					goto pci_map_failed;
			} else
				dma_sync_single_for_device(&ring->pdev->dev,
							   (dma_addr_t)rxdp3->Buffer0_ptr,
							   BUF0_LEN,
							   DMA_FROM_DEVICE);

			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
			if (ring->rxd_mode == RXD_MODE_3B) {
				/* Two buffer mode */

				/*
				 * Buffer2 will have L3/L4 header plus
				 * L4 payload
				 */
				rxdp3->Buffer2_ptr = dma_map_single(&ring->pdev->dev,
								    skb->data,
								    ring->mtu + 4,
								    DMA_FROM_DEVICE);

				if (dma_mapping_error(&nic->pdev->dev, rxdp3->Buffer2_ptr))
					goto pci_map_failed;

				if (from_card_up) {
					rxdp3->Buffer1_ptr =
						dma_map_single(&ring->pdev->dev,
							       ba->ba_1,
							       BUF1_LEN,
							       DMA_FROM_DEVICE);

					if (dma_mapping_error(&nic->pdev->dev,
							      rxdp3->Buffer1_ptr)) {
						/* Undo the Buffer2 mapping made above */
						dma_unmap_single(&ring->pdev->dev,
								 (dma_addr_t)(unsigned long)
								 skb->data,
								 ring->mtu + 4,
								 DMA_FROM_DEVICE);
						goto pci_map_failed;
					}
				}
				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
					(ring->mtu + 4);
			}
			rxdp->Control_2 |= s2BIT(0);
			rxdp->Host_Control = (unsigned long) (skb);
		}
		/* Hand ownership to the NIC immediately except every
		 * 2^rxsync_frequency-th descriptor, which is batched via
		 * first_rxdp below (ownership flips after a dma_wmb()).
		 */
		if (alloc_tab & ((1 << rxsync_frequency) - 1))
			rxdp->Control_1 |= RXD_OWN_XENA;
		off++;
		if (off == (ring->rxd_count + 1))
			off = 0;
		ring->rx_curr_put_info.offset = off;

		rxdp->Control_2 |= SET_RXD_MARKER;
		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
			if (first_rxdp) {
				dma_wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			first_rxdp = rxdp;
		}
		ring->rx_bufs_left += 1;
		alloc_tab++;
	}

end:
	/* Transfer ownership of first descriptor to adapter just before
	 * exiting. Before that, use memory barrier so that ownership
	 * and other fields are seen by adapter correctly.
	 */
	if (first_rxdp) {
		dma_wmb();
		first_rxdp->Control_1 |= RXD_OWN_XENA;
	}

	return SUCCESS;

pci_map_failed:
	swstats->pci_map_fail_cnt++;
	swstats->mem_freed += skb->truesize;
	dev_kfree_skb_irq(skb);
	return -ENOMEM;
}
2648
/* free_rxd_blk - Free every skb still attached to one Rx block.
 * @sp: device private structure
 * @ring_no: index of the ring the block belongs to
 * @blk: index of the Rx block within the ring
 * For each descriptor holding an skb, unmaps its DMA buffers according
 * to the ring's RxD mode, frees the skb, clears the descriptor and
 * decrements the ring's outstanding-buffer count.
 */
static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
{
	struct net_device *dev = sp->dev;
	int j;
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;
	struct mac_info *mac_control = &sp->mac_control;
	struct stat_block *stats = mac_control->stats_info;
	struct swStat *swstats = &stats->sw_stat;

	for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
		rxdp = mac_control->rings[ring_no].
			rx_blocks[blk].rxds[j].virt_addr;
		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
		if (!skb)
			continue;	/* slot was never filled */
		if (sp->rxd_mode == RXD_MODE_1) {
			/* Single-buffer mode: one mapping sized for the MTU
			 * plus the L2 headers (matches fill_rx_buffers()).
			 */
			rxdp1 = (struct RxD1 *)rxdp;
			dma_unmap_single(&sp->pdev->dev,
					 (dma_addr_t)rxdp1->Buffer0_ptr,
					 dev->mtu +
					 HEADER_ETHERNET_II_802_3_SIZE +
					 HEADER_802_2_SIZE + HEADER_SNAP_SIZE,
					 DMA_FROM_DEVICE);
			memset(rxdp, 0, sizeof(struct RxD1));
		} else if (sp->rxd_mode == RXD_MODE_3B) {
			/* Two-buffer mode: unmap all three buffer pointers */
			rxdp3 = (struct RxD3 *)rxdp;
			dma_unmap_single(&sp->pdev->dev,
					 (dma_addr_t)rxdp3->Buffer0_ptr,
					 BUF0_LEN, DMA_FROM_DEVICE);
			dma_unmap_single(&sp->pdev->dev,
					 (dma_addr_t)rxdp3->Buffer1_ptr,
					 BUF1_LEN, DMA_FROM_DEVICE);
			dma_unmap_single(&sp->pdev->dev,
					 (dma_addr_t)rxdp3->Buffer2_ptr,
					 dev->mtu + 4, DMA_FROM_DEVICE);
			memset(rxdp, 0, sizeof(struct RxD3));
		}
		swstats->mem_freed += skb->truesize;
		dev_kfree_skb(skb);
		mac_control->rings[ring_no].rx_bufs_left -= 1;
	}
}
2694
2695/**
2696 *  free_rx_buffers - Frees all Rx buffers
2697 *  @sp: device private variable.
2698 *  Description:
2699 *  This function will free all Rx buffers allocated by host.
2700 *  Return Value:
2701 *  NONE.
2702 */
2703
2704static void free_rx_buffers(struct s2io_nic *sp)
2705{
2706	struct net_device *dev = sp->dev;
2707	int i, blk = 0, buf_cnt = 0;
2708	struct config_param *config = &sp->config;
2709	struct mac_info *mac_control = &sp->mac_control;
2710
2711	for (i = 0; i < config->rx_ring_num; i++) {
2712		struct ring_info *ring = &mac_control->rings[i];
2713
2714		for (blk = 0; blk < rx_ring_sz[i]; blk++)
2715			free_rxd_blk(sp, i, blk);
2716
2717		ring->rx_curr_put_info.block_index = 0;
2718		ring->rx_curr_get_info.block_index = 0;
2719		ring->rx_curr_put_info.offset = 0;
2720		ring->rx_curr_get_info.offset = 0;
2721		ring->rx_bufs_left = 0;
2722		DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
2723			  dev->name, buf_cnt, i);
2724	}
2725}
2726
2727static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
2728{
2729	if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2730		DBG_PRINT(INFO_DBG, "%s: Out of memory in Rx Intr!!\n",
2731			  ring->dev->name);
2732	}
2733	return 0;
2734}
2735
/**
 * s2io_poll_msix - Rx NAPI poll handler (per-ring, MSI-X)
 * @napi : pointer to the napi structure (embedded in a ring_info).
 * @budget : The number of packets that were budgeted to be processed
 * during one pass through the poll function.
 * Description:
 * Processes up to @budget received packets on this ring, replenishes
 * the ring's Rx buffers, and if the budget was not exhausted completes
 * NAPI and re-enables the ring's MSI-X Rx vector.
 * Return value:
 * Number of packets processed.
 */

static int s2io_poll_msix(struct napi_struct *napi, int budget)
{
	struct ring_info *ring = container_of(napi, struct ring_info, napi);
	struct net_device *dev = ring->dev;
	int pkts_processed = 0;
	u8 __iomem *addr = NULL;
	u8 val8 = 0;
	struct s2io_nic *nic = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	int budget_org = budget;

	if (unlikely(!is_s2io_card_up(nic)))
		return 0;

	pkts_processed = rx_intr_handler(ring, budget);
	s2io_chk_rx_buffers(nic, ring);

	if (pkts_processed < budget_org) {
		napi_complete_done(napi, pkts_processed);
		/* Re Enable MSI-Rx Vector.
		 * NOTE(review): the byte offset (7 - ring_no) and the mask
		 * values 0x3f/0xbf appear device-specific - confirm against
		 * the Xframe xmsi_mask_reg layout before changing.
		 */
		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
		addr += 7 - ring->ring_no;
		val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
		writeb(val8, addr);
		/* read back to flush the posted write */
		val8 = readb(addr);
	}
	return pkts_processed;
}
2777
2778static int s2io_poll_inta(struct napi_struct *napi, int budget)
2779{
2780	struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2781	int pkts_processed = 0;
2782	int ring_pkts_processed, i;
2783	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2784	int budget_org = budget;
2785	struct config_param *config = &nic->config;
2786	struct mac_info *mac_control = &nic->mac_control;
2787
2788	if (unlikely(!is_s2io_card_up(nic)))
2789		return 0;
2790
2791	for (i = 0; i < config->rx_ring_num; i++) {
2792		struct ring_info *ring = &mac_control->rings[i];
2793		ring_pkts_processed = rx_intr_handler(ring, budget);
2794		s2io_chk_rx_buffers(nic, ring);
2795		pkts_processed += ring_pkts_processed;
2796		budget -= ring_pkts_processed;
2797		if (budget <= 0)
2798			break;
2799	}
2800	if (pkts_processed < budget_org) {
2801		napi_complete_done(napi, pkts_processed);
2802		/* Re enable the Rx interrupts for the ring */
2803		writeq(0, &bar0->rx_traffic_mask);
2804		readl(&bar0->rx_traffic_mask);
2805	}
2806	return pkts_processed;
2807}
2808
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * s2io_netpoll - netpoll event handler entry point
 * @dev : pointer to the device structure.
 * Description:
 * 	This function will be called by upper layer to check for events on the
 * interface in situations where interrupts are disabled. It is used for
 * specific in-kernel networking tasks, such as remote consoles and kernel
 * debugging over the network (example netdump in RedHat).
 */
static void s2io_netpoll(struct net_device *dev)
{
	struct s2io_nic *nic = netdev_priv(dev);
	const int irq = nic->pdev->irq;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
	int i;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	/* Device unreachable after a PCI channel error: nothing to poll */
	if (pci_channel_offline(nic->pdev))
		return;

	disable_irq(irq);

	/* Writing all-ones clears every pending Rx/Tx traffic interrupt */
	writeq(val64, &bar0->rx_traffic_int);
	writeq(val64, &bar0->tx_traffic_int);

	/* we need to free up the transmitted skbufs or else netpoll will
	 * run out of skbs and will fail and eventually netpoll application such
	 * as netdump will fail.
	 */
	for (i = 0; i < config->tx_fifo_num; i++)
		tx_intr_handler(&mac_control->fifos[i]);

	/* check for received packet and indicate up to network */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		/* NOTE(review): rx_intr_handler() returns immediately when
		 * budget <= 0, so this call appears to process nothing -
		 * verify intended netpoll Rx behavior.
		 */
		rx_intr_handler(ring, 0);
	}

	/* Replenish the Rx descriptors consumed above */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
			DBG_PRINT(INFO_DBG,
				  "%s: Out of memory in Rx Netpoll!!\n",
				  dev->name);
			break;
		}
	}
	enable_irq(irq);
}
#endif
2864
/**
 *  rx_intr_handler - Rx interrupt handler
 *  @ring_data: per ring structure.
 *  @budget: budget for napi processing.
 *  Description:
 *  If the interrupt is because of a received frame or if the
 *  receive ring contains fresh as yet un-processed frames, this function is
 *  called. It picks out the RxD at which place the last Rx processing had
 *  stopped and sends the skb to the OSM's Rx handler and then increments
 *  the offset.
 *  Return Value:
 *  No. of napi packets processed.
 */
static int rx_intr_handler(struct ring_info *ring_data, int budget)
{
	int get_block, put_block;
	struct rx_curr_get_info get_info, put_info;
	struct RxD_t *rxdp;
	struct sk_buff *skb;
	int pkt_cnt = 0, napi_pkts = 0;
	int i;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;

	/* No budget: nothing may be processed */
	if (budget <= 0)
		return napi_pkts;

	/* Snapshot the get/put positions; the get side is advanced as
	 * descriptors are consumed below.
	 */
	get_info = ring_data->rx_curr_get_info;
	get_block = get_info.block_index;
	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
	put_block = put_info.block_index;
	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;

	/* Walk descriptors handed back to the host (RXD_IS_UP2DT) */
	while (RXD_IS_UP2DT(rxdp)) {
		/*
		 * If your are next to put index then it's
		 * FIFO full condition
		 */
		if ((get_block == put_block) &&
		    (get_info.offset + 1) == put_info.offset) {
			DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
				  ring_data->dev->name);
			break;
		}
		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
		if (skb == NULL) {
			DBG_PRINT(ERR_DBG, "%s: NULL skb in Rx Intr\n",
				  ring_data->dev->name);
			return 0;
		}
		if (ring_data->rxd_mode == RXD_MODE_1) {
			/* Single-buffer mode: unmap the whole frame buffer */
			rxdp1 = (struct RxD1 *)rxdp;
			dma_unmap_single(&ring_data->pdev->dev,
					 (dma_addr_t)rxdp1->Buffer0_ptr,
					 ring_data->mtu +
					 HEADER_ETHERNET_II_802_3_SIZE +
					 HEADER_802_2_SIZE +
					 HEADER_SNAP_SIZE,
					 DMA_FROM_DEVICE);
		} else if (ring_data->rxd_mode == RXD_MODE_3B) {
			/* Two-buffer mode: Buffer0 stays mapped (sync only),
			 * Buffer2 (payload) is fully unmapped.
			 */
			rxdp3 = (struct RxD3 *)rxdp;
			dma_sync_single_for_cpu(&ring_data->pdev->dev,
						(dma_addr_t)rxdp3->Buffer0_ptr,
						BUF0_LEN, DMA_FROM_DEVICE);
			dma_unmap_single(&ring_data->pdev->dev,
					 (dma_addr_t)rxdp3->Buffer2_ptr,
					 ring_data->mtu + 4, DMA_FROM_DEVICE);
		}
		prefetch(skb->data);
		rx_osm_handler(ring_data, rxdp);
		/* Advance the get pointer, wrapping blocks circularly */
		get_info.offset++;
		ring_data->rx_curr_get_info.offset = get_info.offset;
		rxdp = ring_data->rx_blocks[get_block].
			rxds[get_info.offset].virt_addr;
		if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
			get_info.offset = 0;
			ring_data->rx_curr_get_info.offset = get_info.offset;
			get_block++;
			if (get_block == ring_data->block_count)
				get_block = 0;
			ring_data->rx_curr_get_info.block_index = get_block;
			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
		}

		if (ring_data->nic->config.napi) {
			budget--;
			napi_pkts++;
			if (!budget)
				break;
		}
		pkt_cnt++;
		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
			break;
	}
	if (ring_data->lro) {
		/* Clear all LRO sessions before exiting */
		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
			struct lro *lro = &ring_data->lro0_n[i];
			if (lro->in_use) {
				update_L3L4_header(ring_data->nic, lro);
				queue_rx_frame(lro->parent, lro->vlan_tag);
				clear_lro_session(lro);
			}
		}
	}
	return napi_pkts;
}
2972
/**
 *  tx_intr_handler - Transmit interrupt handler
 *  @fifo_data: fifo whose completed descriptors are to be reaped
 *  Description:
 *  If an interrupt was raised to indicate DMA complete of the
 *  Tx packet, this function is called. It identifies the last TxD
 *  whose buffer was freed and frees all skbs whose data have already
 *  DMA'ed into the NICs internal memory.
 *  Return Value:
 *  NONE
 */

static void tx_intr_handler(struct fifo_info *fifo_data)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct tx_curr_get_info get_info, put_info;
	struct sk_buff *skb = NULL;
	struct TxD *txdlp;
	int pkt_cnt = 0;
	unsigned long flags = 0;
	u8 err_mask;
	struct stat_block *stats = nic->mac_control.stats_info;
	struct swStat *swstats = &stats->sw_stat;

	/* Best-effort: if another context holds the fifo lock, let it
	 * do the reaping rather than spinning here.
	 */
	if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
		return;

	get_info = fifo_data->tx_curr_get_info;
	memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
	txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
	/* Reap descriptors the NIC has handed back (ownership cleared),
	 * up to the put pointer.
	 */
	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
	       (get_info.offset != put_info.offset) &&
	       (txdlp->Host_Control)) {
		/* Check for TxD errors */
		if (txdlp->Control_1 & TXD_T_CODE) {
			unsigned long long err;
			err = txdlp->Control_1 & TXD_T_CODE;
			if (err & 0x1) {
				swstats->parity_err_cnt++;
			}

			/* update t_code statistics */
			err_mask = err >> 48;
			switch (err_mask) {
			case 2:
				swstats->tx_buf_abort_cnt++;
				break;

			case 3:
				swstats->tx_desc_abort_cnt++;
				break;

			case 7:
				swstats->tx_parity_err_cnt++;
				break;

			case 10:
				swstats->tx_link_loss_cnt++;
				break;

			case 15:
				swstats->tx_list_proc_err_cnt++;
				break;
			}
		}

		/* Unmap the buffers and recover the completed skb */
		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
		if (skb == NULL) {
			spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
			DBG_PRINT(ERR_DBG, "%s: NULL skb in Tx Free Intr\n",
				  __func__);
			return;
		}
		pkt_cnt++;

		/* Updating the statistics block */
		swstats->mem_freed += skb->truesize;
		dev_consume_skb_irq(skb);

		/* Advance the get pointer with wrap-around */
		get_info.offset++;
		if (get_info.offset == get_info.fifo_len + 1)
			get_info.offset = 0;
		txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
		fifo_data->tx_curr_get_info.offset = get_info.offset;
	}

	/* Descriptors were reclaimed: the queue may be restartable */
	s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);

	spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
}
3063
/**
 *  s2io_mdio_write - Function to write in to MDIO registers
 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 *  @addr     : address value
 *  @value    : data value
 *  @dev      : pointer to net_device structure
 *  Description:
 *  This function is used to write values to the MDIO registers.
 *  Issues three transactions on mdio_control: an address transaction,
 *  a write transaction carrying @value, and a read-back transaction.
 *  Return Value:
 *  NONE
 */
static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value,
			    struct net_device *dev)
{
	u64 val64;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* address transaction */
	val64 = MDIO_MMD_INDX_ADDR(addr) |
		MDIO_MMD_DEV_ADDR(mmd_type) |
		MDIO_MMS_PRT_ADDR(0x0);
	writeq(val64, &bar0->mdio_control);
	/* Second write with START_TRANS kicks off the transaction */
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Data transaction */
	val64 = MDIO_MMD_INDX_ADDR(addr) |
		MDIO_MMD_DEV_ADDR(mmd_type) |
		MDIO_MMS_PRT_ADDR(0x0) |
		MDIO_MDIO_DATA(value) |
		MDIO_OP(MDIO_OP_WRITE_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Read-back transaction (result is not consumed here) */
	val64 = MDIO_MMD_INDX_ADDR(addr) |
		MDIO_MMD_DEV_ADDR(mmd_type) |
		MDIO_MMS_PRT_ADDR(0x0) |
		MDIO_OP(MDIO_OP_READ_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);
}
3110
/**
 *  s2io_mdio_read - Function to read from MDIO registers
 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 *  @addr     : address value
 *  @dev      : pointer to net_device structure
 *  Description:
 *  This function is used to read values from the MDIO registers.
 *  Issues an address transaction followed by a read transaction and
 *  returns the 16-bit data extracted from mdio_control.
 *  Return Value:
 *  The value read from the MDIO register.
 */
static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
{
	u64 val64 = 0x0;
	u64 rval64 = 0x0;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* address transaction */
	val64 = val64 | (MDIO_MMD_INDX_ADDR(addr)
			 | MDIO_MMD_DEV_ADDR(mmd_type)
			 | MDIO_MMS_PRT_ADDR(0x0));
	writeq(val64, &bar0->mdio_control);
	/* Second write with START_TRANS kicks off the transaction */
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Data transaction */
	val64 = MDIO_MMD_INDX_ADDR(addr) |
		MDIO_MMD_DEV_ADDR(mmd_type) |
		MDIO_MMS_PRT_ADDR(0x0) |
		MDIO_OP(MDIO_OP_READ_TRANS);
	writeq(val64, &bar0->mdio_control);
	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
	writeq(val64, &bar0->mdio_control);
	udelay(100);

	/* Read the value from regs: data lives in bits 16..31 */
	rval64 = readq(&bar0->mdio_control);
	rval64 = rval64 & 0xFFFF0000;
	rval64 = rval64 >> 16;
	return rval64;
}
3152
3153/**
3154 *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
3155 *  @counter      : counter value to be updated
3156 *  @flag         : flag to indicate the status
3157 *  @type         : counter type
3158 *  Description:
3159 *  This function is to check the status of the xpak counters value
3160 *  NONE
3161 */
3162
3163static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index,
3164				  u16 flag, u16 type)
3165{
3166	u64 mask = 0x3;
3167	u64 val64;
3168	int i;
3169	for (i = 0; i < index; i++)
3170		mask = mask << 0x2;
3171
3172	if (flag > 0) {
3173		*counter = *counter + 1;
3174		val64 = *regs_stat & mask;
3175		val64 = val64 >> (index * 0x2);
3176		val64 = val64 + 1;
3177		if (val64 == 3) {
3178			switch (type) {
3179			case 1:
3180				DBG_PRINT(ERR_DBG,
3181					  "Take Xframe NIC out of service.\n");
3182				DBG_PRINT(ERR_DBG,
3183"Excessive temperatures may result in premature transceiver failure.\n");
3184				break;
3185			case 2:
3186				DBG_PRINT(ERR_DBG,
3187					  "Take Xframe NIC out of service.\n");
3188				DBG_PRINT(ERR_DBG,
3189"Excessive bias currents may indicate imminent laser diode failure.\n");
3190				break;
3191			case 3:
3192				DBG_PRINT(ERR_DBG,
3193					  "Take Xframe NIC out of service.\n");
3194				DBG_PRINT(ERR_DBG,
3195"Excessive laser output power may saturate far-end receiver.\n");
3196				break;
3197			default:
3198				DBG_PRINT(ERR_DBG,
3199					  "Incorrect XPAK Alarm type\n");
3200			}
3201			val64 = 0x0;
3202		}
3203		val64 = val64 << (index * 0x2);
3204		*regs_stat = (*regs_stat & (~mask)) | (val64);
3205
3206	} else {
3207		*regs_stat = *regs_stat & (~mask);
3208	}
3209}
3210
3211/**
3212 *  s2io_updt_xpak_counter - Function to update the xpak counters
3213 *  @dev         : pointer to net_device struct
3214 *  Description:
3215 *  This function is to upate the status of the xpak counters value
3216 *  NONE
3217 */
3218static void s2io_updt_xpak_counter(struct net_device *dev)
3219{
3220	u16 flag  = 0x0;
3221	u16 type  = 0x0;
3222	u16 val16 = 0x0;
3223	u64 val64 = 0x0;
3224	u64 addr  = 0x0;
3225
3226	struct s2io_nic *sp = netdev_priv(dev);
3227	struct stat_block *stats = sp->mac_control.stats_info;
3228	struct xpakStat *xstats = &stats->xpak_stat;
3229
3230	/* Check the communication with the MDIO slave */
3231	addr = MDIO_CTRL1;
3232	val64 = 0x0;
3233	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3234	if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
3235		DBG_PRINT(ERR_DBG,
3236			  "ERR: MDIO slave access failed - Returned %llx\n",
3237			  (unsigned long long)val64);
3238		return;
3239	}
3240
3241	/* Check for the expected value of control reg 1 */
3242	if (val64 != MDIO_CTRL1_SPEED10G) {
3243		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - "
3244			  "Returned: %llx- Expected: 0x%x\n",
3245			  (unsigned long long)val64, MDIO_CTRL1_SPEED10G);
3246		return;
3247	}
3248
3249	/* Loading the DOM register to MDIO register */
3250	addr = 0xA100;
3251	s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
3252	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3253
3254	/* Reading the Alarm flags */
3255	addr = 0xA070;
3256	val64 = 0x0;
3257	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3258
3259	flag = CHECKBIT(val64, 0x7);
3260	type = 1;
3261	s2io_chk_xpak_counter(&xstats->alarm_transceiver_temp_high,
3262			      &xstats->xpak_regs_stat,
3263			      0x0, flag, type);
3264
3265	if (CHECKBIT(val64, 0x6))
3266		xstats->alarm_transceiver_temp_low++;
3267
3268	flag = CHECKBIT(val64, 0x3);
3269	type = 2;
3270	s2io_chk_xpak_counter(&xstats->alarm_laser_bias_current_high,
3271			      &xstats->xpak_regs_stat,
3272			      0x2, flag, type);
3273
3274	if (CHECKBIT(val64, 0x2))
3275		xstats->alarm_laser_bias_current_low++;
3276
3277	flag = CHECKBIT(val64, 0x1);
3278	type = 3;
3279	s2io_chk_xpak_counter(&xstats->alarm_laser_output_power_high,
3280			      &xstats->xpak_regs_stat,
3281			      0x4, flag, type);
3282
3283	if (CHECKBIT(val64, 0x0))
3284		xstats->alarm_laser_output_power_low++;
3285
3286	/* Reading the Warning flags */
3287	addr = 0xA074;
3288	val64 = 0x0;
3289	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3290
3291	if (CHECKBIT(val64, 0x7))
3292		xstats->warn_transceiver_temp_high++;
3293
3294	if (CHECKBIT(val64, 0x6))
3295		xstats->warn_transceiver_temp_low++;
3296
3297	if (CHECKBIT(val64, 0x3))
3298		xstats->warn_laser_bias_current_high++;
3299
3300	if (CHECKBIT(val64, 0x2))
3301		xstats->warn_laser_bias_current_low++;
3302
3303	if (CHECKBIT(val64, 0x1))
3304		xstats->warn_laser_output_power_high++;
3305
3306	if (CHECKBIT(val64, 0x0))
3307		xstats->warn_laser_output_power_low++;
3308}
3309
3310/**
3311 *  wait_for_cmd_complete - waits for a command to complete.
3312 *  @sp : private member of the device structure, which is a pointer to the
3313 *  s2io_nic structure.
3314 *  Description: Function that waits for a command to Write into RMAC
3315 *  ADDR DATA registers to be completed and returns either success or
3316 *  error depending on whether the command was complete or not.
3317 *  Return value:
3318 *   SUCCESS on success and FAILURE on failure.
3319 */
3320
3321static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3322				 int bit_state)
3323{
3324	int ret = FAILURE, cnt = 0, delay = 1;
3325	u64 val64;
3326
3327	if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3328		return FAILURE;
3329
3330	do {
3331		val64 = readq(addr);
3332		if (bit_state == S2IO_BIT_RESET) {
3333			if (!(val64 & busy_bit)) {
3334				ret = SUCCESS;
3335				break;
3336			}
3337		} else {
3338			if (val64 & busy_bit) {
3339				ret = SUCCESS;
3340				break;
3341			}
3342		}
3343
3344		if (in_interrupt())
3345			mdelay(delay);
3346		else
3347			msleep(delay);
3348
3349		if (++cnt >= 10)
3350			delay = 50;
3351	} while (cnt < 20);
3352	return ret;
3353}
3354/**
3355 * check_pci_device_id - Checks if the device id is supported
3356 * @id : device id
3357 * Description: Function to check if the pci device id is supported by driver.
3358 * Return value: Actual device id if supported else PCI_ANY_ID
3359 */
3360static u16 check_pci_device_id(u16 id)
3361{
3362	switch (id) {
3363	case PCI_DEVICE_ID_HERC_WIN:
3364	case PCI_DEVICE_ID_HERC_UNI:
3365		return XFRAME_II_DEVICE;
3366	case PCI_DEVICE_ID_S2IO_UNI:
3367	case PCI_DEVICE_ID_S2IO_WIN:
3368		return XFRAME_I_DEVICE;
3369	default:
3370		return PCI_ANY_ID;
3371	}
3372}
3373
3374/**
3375 *  s2io_reset - Resets the card.
3376 *  @sp : private member of the device structure.
3377 *  Description: Function to Reset the card. This function then also
3378 *  restores the previously saved PCI configuration space registers as
3379 *  the card reset also resets the configuration space.
3380 *  Return value:
3381 *  void.
3382 */
3383
static void s2io_reset(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	u16 subid, pci_cmd;
	int i;
	u16 val16;
	unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
	struct stat_block *stats;
	struct swStat *swstats;

	DBG_PRINT(INIT_DBG, "%s: Resetting XFrame card %s\n",
		  __func__, pci_name(sp->pdev));

	/* Back up  the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));

	/* Trigger the chip-wide software reset */
	val64 = SW_RESET_ALL;
	writeq(val64, &bar0->sw_reset);
	/* CX4 adapters need extra settle time before the chip responds */
	if (strstr(sp->product_name, "CX4"))
		msleep(750);
	msleep(250);
	/*
	 * Poll until config space is readable again: once the device id
	 * reads back as a recognised value, the chip is out of reset.
	 * val16 is always assigned on the first iteration.
	 */
	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {

		/* Restore the PCI state saved during initialization. */
		pci_restore_state(sp->pdev);
		pci_save_state(sp->pdev);
		pci_read_config_word(sp->pdev, 0x2, &val16);
		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
			break;
		msleep(200);
	}

	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID)
		DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__);

	/* Put the saved PCI-X command register back */
	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);

	s2io_init_pci(sp);

	/* Set swapper to enable I/O register access */
	s2io_set_swapper(sp);

	/* restore mac_addr entries */
	do_s2io_restore_unicast_mc(sp);

	/* Restore the MSIX table entries from local variables */
	restore_xmsi_data(sp);

	/* Clear certain PCI/PCI-X fields after reset */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Clear "detected parity error" bit */
		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

		/* Clearing PCIX Ecc status register */
		pci_write_config_dword(sp->pdev, 0x68, 0x7C);

		/* Clearing PCI_STATUS error reflected here */
		writeq(s2BIT(62), &bar0->txpic_int_reg);
	}

	/* Reset device statistics maintained by OS */
	memset(&sp->stats, 0, sizeof(struct net_device_stats));

	stats = sp->mac_control.stats_info;
	swstats = &stats->sw_stat;

	/* save link up/down time/cnt, reset/memory/watchdog cnt */
	up_cnt = swstats->link_up_cnt;
	down_cnt = swstats->link_down_cnt;
	up_time = swstats->link_up_time;
	down_time = swstats->link_down_time;
	reset_cnt = swstats->soft_reset_cnt;
	mem_alloc_cnt = swstats->mem_allocated;
	mem_free_cnt = swstats->mem_freed;
	watchdog_cnt = swstats->watchdog_timer_cnt;

	/* Wholesale-clear the stats block ... */
	memset(stats, 0, sizeof(struct stat_block));

	/* restore link up/down time/cnt, reset/memory/watchdog cnt */
	swstats->link_up_cnt = up_cnt;
	swstats->link_down_cnt = down_cnt;
	swstats->link_up_time = up_time;
	swstats->link_down_time = down_time;
	swstats->soft_reset_cnt = reset_cnt;
	swstats->mem_allocated = mem_alloc_cnt;
	swstats->mem_freed = mem_free_cnt;
	swstats->watchdog_timer_cnt = watchdog_cnt;

	/* SXE-002: Configure link and activity LED to turn it off */
	subid = sp->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (sp->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	/*
	 * Clear spurious ECC interrupts that would have occurred on
	 * XFRAME II cards after reset.
	 */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = readq(&bar0->pcc_err_reg);
		writeq(val64, &bar0->pcc_err_reg);
	}

	sp->device_enabled_once = false;
}
3496
3497/**
3498 *  s2io_set_swapper - to set the swapper controle on the card
3499 *  @sp : private member of the device structure,
3500 *  pointer to the s2io_nic structure.
3501 *  Description: Function to set the swapper control on the card
3502 *  correctly depending on the 'endianness' of the system.
3503 *  Return value:
3504 *  SUCCESS on success and FAILURE on failure.
3505 */
3506
static int s2io_set_swapper(struct s2io_nic *sp)
{
	struct net_device *dev = sp->dev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64, valt, valr;

	/*
	 * Set proper endian settings and verify the same by reading
	 * the PIF Feed-back register.
	 */

	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/*
		 * Read path is mis-swapped: try each candidate read-swap
		 * setting until the feedback register reads correctly.
		 */
		int i = 0;
		static const u64 value[] = {
			0xC30000C3C30000C3ULL,	/* FE=1, SE=1 */
			0x8100008181000081ULL,	/* FE=1, SE=0 */
			0x4200004242000042ULL,	/* FE=0, SE=1 */
			0			/* FE=0, SE=0 */
		};

		while (i < 4) {
			writeq(value[i], &bar0->swapper_ctrl);
			val64 = readq(&bar0->pif_rd_swapper_fb);
			if (val64 == 0x0123456789ABCDEFULL)
				break;
			i++;
		}
		if (i == 4) {
			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, "
				  "feedback read %llx\n",
				  dev->name, (unsigned long long)val64);
			return FAILURE;
		}
		valr = value[i];
	} else {
		valr = readq(&bar0->swapper_ctrl);
	}

	/* Now verify the write path using xmsi_address as scratch space */
	valt = 0x0123456789ABCDEFULL;
	writeq(valt, &bar0->xmsi_address);
	val64 = readq(&bar0->xmsi_address);

	if (val64 != valt) {
		/*
		 * Write path is mis-swapped: try each candidate write-swap
		 * setting, OR-ed with the read setting found above.
		 */
		int i = 0;
		static const u64 value[] = {
			0x00C3C30000C3C300ULL,	/* FE=1, SE=1 */
			0x0081810000818100ULL,	/* FE=1, SE=0 */
			0x0042420000424200ULL,	/* FE=0, SE=1 */
			0			/* FE=0, SE=0 */
		};

		while (i < 4) {
			writeq((value[i] | valr), &bar0->swapper_ctrl);
			writeq(valt, &bar0->xmsi_address);
			val64 = readq(&bar0->xmsi_address);
			if (val64 == valt)
				break;
			i++;
		}
		if (i == 4) {
			unsigned long long x = val64;
			DBG_PRINT(ERR_DBG,
				  "Write failed, Xmsi_addr reads:0x%llx\n", x);
			return FAILURE;
		}
	}
	/* Keep only the high 16 bits, then set the per-path swap flags */
	val64 = readq(&bar0->swapper_ctrl);
	val64 &= 0xFFFF000000000000ULL;

#ifdef __BIG_ENDIAN
	/*
	 * The device by default set to a big endian format, so a
	 * big endian driver need not set anything.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		  SWAPPER_CTRL_TXP_SE |
		  SWAPPER_CTRL_TXD_R_FE |
		  SWAPPER_CTRL_TXD_W_FE |
		  SWAPPER_CTRL_TXF_R_FE |
		  SWAPPER_CTRL_RXD_R_FE |
		  SWAPPER_CTRL_RXD_W_FE |
		  SWAPPER_CTRL_RXF_W_FE |
		  SWAPPER_CTRL_XMSI_FE |
		  SWAPPER_CTRL_STATS_FE |
		  SWAPPER_CTRL_STATS_SE);
	if (sp->config.intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#else
	/*
	 * Initially we enable all bits to make it accessible by the
	 * driver, then we selectively enable only those bits that
	 * we want to set.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		  SWAPPER_CTRL_TXP_SE |
		  SWAPPER_CTRL_TXD_R_FE |
		  SWAPPER_CTRL_TXD_R_SE |
		  SWAPPER_CTRL_TXD_W_FE |
		  SWAPPER_CTRL_TXD_W_SE |
		  SWAPPER_CTRL_TXF_R_FE |
		  SWAPPER_CTRL_RXD_R_FE |
		  SWAPPER_CTRL_RXD_R_SE |
		  SWAPPER_CTRL_RXD_W_FE |
		  SWAPPER_CTRL_RXD_W_SE |
		  SWAPPER_CTRL_RXF_W_FE |
		  SWAPPER_CTRL_XMSI_FE |
		  SWAPPER_CTRL_STATS_FE |
		  SWAPPER_CTRL_STATS_SE);
	if (sp->config.intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#endif
	val64 = readq(&bar0->swapper_ctrl);

	/*
	 * Verifying if endian settings are accurate by reading a
	 * feedback register.
	 */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/* Endian settings are incorrect, calls for another dekko. */
		DBG_PRINT(ERR_DBG,
			  "%s: Endian settings are wrong, feedback read %llx\n",
			  dev->name, (unsigned long long)val64);
		return FAILURE;
	}

	return SUCCESS;
}
3638
3639static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3640{
3641	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3642	u64 val64;
3643	int ret = 0, cnt = 0;
3644
3645	do {
3646		val64 = readq(&bar0->xmsi_access);
3647		if (!(val64 & s2BIT(15)))
3648			break;
3649		mdelay(1);
3650		cnt++;
3651	} while (cnt < 5);
3652	if (cnt == 5) {
3653		DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3654		ret = 1;
3655	}
3656
3657	return ret;
3658}
3659
3660static void restore_xmsi_data(struct s2io_nic *nic)
3661{
3662	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3663	u64 val64;
3664	int i, msix_index;
3665
3666	if (nic->device_type == XFRAME_I_DEVICE)
3667		return;
3668
3669	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3670		msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3671		writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3672		writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3673		val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
3674		writeq(val64, &bar0->xmsi_access);
3675		if (wait_for_msix_trans(nic, msix_index))
3676			DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3677				  __func__, msix_index);
 
 
3678	}
3679}
3680
3681static void store_xmsi_data(struct s2io_nic *nic)
3682{
3683	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3684	u64 val64, addr, data;
3685	int i, msix_index;
3686
3687	if (nic->device_type == XFRAME_I_DEVICE)
3688		return;
3689
3690	/* Store and display */
3691	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3692		msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3693		val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
3694		writeq(val64, &bar0->xmsi_access);
3695		if (wait_for_msix_trans(nic, msix_index)) {
3696			DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3697				  __func__, msix_index);
3698			continue;
3699		}
3700		addr = readq(&bar0->xmsi_address);
3701		data = readq(&bar0->xmsi_data);
3702		if (addr && data) {
3703			nic->msix_info[i].addr = addr;
3704			nic->msix_info[i].data = data;
3705		}
3706	}
3707}
3708
3709static int s2io_enable_msi_x(struct s2io_nic *nic)
3710{
3711	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3712	u64 rx_mat;
3713	u16 msi_control; /* Temp variable */
3714	int ret, i, j, msix_indx = 1;
3715	int size;
3716	struct stat_block *stats = nic->mac_control.stats_info;
3717	struct swStat *swstats = &stats->sw_stat;
3718
3719	size = nic->num_entries * sizeof(struct msix_entry);
3720	nic->entries = kzalloc(size, GFP_KERNEL);
3721	if (!nic->entries) {
3722		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3723			  __func__);
3724		swstats->mem_alloc_fail_cnt++;
3725		return -ENOMEM;
3726	}
3727	swstats->mem_allocated += size;
3728
3729	size = nic->num_entries * sizeof(struct s2io_msix_entry);
3730	nic->s2io_entries = kzalloc(size, GFP_KERNEL);
3731	if (!nic->s2io_entries) {
3732		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3733			  __func__);
3734		swstats->mem_alloc_fail_cnt++;
3735		kfree(nic->entries);
3736		swstats->mem_freed
3737			+= (nic->num_entries * sizeof(struct msix_entry));
3738		return -ENOMEM;
3739	}
3740	swstats->mem_allocated += size;
3741
3742	nic->entries[0].entry = 0;
3743	nic->s2io_entries[0].entry = 0;
3744	nic->s2io_entries[0].in_use = MSIX_FLG;
3745	nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
3746	nic->s2io_entries[0].arg = &nic->mac_control.fifos;
3747
3748	for (i = 1; i < nic->num_entries; i++) {
3749		nic->entries[i].entry = ((i - 1) * 8) + 1;
3750		nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
3751		nic->s2io_entries[i].arg = NULL;
3752		nic->s2io_entries[i].in_use = 0;
3753	}
3754
3755	rx_mat = readq(&bar0->rx_mat);
3756	for (j = 0; j < nic->config.rx_ring_num; j++) {
3757		rx_mat |= RX_MAT_SET(j, msix_indx);
3758		nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
3759		nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
3760		nic->s2io_entries[j+1].in_use = MSIX_FLG;
3761		msix_indx += 8;
3762	}
3763	writeq(rx_mat, &bar0->rx_mat);
3764	readq(&bar0->rx_mat);
3765
3766	ret = pci_enable_msix_range(nic->pdev, nic->entries,
3767				    nic->num_entries, nic->num_entries);
3768	/* We fail init if error or we get less vectors than min required */
3769	if (ret < 0) {
3770		DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
3771		kfree(nic->entries);
3772		swstats->mem_freed += nic->num_entries *
3773			sizeof(struct msix_entry);
3774		kfree(nic->s2io_entries);
3775		swstats->mem_freed += nic->num_entries *
3776			sizeof(struct s2io_msix_entry);
3777		nic->entries = NULL;
3778		nic->s2io_entries = NULL;
3779		return -ENOMEM;
3780	}
3781
3782	/*
3783	 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3784	 * in the herc NIC. (Temp change, needs to be removed later)
3785	 */
3786	pci_read_config_word(nic->pdev, 0x42, &msi_control);
3787	msi_control |= 0x1; /* Enable MSI */
3788	pci_write_config_word(nic->pdev, 0x42, msi_control);
3789
3790	return 0;
3791}
3792
3793/* Handle software interrupt used during MSI(X) test */
3794static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3795{
3796	struct s2io_nic *sp = dev_id;
3797
3798	sp->msi_detected = 1;
3799	wake_up(&sp->msi_wait);
3800
3801	return IRQ_HANDLED;
3802}
3803
/* Test interrupt path by forcing a software IRQ */
static int s2io_test_msi(struct s2io_nic *sp)
{
	struct pci_dev *pdev = sp->pdev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int err;
	u64 val64, saved64;

	/* Hook the test handler on MSI-X table entry 1 */
	err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
			  sp->name, sp);
	if (err) {
		DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
			  sp->dev->name, pci_name(pdev), pdev->irq);
		return err;
	}

	init_waitqueue_head(&sp->msi_wait);
	sp->msi_detected = 0;

	/* Program the scheduled-interrupt timer to fire once as MSI 1 */
	saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
	val64 |= SCHED_INT_CTRL_ONE_SHOT;
	val64 |= SCHED_INT_CTRL_TIMER_EN;
	val64 |= SCHED_INT_CTRL_INT2MSI(1);
	writeq(val64, &bar0->scheduled_int_ctrl);

	/* Give the interrupt up to HZ/10 jiffies to arrive */
	wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);

	if (!sp->msi_detected) {
		/* MSI(X) test failed, go back to INTx mode */
		DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
			  "using MSI(X) during test\n",
			  sp->dev->name, pci_name(pdev));

		err = -EOPNOTSUPP;
	}

	free_irq(sp->entries[1].vector, sp);

	/* Restore the original scheduled-interrupt configuration */
	writeq(saved64, &bar0->scheduled_int_ctrl);

	return err;
}
3846
3847static void remove_msix_isr(struct s2io_nic *sp)
3848{
3849	int i;
3850	u16 msi_control;
3851
3852	for (i = 0; i < sp->num_entries; i++) {
3853		if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) {
3854			int vector = sp->entries[i].vector;
3855			void *arg = sp->s2io_entries[i].arg;
3856			free_irq(vector, arg);
3857		}
3858	}
3859
3860	kfree(sp->entries);
3861	kfree(sp->s2io_entries);
3862	sp->entries = NULL;
3863	sp->s2io_entries = NULL;
3864
3865	pci_read_config_word(sp->pdev, 0x42, &msi_control);
3866	msi_control &= 0xFFFE; /* Disable MSI */
3867	pci_write_config_word(sp->pdev, 0x42, msi_control);
3868
3869	pci_disable_msix(sp->pdev);
3870}
3871
/* Release the single INTA interrupt line registered for this device */
static void remove_inta_isr(struct s2io_nic *sp)
{
	free_irq(sp->pdev->irq, sp->dev);
}
3876
3877/* ********************************************************* *
3878 * Functions defined below concern the OS part of the driver *
3879 * ********************************************************* */
3880
3881/**
3882 *  s2io_open - open entry point of the driver
3883 *  @dev : pointer to the device structure.
3884 *  Description:
3885 *  This function is the open entry point of the driver. It mainly calls a
3886 *  function to allocate Rx buffers and inserts them into the buffer
3887 *  descriptors and then enables the Rx part of the NIC.
3888 *  Return value:
3889 *  0 on success and an appropriate (-)ve integer as defined in errno.h
3890 *   file on failure.
3891 */
3892
3893static int s2io_open(struct net_device *dev)
3894{
3895	struct s2io_nic *sp = netdev_priv(dev);
3896	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
3897	int err = 0;
3898
3899	/*
3900	 * Make sure you have link off by default every time
3901	 * Nic is initialized
3902	 */
3903	netif_carrier_off(dev);
3904	sp->last_link_state = 0;
3905
3906	/* Initialize H/W and enable interrupts */
3907	err = s2io_card_up(sp);
3908	if (err) {
3909		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3910			  dev->name);
3911		goto hw_init_failed;
3912	}
3913
3914	if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
3915		DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3916		s2io_card_down(sp);
3917		err = -ENODEV;
3918		goto hw_init_failed;
3919	}
3920	s2io_start_all_tx_queue(sp);
3921	return 0;
3922
3923hw_init_failed:
3924	if (sp->config.intr_type == MSI_X) {
3925		if (sp->entries) {
3926			kfree(sp->entries);
3927			swstats->mem_freed += sp->num_entries *
3928				sizeof(struct msix_entry);
3929		}
3930		if (sp->s2io_entries) {
3931			kfree(sp->s2io_entries);
3932			swstats->mem_freed += sp->num_entries *
3933				sizeof(struct s2io_msix_entry);
3934		}
3935	}
3936	return err;
3937}
3938
3939/**
3940 *  s2io_close -close entry point of the driver
3941 *  @dev : device pointer.
3942 *  Description:
3943 *  This is the stop entry point of the driver. It needs to undo exactly
3944 *  whatever was done by the open entry point,thus it's usually referred to
3945 *  as the close function.Among other things this function mainly stops the
3946 *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3947 *  Return value:
3948 *  0 on success and an appropriate (-)ve integer as defined in errno.h
3949 *  file on failure.
3950 */
3951
3952static int s2io_close(struct net_device *dev)
3953{
3954	struct s2io_nic *sp = netdev_priv(dev);
3955	struct config_param *config = &sp->config;
3956	u64 tmp64;
3957	int offset;
3958
3959	/* Return if the device is already closed               *
3960	 *  Can happen when s2io_card_up failed in change_mtu    *
3961	 */
3962	if (!is_s2io_card_up(sp))
3963		return 0;
3964
3965	s2io_stop_all_tx_queue(sp);
3966	/* delete all populated mac entries */
3967	for (offset = 1; offset < config->max_mc_addr; offset++) {
3968		tmp64 = do_s2io_read_unicast_mc(sp, offset);
3969		if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
3970			do_s2io_delete_unicast_mc(sp, tmp64);
3971	}
3972
3973	s2io_card_down(sp);
3974
3975	return 0;
3976}
3977
3978/**
3979 *  s2io_xmit - Tx entry point of te driver
3980 *  @skb : the socket buffer containing the Tx data.
3981 *  @dev : device pointer.
3982 *  Description :
3983 *  This function is the Tx entry point of the driver. S2IO NIC supports
3984 *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
3985 *  NOTE: when device can't queue the pkt,just the trans_start variable will
3986 *  not be upadted.
3987 *  Return value:
3988 *  0 on success & 1 on failure.
3989 */
3990
static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct s2io_nic *sp = netdev_priv(dev);
	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
	register u64 val64;
	struct TxD *txdp;
	struct TxFIFO_element __iomem *tx_fifo;
	unsigned long flags = 0;
	u16 vlan_tag = 0;
	struct fifo_info *fifo = NULL;
	int offload_type;
	int enable_per_list_interrupt = 0;
	struct config_param *config = &sp->config;
	struct mac_info *mac_control = &sp->mac_control;
	struct stat_block *stats = mac_control->stats_info;
	struct swStat *swstats = &stats->sw_stat;

	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);

	/* Drop zero-length packets outright */
	if (unlikely(skb->len <= 0)) {
		DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Card going down for reset: drop silently, do not stall the stack */
	if (!is_s2io_card_up(sp)) {
		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
			  dev->name);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	queue = 0;
	if (skb_vlan_tag_present(skb))
		vlan_tag = skb_vlan_tag_get(skb);
	/*
	 * Select the Tx FIFO. Default steering hashes TCP/UDP port numbers
	 * into the configured FIFO ranges; priority steering indexes
	 * fifo_mapping with skb->priority.
	 */
	if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *ip;
			struct tcphdr *th;
			ip = ip_hdr(skb);

			/* Port-based steering only applies to first fragments */
			if (!ip_is_fragment(ip)) {
				th = (struct tcphdr *)(((unsigned char *)ip) +
						       ip->ihl*4);

				if (ip->protocol == IPPROTO_TCP) {
					queue_len = sp->total_tcp_fifos;
					queue = (ntohs(th->source) +
						 ntohs(th->dest)) &
						sp->fifo_selector[queue_len - 1];
					if (queue >= queue_len)
						queue = queue_len - 1;
				} else if (ip->protocol == IPPROTO_UDP) {
					queue_len = sp->total_udp_fifos;
					queue = (ntohs(th->source) +
						 ntohs(th->dest)) &
						sp->fifo_selector[queue_len - 1];
					if (queue >= queue_len)
						queue = queue_len - 1;
					queue += sp->udp_fifo_idx;
					if (skb->len > 1024)
						enable_per_list_interrupt = 1;
				}
			}
		}
	} else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
		/* get fifo number based on skb->priority value */
		queue = config->fifo_mapping
			[skb->priority & (MAX_TX_FIFOS - 1)];
	fifo = &mac_control->fifos[queue];

	spin_lock_irqsave(&fifo->tx_lock, flags);

	/* Re-check queue state under the lock; bail out if stopped */
	if (sp->config.multiq) {
		if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	} else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
		if (netif_queue_stopped(dev)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	}

	put_off = (u16)fifo->tx_curr_put_info.offset;
	get_off = (u16)fifo->tx_curr_get_info.offset;
	txdp = fifo->list_info[put_off].list_virt_addr;

	queue_len = fifo->tx_curr_put_info.fifo_len + 1;
	/* Avoid "put" pointer going beyond "get" pointer */
	if (txdp->Host_Control ||
	    ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
		s2io_stop_tx_queue(sp, fifo->fifo_no);
		dev_kfree_skb_any(skb);
		spin_unlock_irqrestore(&fifo->tx_lock, flags);
		return NETDEV_TX_OK;
	}

	/* Fill in LSO/checksum-offload bits requested by the stack */
	offload_type = s2io_offload_type(skb);
	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
		txdp->Control_1 |= TXD_TCP_LSO_EN;
		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
	}
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN |
				    TXD_TX_CKO_TCP_EN |
				    TXD_TX_CKO_UDP_EN);
	}
	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
	txdp->Control_1 |= TXD_LIST_OWN_XENA;
	txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
	if (enable_per_list_interrupt)
		if (put_off & (queue_len >> 5))
			txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
	if (vlan_tag) {
		txdp->Control_2 |= TXD_VLAN_ENABLE;
		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
	}

	/* Map the linear part of the skb into the first descriptor */
	frg_len = skb_headlen(skb);
	txdp->Buffer_Pointer = dma_map_single(&sp->pdev->dev, skb->data,
					      frg_len, DMA_TO_DEVICE);
	if (dma_mapping_error(&sp->pdev->dev, txdp->Buffer_Pointer))
		goto pci_map_failed;

	txdp->Host_Control = (unsigned long)skb;
	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);

	frg_cnt = skb_shinfo(skb)->nr_frags;
	/* For fragmented SKB. */
	for (i = 0; i < frg_cnt; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		/* A '0' length fragment will be ignored */
		if (!skb_frag_size(frag))
			continue;
		txdp++;
		/*
		 * NOTE(review): unlike the head mapping above, the
		 * skb_frag_dma_map() result is not checked with
		 * dma_mapping_error() -- confirm whether intentional.
		 */
		txdp->Buffer_Pointer = (u64)skb_frag_dma_map(&sp->pdev->dev,
							     frag, 0,
							     skb_frag_size(frag),
							     DMA_TO_DEVICE);
		txdp->Control_1 = TXD_BUFFER0_SIZE(skb_frag_size(frag));
	}
	txdp->Control_1 |= TXD_GATHER_CODE_LAST;

	/* Hand the descriptor list to the hardware FIFO */
	tx_fifo = mac_control->tx_FIFO_start[queue];
	val64 = fifo->list_info[put_off].list_phy_addr;
	writeq(val64, &tx_fifo->TxDL_Pointer);

	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
		 TX_FIFO_LAST_LIST);
	if (offload_type)
		val64 |= TX_FIFO_SPECIAL_FUNC;

	writeq(val64, &tx_fifo->List_Control);

	/* Advance the put pointer, wrapping at the end of the ring */
	put_off++;
	if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
		put_off = 0;
	fifo->tx_curr_put_info.offset = put_off;

	/* Avoid "put" pointer going beyond "get" pointer */
	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		swstats->fifo_full_cnt++;
		DBG_PRINT(TX_DBG,
			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
			  put_off, get_off);
		s2io_stop_tx_queue(sp, fifo->fifo_no);
	}
	swstats->mem_allocated += skb->truesize;
	spin_unlock_irqrestore(&fifo->tx_lock, flags);

	/* With MSI-X, reclaim completed descriptors inline */
	if (sp->config.intr_type == MSI_X)
		tx_intr_handler(fifo);

	return NETDEV_TX_OK;

pci_map_failed:
	/* DMA mapping of the skb head failed: count it, stop queue, drop */
	swstats->pci_map_fail_cnt++;
	s2io_stop_tx_queue(sp, fifo->fifo_no);
	swstats->mem_freed += skb->truesize;
	dev_kfree_skb_any(skb);
	spin_unlock_irqrestore(&fifo->tx_lock, flags);
	return NETDEV_TX_OK;
}
4177
4178static void
4179s2io_alarm_handle(struct timer_list *t)
4180{
4181	struct s2io_nic *sp = from_timer(sp, t, alarm_timer);
4182	struct net_device *dev = sp->dev;
4183
4184	s2io_handle_errors(dev);
4185	mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4186}
4187
4188static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4189{
4190	struct ring_info *ring = (struct ring_info *)dev_id;
4191	struct s2io_nic *sp = ring->nic;
4192	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4193
4194	if (unlikely(!is_s2io_card_up(sp)))
4195		return IRQ_HANDLED;
4196
4197	if (sp->config.napi) {
4198		u8 __iomem *addr = NULL;
4199		u8 val8 = 0;
4200
4201		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
4202		addr += (7 - ring->ring_no);
4203		val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
4204		writeb(val8, addr);
4205		val8 = readb(addr);
4206		napi_schedule(&ring->napi);
4207	} else {
4208		rx_intr_handler(ring, 0);
4209		s2io_chk_rx_buffers(sp, ring);
4210	}
4211
4212	return IRQ_HANDLED;
4213}
4214
/*
 * MSI-X interrupt handler shared by all Tx FIFOs.
 * @irq: interrupt number (unused)
 * @dev_id: pointer to the first element of the fifo_info array
 *
 * Services TXPIC and Tx-traffic interrupts for every FIFO, then
 * restores the saved interrupt mask.  Returns IRQ_NONE when the
 * interrupt was not raised by this device.
 */
static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
{
	int i;
	struct fifo_info *fifos = (struct fifo_info *)dev_id;
	struct s2io_nic *sp = fifos->nic;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	struct config_param *config  = &sp->config;
	u64 reason;

	if (unlikely(!is_s2io_card_up(sp)))
		return IRQ_NONE;

	reason = readq(&bar0->general_int_status);
	if (unlikely(reason == S2IO_MINUS_ONE))
		/* Nothing much can be done. Get out */
		/* NOTE(review): all-ones likely indicates the device has
		 * disappeared from the bus — confirm against hardware docs. */
		return IRQ_HANDLED;

	if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
		/* Mask all interrupt sources while servicing this one. */
		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

		if (reason & GEN_INTR_TXPIC)
			s2io_txpic_intr_handle(sp);

		/* tx_traffic_int is write-1-to-clear (see s2io_isr). */
		if (reason & GEN_INTR_TXTRAFFIC)
			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

		/* Reap completed transmit descriptors on every FIFO. */
		for (i = 0; i < config->tx_fifo_num; i++)
			tx_intr_handler(&fifos[i]);

		/* Restore the saved mask and flush the posted write. */
		writeq(sp->general_int_mask, &bar0->general_int_mask);
		readl(&bar0->general_int_status);
		return IRQ_HANDLED;
	}
	/* The interrupt was not raised by us */
	return IRQ_NONE;
}
4251
/*
 * Handle TXPIC interrupts: GPIO-signalled link up/down transitions.
 * On link-up the adapter and its LED are enabled; on link-down the LED
 * is turned off.  In each stable state the opposite link interrupt is
 * unmasked so the next transition is seen.
 */
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;

	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
		    (val64 & GPIO_INT_REG_LINK_UP)) {
			/*
			 * This is unstable state so clear both up/down
			 * interrupt and adapter to re-evaluate the link state.
			 */
			val64 |= GPIO_INT_REG_LINK_DOWN;
			val64 |= GPIO_INT_REG_LINK_UP;
			writeq(val64, &bar0->gpio_int_reg);
			val64 = readq(&bar0->gpio_int_mask);
			/* Unmask both link interrupts to catch the next edge. */
			val64 &= ~(GPIO_INT_MASK_LINK_UP |
				   GPIO_INT_MASK_LINK_DOWN);
			writeq(val64, &bar0->gpio_int_mask);
		} else if (val64 & GPIO_INT_REG_LINK_UP) {
			/* adapter_status read result is unused — presumably an
			 * ack/flush read; verify against hardware docs. */
			val64 = readq(&bar0->adapter_status);
			/* Enable Adapter */
			val64 = readq(&bar0->adapter_control);
			val64 |= ADAPTER_CNTL_EN;
			writeq(val64, &bar0->adapter_control);
			val64 |= ADAPTER_LED_ON;
			writeq(val64, &bar0->adapter_control);
			if (!sp->device_enabled_once)
				sp->device_enabled_once = 1;

			s2io_link(sp, LINK_UP);
			/*
			 * unmask link down interrupt and mask link-up
			 * intr
			 */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
			val64 |= GPIO_INT_MASK_LINK_UP;
			writeq(val64, &bar0->gpio_int_mask);

		} else if (val64 & GPIO_INT_REG_LINK_DOWN) {
			val64 = readq(&bar0->adapter_status);
			s2io_link(sp, LINK_DOWN);
			/* Link is down so unmask link up interrupt */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_UP;
			val64 |= GPIO_INT_MASK_LINK_DOWN;
			writeq(val64, &bar0->gpio_int_mask);

			/* turn off LED */
			val64 = readq(&bar0->adapter_control);
			val64 = val64 & (~ADAPTER_LED_ON);
			writeq(val64, &bar0->adapter_control);
		}
	}
	/* Dummy read — presumably flushes the mask writes; value unused. */
	val64 = readq(&bar0->gpio_int_mask);
}
4311
4312/**
4313 *  do_s2io_chk_alarm_bit - Check for alarm and incrment the counter
4314 *  @value: alarm bits
4315 *  @addr: address value
4316 *  @cnt: counter variable
4317 *  Description: Check for alarm and increment the counter
4318 *  Return Value:
4319 *  1 - if alarm bit set
4320 *  0 - if alarm bit is not set
4321 */
4322static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr,
4323				 unsigned long long *cnt)
4324{
4325	u64 val64;
4326	val64 = readq(addr);
4327	if (val64 & value) {
4328		writeq(val64, addr);
4329		(*cnt)++;
4330		return 1;
4331	}
4332	return 0;
4333
4334}
4335
4336/**
4337 *  s2io_handle_errors - Xframe error indication handler
4338 *  @nic: device private variable
4339 *  Description: Handle alarms such as loss of link, single or
4340 *  double ECC errors, critical and serious errors.
4341 *  Return Value:
4342 *  NONE
4343 */
4344static void s2io_handle_errors(void *dev_id)
4345{
4346	struct net_device *dev = (struct net_device *)dev_id;
4347	struct s2io_nic *sp = netdev_priv(dev);
4348	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4349	u64 temp64 = 0, val64 = 0;
4350	int i = 0;
4351
4352	struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4353	struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4354
4355	if (!is_s2io_card_up(sp))
4356		return;
4357
4358	if (pci_channel_offline(sp->pdev))
4359		return;
4360
4361	memset(&sw_stat->ring_full_cnt, 0,
4362	       sizeof(sw_stat->ring_full_cnt));
4363
4364	/* Handling the XPAK counters update */
4365	if (stats->xpak_timer_count < 72000) {
4366		/* waiting for an hour */
4367		stats->xpak_timer_count++;
4368	} else {
4369		s2io_updt_xpak_counter(dev);
4370		/* reset the count to zero */
4371		stats->xpak_timer_count = 0;
4372	}
4373
4374	/* Handling link status change error Intr */
4375	if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4376		val64 = readq(&bar0->mac_rmac_err_reg);
4377		writeq(val64, &bar0->mac_rmac_err_reg);
4378		if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4379			schedule_work(&sp->set_link_task);
4380	}
4381
4382	/* In case of a serious error, the device will be Reset. */
4383	if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4384				  &sw_stat->serious_err_cnt))
4385		goto reset;
4386
4387	/* Check for data parity error */
4388	if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4389				  &sw_stat->parity_err_cnt))
4390		goto reset;
4391
4392	/* Check for ring full counter */
4393	if (sp->device_type == XFRAME_II_DEVICE) {
4394		val64 = readq(&bar0->ring_bump_counter1);
4395		for (i = 0; i < 4; i++) {
4396			temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4397			temp64 >>= 64 - ((i+1)*16);
4398			sw_stat->ring_full_cnt[i] += temp64;
4399		}
4400
4401		val64 = readq(&bar0->ring_bump_counter2);
4402		for (i = 0; i < 4; i++) {
4403			temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4404			temp64 >>= 64 - ((i+1)*16);
4405			sw_stat->ring_full_cnt[i+4] += temp64;
4406		}
4407	}
4408
4409	val64 = readq(&bar0->txdma_int_status);
4410	/*check for pfc_err*/
4411	if (val64 & TXDMA_PFC_INT) {
4412		if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
4413					  PFC_MISC_0_ERR | PFC_MISC_1_ERR |
4414					  PFC_PCIX_ERR,
4415					  &bar0->pfc_err_reg,
4416					  &sw_stat->pfc_err_cnt))
4417			goto reset;
4418		do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR,
4419				      &bar0->pfc_err_reg,
4420				      &sw_stat->pfc_err_cnt);
4421	}
4422
4423	/*check for tda_err*/
4424	if (val64 & TXDMA_TDA_INT) {
4425		if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR |
4426					  TDA_SM0_ERR_ALARM |
4427					  TDA_SM1_ERR_ALARM,
4428					  &bar0->tda_err_reg,
4429					  &sw_stat->tda_err_cnt))
4430			goto reset;
4431		do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4432				      &bar0->tda_err_reg,
4433				      &sw_stat->tda_err_cnt);
4434	}
4435	/*check for pcc_err*/
4436	if (val64 & TXDMA_PCC_INT) {
4437		if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
4438					  PCC_N_SERR | PCC_6_COF_OV_ERR |
4439					  PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
4440					  PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR |
4441					  PCC_TXB_ECC_DB_ERR,
4442					  &bar0->pcc_err_reg,
4443					  &sw_stat->pcc_err_cnt))
4444			goto reset;
4445		do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4446				      &bar0->pcc_err_reg,
4447				      &sw_stat->pcc_err_cnt);
4448	}
4449
4450	/*check for tti_err*/
4451	if (val64 & TXDMA_TTI_INT) {
4452		if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM,
4453					  &bar0->tti_err_reg,
4454					  &sw_stat->tti_err_cnt))
4455			goto reset;
4456		do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4457				      &bar0->tti_err_reg,
4458				      &sw_stat->tti_err_cnt);
4459	}
4460
4461	/*check for lso_err*/
4462	if (val64 & TXDMA_LSO_INT) {
4463		if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT |
4464					  LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4465					  &bar0->lso_err_reg,
4466					  &sw_stat->lso_err_cnt))
4467			goto reset;
4468		do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4469				      &bar0->lso_err_reg,
4470				      &sw_stat->lso_err_cnt);
4471	}
4472
4473	/*check for tpa_err*/
4474	if (val64 & TXDMA_TPA_INT) {
4475		if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM,
4476					  &bar0->tpa_err_reg,
4477					  &sw_stat->tpa_err_cnt))
4478			goto reset;
4479		do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP,
4480				      &bar0->tpa_err_reg,
4481				      &sw_stat->tpa_err_cnt);
4482	}
4483
4484	/*check for sm_err*/
4485	if (val64 & TXDMA_SM_INT) {
4486		if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM,
4487					  &bar0->sm_err_reg,
4488					  &sw_stat->sm_err_cnt))
4489			goto reset;
4490	}
4491
4492	val64 = readq(&bar0->mac_int_status);
4493	if (val64 & MAC_INT_STATUS_TMAC_INT) {
4494		if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4495					  &bar0->mac_tmac_err_reg,
4496					  &sw_stat->mac_tmac_err_cnt))
4497			goto reset;
4498		do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
4499				      TMAC_DESC_ECC_SG_ERR |
4500				      TMAC_DESC_ECC_DB_ERR,
4501				      &bar0->mac_tmac_err_reg,
4502				      &sw_stat->mac_tmac_err_cnt);
4503	}
4504
4505	val64 = readq(&bar0->xgxs_int_status);
4506	if (val64 & XGXS_INT_STATUS_TXGXS) {
4507		if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4508					  &bar0->xgxs_txgxs_err_reg,
4509					  &sw_stat->xgxs_txgxs_err_cnt))
4510			goto reset;
4511		do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4512				      &bar0->xgxs_txgxs_err_reg,
4513				      &sw_stat->xgxs_txgxs_err_cnt);
4514	}
4515
4516	val64 = readq(&bar0->rxdma_int_status);
4517	if (val64 & RXDMA_INT_RC_INT_M) {
4518		if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR |
4519					  RC_FTC_ECC_DB_ERR |
4520					  RC_PRCn_SM_ERR_ALARM |
4521					  RC_FTC_SM_ERR_ALARM,
4522					  &bar0->rc_err_reg,
4523					  &sw_stat->rc_err_cnt))
4524			goto reset;
4525		do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR |
4526				      RC_FTC_ECC_SG_ERR |
4527				      RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4528				      &sw_stat->rc_err_cnt);
4529		if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn |
4530					  PRC_PCI_AB_WR_Rn |
4531					  PRC_PCI_AB_F_WR_Rn,
4532					  &bar0->prc_pcix_err_reg,
4533					  &sw_stat->prc_pcix_err_cnt))
4534			goto reset;
4535		do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn |
4536				      PRC_PCI_DP_WR_Rn |
4537				      PRC_PCI_DP_F_WR_Rn,
4538				      &bar0->prc_pcix_err_reg,
4539				      &sw_stat->prc_pcix_err_cnt);
4540	}
4541
4542	if (val64 & RXDMA_INT_RPA_INT_M) {
4543		if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4544					  &bar0->rpa_err_reg,
4545					  &sw_stat->rpa_err_cnt))
4546			goto reset;
4547		do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4548				      &bar0->rpa_err_reg,
4549				      &sw_stat->rpa_err_cnt);
4550	}
4551
4552	if (val64 & RXDMA_INT_RDA_INT_M) {
4553		if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR |
4554					  RDA_FRM_ECC_DB_N_AERR |
4555					  RDA_SM1_ERR_ALARM |
4556					  RDA_SM0_ERR_ALARM |
4557					  RDA_RXD_ECC_DB_SERR,
4558					  &bar0->rda_err_reg,
4559					  &sw_stat->rda_err_cnt))
4560			goto reset;
4561		do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR |
4562				      RDA_FRM_ECC_SG_ERR |
4563				      RDA_MISC_ERR |
4564				      RDA_PCIX_ERR,
4565				      &bar0->rda_err_reg,
4566				      &sw_stat->rda_err_cnt);
4567	}
4568
4569	if (val64 & RXDMA_INT_RTI_INT_M) {
4570		if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM,
4571					  &bar0->rti_err_reg,
4572					  &sw_stat->rti_err_cnt))
4573			goto reset;
4574		do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4575				      &bar0->rti_err_reg,
4576				      &sw_stat->rti_err_cnt);
4577	}
4578
4579	val64 = readq(&bar0->mac_int_status);
4580	if (val64 & MAC_INT_STATUS_RMAC_INT) {
4581		if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4582					  &bar0->mac_rmac_err_reg,
4583					  &sw_stat->mac_rmac_err_cnt))
4584			goto reset;
4585		do_s2io_chk_alarm_bit(RMAC_UNUSED_INT |
4586				      RMAC_SINGLE_ECC_ERR |
4587				      RMAC_DOUBLE_ECC_ERR,
4588				      &bar0->mac_rmac_err_reg,
4589				      &sw_stat->mac_rmac_err_cnt);
4590	}
4591
4592	val64 = readq(&bar0->xgxs_int_status);
4593	if (val64 & XGXS_INT_STATUS_RXGXS) {
4594		if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4595					  &bar0->xgxs_rxgxs_err_reg,
4596					  &sw_stat->xgxs_rxgxs_err_cnt))
4597			goto reset;
4598	}
4599
4600	val64 = readq(&bar0->mc_int_status);
4601	if (val64 & MC_INT_STATUS_MC_INT) {
4602		if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR,
4603					  &bar0->mc_err_reg,
4604					  &sw_stat->mc_err_cnt))
4605			goto reset;
4606
4607		/* Handling Ecc errors */
4608		if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4609			writeq(val64, &bar0->mc_err_reg);
4610			if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4611				sw_stat->double_ecc_errs++;
4612				if (sp->device_type != XFRAME_II_DEVICE) {
4613					/*
4614					 * Reset XframeI only if critical error
4615					 */
4616					if (val64 &
4617					    (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4618					     MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4619						goto reset;
4620				}
4621			} else
4622				sw_stat->single_ecc_errs++;
4623		}
4624	}
4625	return;
4626
4627reset:
4628	s2io_stop_all_tx_queue(sp);
4629	schedule_work(&sp->rst_timer_task);
4630	sw_stat->soft_reset_cnt++;
4631}
4632
4633/**
4634 *  s2io_isr - ISR handler of the device .
4635 *  @irq: the irq of the device.
4636 *  @dev_id: a void pointer to the dev structure of the NIC.
4637 *  Description:  This function is the ISR handler of the device. It
4638 *  identifies the reason for the interrupt and calls the relevant
4639 *  service routines. As a contongency measure, this ISR allocates the
4640 *  recv buffers, if their numbers are below the panic value which is
4641 *  presently set to 25% of the original number of rcv buffers allocated.
4642 *  Return value:
4643 *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
4644 *   IRQ_NONE: will be returned if interrupt is not from our device
4645 */
4646static irqreturn_t s2io_isr(int irq, void *dev_id)
4647{
4648	struct net_device *dev = (struct net_device *)dev_id;
4649	struct s2io_nic *sp = netdev_priv(dev);
4650	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4651	int i;
4652	u64 reason = 0;
4653	struct mac_info *mac_control;
4654	struct config_param *config;
4655
4656	/* Pretend we handled any irq's from a disconnected card */
4657	if (pci_channel_offline(sp->pdev))
4658		return IRQ_NONE;
4659
4660	if (!is_s2io_card_up(sp))
4661		return IRQ_NONE;
4662
4663	config = &sp->config;
4664	mac_control = &sp->mac_control;
4665
4666	/*
4667	 * Identify the cause for interrupt and call the appropriate
4668	 * interrupt handler. Causes for the interrupt could be;
4669	 * 1. Rx of packet.
4670	 * 2. Tx complete.
4671	 * 3. Link down.
4672	 */
4673	reason = readq(&bar0->general_int_status);
4674
4675	if (unlikely(reason == S2IO_MINUS_ONE))
4676		return IRQ_HANDLED;	/* Nothing much can be done. Get out */
4677
4678	if (reason &
4679	    (GEN_INTR_RXTRAFFIC | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) {
4680		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4681
4682		if (config->napi) {
4683			if (reason & GEN_INTR_RXTRAFFIC) {
4684				napi_schedule(&sp->napi);
4685				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4686				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4687				readl(&bar0->rx_traffic_int);
4688			}
4689		} else {
4690			/*
4691			 * rx_traffic_int reg is an R1 register, writing all 1's
4692			 * will ensure that the actual interrupt causing bit
4693			 * get's cleared and hence a read can be avoided.
4694			 */
4695			if (reason & GEN_INTR_RXTRAFFIC)
4696				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4697
4698			for (i = 0; i < config->rx_ring_num; i++) {
4699				struct ring_info *ring = &mac_control->rings[i];
4700
4701				rx_intr_handler(ring, 0);
4702			}
4703		}
4704
4705		/*
4706		 * tx_traffic_int reg is an R1 register, writing all 1's
4707		 * will ensure that the actual interrupt causing bit get's
4708		 * cleared and hence a read can be avoided.
4709		 */
4710		if (reason & GEN_INTR_TXTRAFFIC)
4711			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4712
4713		for (i = 0; i < config->tx_fifo_num; i++)
4714			tx_intr_handler(&mac_control->fifos[i]);
4715
4716		if (reason & GEN_INTR_TXPIC)
4717			s2io_txpic_intr_handle(sp);
4718
4719		/*
4720		 * Reallocate the buffers from the interrupt handler itself.
4721		 */
4722		if (!config->napi) {
4723			for (i = 0; i < config->rx_ring_num; i++) {
4724				struct ring_info *ring = &mac_control->rings[i];
4725
4726				s2io_chk_rx_buffers(sp, ring);
4727			}
4728		}
4729		writeq(sp->general_int_mask, &bar0->general_int_mask);
4730		readl(&bar0->general_int_status);
4731
4732		return IRQ_HANDLED;
4733
4734	} else if (!reason) {
4735		/* The interrupt was not raised by us */
4736		return IRQ_NONE;
4737	}
4738
4739	return IRQ_HANDLED;
4740}
4741
4742/**
4743 * s2io_updt_stats -
4744 */
4745static void s2io_updt_stats(struct s2io_nic *sp)
4746{
4747	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4748	u64 val64;
4749	int cnt = 0;
4750
4751	if (is_s2io_card_up(sp)) {
4752		/* Apprx 30us on a 133 MHz bus */
4753		val64 = SET_UPDT_CLICKS(10) |
4754			STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4755		writeq(val64, &bar0->stat_cfg);
4756		do {
4757			udelay(100);
4758			val64 = readq(&bar0->stat_cfg);
4759			if (!(val64 & s2BIT(0)))
4760				break;
4761			cnt++;
4762			if (cnt == 5)
4763				break; /* Updt failed */
4764		} while (1);
4765	}
4766}
4767
4768/**
4769 *  s2io_get_stats - Updates the device statistics structure.
4770 *  @dev : pointer to the device structure.
4771 *  Description:
4772 *  This function updates the device statistics structure in the s2io_nic
4773 *  structure and returns a pointer to the same.
4774 *  Return value:
4775 *  pointer to the updated net_device_stats structure.
4776 */
4777static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4778{
4779	struct s2io_nic *sp = netdev_priv(dev);
4780	struct mac_info *mac_control = &sp->mac_control;
4781	struct stat_block *stats = mac_control->stats_info;
4782	u64 delta;
4783
4784	/* Configure Stats for immediate updt */
4785	s2io_updt_stats(sp);
4786
4787	/* A device reset will cause the on-adapter statistics to be zero'ed.
4788	 * This can be done while running by changing the MTU.  To prevent the
4789	 * system from having the stats zero'ed, the driver keeps a copy of the
4790	 * last update to the system (which is also zero'ed on reset).  This
4791	 * enables the driver to accurately know the delta between the last
4792	 * update and the current update.
4793	 */
4794	delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
4795		le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets;
4796	sp->stats.rx_packets += delta;
4797	dev->stats.rx_packets += delta;
4798
4799	delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 |
4800		le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets;
4801	sp->stats.tx_packets += delta;
4802	dev->stats.tx_packets += delta;
4803
4804	delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
4805		le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes;
4806	sp->stats.rx_bytes += delta;
4807	dev->stats.rx_bytes += delta;
4808
4809	delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
4810		le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes;
4811	sp->stats.tx_bytes += delta;
4812	dev->stats.tx_bytes += delta;
4813
4814	delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors;
4815	sp->stats.rx_errors += delta;
4816	dev->stats.rx_errors += delta;
4817
4818	delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
4819		le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors;
4820	sp->stats.tx_errors += delta;
4821	dev->stats.tx_errors += delta;
4822
4823	delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped;
4824	sp->stats.rx_dropped += delta;
4825	dev->stats.rx_dropped += delta;
4826
4827	delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped;
4828	sp->stats.tx_dropped += delta;
4829	dev->stats.tx_dropped += delta;
4830
4831	/* The adapter MAC interprets pause frames as multicast packets, but
4832	 * does not pass them up.  This erroneously increases the multicast
4833	 * packet count and needs to be deducted when the multicast frame count
4834	 * is queried.
4835	 */
4836	delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
4837		le32_to_cpu(stats->rmac_vld_mcst_frms);
4838	delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms);
4839	delta -= sp->stats.multicast;
4840	sp->stats.multicast += delta;
4841	dev->stats.multicast += delta;
4842
4843	delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
4844		le32_to_cpu(stats->rmac_usized_frms)) +
4845		le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors;
4846	sp->stats.rx_length_errors += delta;
4847	dev->stats.rx_length_errors += delta;
4848
4849	delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors;
4850	sp->stats.rx_crc_errors += delta;
4851	dev->stats.rx_crc_errors += delta;
4852
4853	return &dev->stats;
4854}
4855
4856/**
4857 *  s2io_set_multicast - entry point for multicast address enable/disable.
4858 *  @dev : pointer to the device structure
4859 *  Description:
4860 *  This function is a driver entry point which gets called by the kernel
4861 *  whenever multicast addresses must be enabled/disabled. This also gets
4862 *  called to set/reset promiscuous mode. Depending on the deivce flag, we
4863 *  determine, if multicast address must be enabled or if promiscuous mode
4864 *  is to be disabled etc.
4865 *  Return value:
4866 *  void.
4867 */
4868
4869static void s2io_set_multicast(struct net_device *dev)
4870{
4871	int i, j, prev_cnt;
4872	struct netdev_hw_addr *ha;
4873	struct s2io_nic *sp = netdev_priv(dev);
4874	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4875	u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4876		0xfeffffffffffULL;
4877	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
4878	void __iomem *add;
4879	struct config_param *config = &sp->config;
4880
4881	if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4882		/*  Enable all Multicast addresses */
4883		writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4884		       &bar0->rmac_addr_data0_mem);
4885		writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4886		       &bar0->rmac_addr_data1_mem);
4887		val64 = RMAC_ADDR_CMD_MEM_WE |
4888			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4889			RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
4890		writeq(val64, &bar0->rmac_addr_cmd_mem);
4891		/* Wait till command completes */
4892		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4893				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4894				      S2IO_BIT_RESET);
4895
4896		sp->m_cast_flg = 1;
4897		sp->all_multi_pos = config->max_mc_addr - 1;
4898	} else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4899		/*  Disable all Multicast addresses */
4900		writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4901		       &bar0->rmac_addr_data0_mem);
4902		writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4903		       &bar0->rmac_addr_data1_mem);
4904		val64 = RMAC_ADDR_CMD_MEM_WE |
4905			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4906			RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4907		writeq(val64, &bar0->rmac_addr_cmd_mem);
4908		/* Wait till command completes */
4909		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4910				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4911				      S2IO_BIT_RESET);
4912
4913		sp->m_cast_flg = 0;
4914		sp->all_multi_pos = 0;
4915	}
4916
4917	if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4918		/*  Put the NIC into promiscuous mode */
4919		add = &bar0->mac_cfg;
4920		val64 = readq(&bar0->mac_cfg);
4921		val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4922
4923		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4924		writel((u32)val64, add);
4925		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4926		writel((u32) (val64 >> 32), (add + 4));
4927
4928		if (vlan_tag_strip != 1) {
4929			val64 = readq(&bar0->rx_pa_cfg);
4930			val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4931			writeq(val64, &bar0->rx_pa_cfg);
4932			sp->vlan_strip_flag = 0;
4933		}
4934
4935		val64 = readq(&bar0->mac_cfg);
4936		sp->promisc_flg = 1;
4937		DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
4938			  dev->name);
4939	} else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4940		/*  Remove the NIC from promiscuous mode */
4941		add = &bar0->mac_cfg;
4942		val64 = readq(&bar0->mac_cfg);
4943		val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4944
4945		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4946		writel((u32)val64, add);
4947		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4948		writel((u32) (val64 >> 32), (add + 4));
4949
4950		if (vlan_tag_strip != 0) {
4951			val64 = readq(&bar0->rx_pa_cfg);
4952			val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
4953			writeq(val64, &bar0->rx_pa_cfg);
4954			sp->vlan_strip_flag = 1;
4955		}
4956
4957		val64 = readq(&bar0->mac_cfg);
4958		sp->promisc_flg = 0;
4959		DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", dev->name);
4960	}
4961
4962	/*  Update individual M_CAST address list */
4963	if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
4964		if (netdev_mc_count(dev) >
4965		    (config->max_mc_addr - config->max_mac_addr)) {
4966			DBG_PRINT(ERR_DBG,
4967				  "%s: No more Rx filters can be added - "
4968				  "please enable ALL_MULTI instead\n",
4969				  dev->name);
4970			return;
4971		}
4972
4973		prev_cnt = sp->mc_addr_count;
4974		sp->mc_addr_count = netdev_mc_count(dev);
4975
4976		/* Clear out the previous list of Mc in the H/W. */
4977		for (i = 0; i < prev_cnt; i++) {
4978			writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4979			       &bar0->rmac_addr_data0_mem);
4980			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4981			       &bar0->rmac_addr_data1_mem);
4982			val64 = RMAC_ADDR_CMD_MEM_WE |
4983				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4984				RMAC_ADDR_CMD_MEM_OFFSET
4985				(config->mc_start_offset + i);
4986			writeq(val64, &bar0->rmac_addr_cmd_mem);
4987
4988			/* Wait for command completes */
4989			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4990						  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4991						  S2IO_BIT_RESET)) {
4992				DBG_PRINT(ERR_DBG,
4993					  "%s: Adding Multicasts failed\n",
4994					  dev->name);
4995				return;
4996			}
4997		}
4998
4999		/* Create the new Rx filter list and update the same in H/W. */
5000		i = 0;
5001		netdev_for_each_mc_addr(ha, dev) {
5002			mac_addr = 0;
5003			for (j = 0; j < ETH_ALEN; j++) {
5004				mac_addr |= ha->addr[j];
5005				mac_addr <<= 8;
5006			}
5007			mac_addr >>= 8;
5008			writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
5009			       &bar0->rmac_addr_data0_mem);
5010			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5011			       &bar0->rmac_addr_data1_mem);
5012			val64 = RMAC_ADDR_CMD_MEM_WE |
5013				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5014				RMAC_ADDR_CMD_MEM_OFFSET
5015				(i + config->mc_start_offset);
5016			writeq(val64, &bar0->rmac_addr_cmd_mem);
5017
5018			/* Wait for command completes */
5019			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5020						  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5021						  S2IO_BIT_RESET)) {
5022				DBG_PRINT(ERR_DBG,
5023					  "%s: Adding Multicasts failed\n",
5024					  dev->name);
5025				return;
5026			}
5027			i++;
5028		}
5029	}
5030}
5031
5032/* read from CAM unicast & multicast addresses and store it in
5033 * def_mac_addr structure
5034 */
5035static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5036{
5037	int offset;
5038	u64 mac_addr = 0x0;
5039	struct config_param *config = &sp->config;
5040
5041	/* store unicast & multicast mac addresses */
5042	for (offset = 0; offset < config->max_mc_addr; offset++) {
5043		mac_addr = do_s2io_read_unicast_mc(sp, offset);
5044		/* if read fails disable the entry */
5045		if (mac_addr == FAILURE)
5046			mac_addr = S2IO_DISABLE_MAC_ENTRY;
5047		do_s2io_copy_mac_addr(sp, offset, mac_addr);
5048	}
5049}
5050
5051/* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5052static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5053{
5054	int offset;
5055	struct config_param *config = &sp->config;
5056	/* restore unicast mac address */
5057	for (offset = 0; offset < config->max_mac_addr; offset++)
5058		do_s2io_prog_unicast(sp->dev,
5059				     sp->def_mac_addr[offset].mac_addr);
5060
5061	/* restore multicast mac address */
5062	for (offset = config->mc_start_offset;
5063	     offset < config->max_mc_addr; offset++)
5064		do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5065}
5066
5067/* add a multicast MAC address to CAM */
5068static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5069{
5070	int i;
5071	u64 mac_addr = 0;
5072	struct config_param *config = &sp->config;
5073
5074	for (i = 0; i < ETH_ALEN; i++) {
5075		mac_addr <<= 8;
5076		mac_addr |= addr[i];
5077	}
5078	if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5079		return SUCCESS;
5080
5081	/* check if the multicast mac already preset in CAM */
5082	for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5083		u64 tmp64;
5084		tmp64 = do_s2io_read_unicast_mc(sp, i);
5085		if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5086			break;
5087
5088		if (tmp64 == mac_addr)
5089			return SUCCESS;
5090	}
5091	if (i == config->max_mc_addr) {
5092		DBG_PRINT(ERR_DBG,
5093			  "CAM full no space left for multicast MAC\n");
5094		return FAILURE;
5095	}
5096	/* Update the internal structure with this new mac address */
5097	do_s2io_copy_mac_addr(sp, i, mac_addr);
5098
5099	return do_s2io_add_mac(sp, mac_addr, i);
5100}
5101
/* add MAC address to CAM */
static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
{
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Stage the address in the CAM data register first ... */
	writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
	       &bar0->rmac_addr_data0_mem);

	/* ... then strobe a write-enable command for the target offset. */
	val64 =	RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		RMAC_ADDR_CMD_MEM_OFFSET(off);
	writeq(val64, &bar0->rmac_addr_cmd_mem);

	/* Wait till command completes */
	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
				  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
				  S2IO_BIT_RESET)) {
		DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
		return FAILURE;
	}
	return SUCCESS;
}
5124/* deletes a specified unicast/multicast mac entry from CAM */
5125static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5126{
5127	int offset;
5128	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5129	struct config_param *config = &sp->config;
5130
5131	for (offset = 1;
5132	     offset < config->max_mc_addr; offset++) {
5133		tmp64 = do_s2io_read_unicast_mc(sp, offset);
5134		if (tmp64 == addr) {
5135			/* disable the entry by writing  0xffffffffffffULL */
5136			if (do_s2io_add_mac(sp, dis_addr, offset) ==  FAILURE)
5137				return FAILURE;
5138			/* store the new mac list from CAM */
5139			do_s2io_store_unicast_mc(sp);
5140			return SUCCESS;
5141		}
5142	}
5143	DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5144		  (unsigned long long)addr);
5145	return FAILURE;
5146}
5147
/* read mac entries from CAM */
static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
{
	u64 tmp64, val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* read mac addr */
	val64 =	RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		RMAC_ADDR_CMD_MEM_OFFSET(offset);
	writeq(val64, &bar0->rmac_addr_cmd_mem);

	/* Wait till command completes */
	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
				  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
				  S2IO_BIT_RESET)) {
		DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
		/* NOTE(review): FAILURE is returned from a u64-returning
		 * function; callers compare the result only against 48-bit
		 * address patterns, so the converted value presumably cannot
		 * be mistaken for a valid entry — confirm FAILURE's value.
		 */
		return FAILURE;
	}
	tmp64 = readq(&bar0->rmac_addr_data0_mem);

	/* The 48-bit address sits in the upper bits of the data register. */
	return tmp64 >> 16;
}
5170
5171/**
5172 * s2io_set_mac_addr - driver entry point
5173 */
5174
5175static int s2io_set_mac_addr(struct net_device *dev, void *p)
5176{
5177	struct sockaddr *addr = p;
5178
5179	if (!is_valid_ether_addr(addr->sa_data))
5180		return -EADDRNOTAVAIL;
5181
5182	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5183
5184	/* store the MAC address in CAM */
5185	return do_s2io_prog_unicast(dev, dev->dev_addr);
5186}
5187/**
5188 *  do_s2io_prog_unicast - Programs the Xframe mac address
5189 *  @dev : pointer to the device structure.
5190 *  @addr: a uchar pointer to the new mac address which is to be set.
5191 *  Description : This procedure will program the Xframe to receive
5192 *  frames with new Mac Address
5193 *  Return value: SUCCESS on success and an appropriate (-)ve integer
5194 *  as defined in errno.h file on failure.
5195 */
5196
5197static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
5198{
5199	struct s2io_nic *sp = netdev_priv(dev);
5200	register u64 mac_addr = 0, perm_addr = 0;
5201	int i;
5202	u64 tmp64;
5203	struct config_param *config = &sp->config;
5204
5205	/*
5206	 * Set the new MAC address as the new unicast filter and reflect this
5207	 * change on the device address registered with the OS. It will be
5208	 * at offset 0.
5209	 */
5210	for (i = 0; i < ETH_ALEN; i++) {
5211		mac_addr <<= 8;
5212		mac_addr |= addr[i];
5213		perm_addr <<= 8;
5214		perm_addr |= sp->def_mac_addr[0].mac_addr[i];
5215	}
5216
5217	/* check if the dev_addr is different than perm_addr */
5218	if (mac_addr == perm_addr)
5219		return SUCCESS;
5220
5221	/* check if the mac already preset in CAM */
5222	for (i = 1; i < config->max_mac_addr; i++) {
5223		tmp64 = do_s2io_read_unicast_mc(sp, i);
5224		if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5225			break;
5226
5227		if (tmp64 == mac_addr) {
5228			DBG_PRINT(INFO_DBG,
5229				  "MAC addr:0x%llx already present in CAM\n",
5230				  (unsigned long long)mac_addr);
5231			return SUCCESS;
5232		}
5233	}
5234	if (i == config->max_mac_addr) {
5235		DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5236		return FAILURE;
5237	}
5238	/* Update the internal structure with this new mac address */
5239	do_s2io_copy_mac_addr(sp, i, mac_addr);
5240
5241	return do_s2io_add_mac(sp, mac_addr, i);
5242}
5243
5244/**
5245 * s2io_ethtool_set_link_ksettings - Sets different link parameters.
5246 * @sp : private member of the device structure, which is a pointer to the
5247 * s2io_nic structure.
5248 * @cmd: pointer to the structure with parameters given by ethtool to set
5249 * link information.
5250 * Description:
5251 * The function sets different link parameters provided by the user onto
5252 * the NIC.
5253 * Return value:
5254 * 0 on success.
5255 */
5256
5257static int
5258s2io_ethtool_set_link_ksettings(struct net_device *dev,
5259				const struct ethtool_link_ksettings *cmd)
5260{
5261	struct s2io_nic *sp = netdev_priv(dev);
5262	if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
5263	    (cmd->base.speed != SPEED_10000) ||
5264	    (cmd->base.duplex != DUPLEX_FULL))
5265		return -EINVAL;
5266	else {
5267		s2io_close(sp->dev);
5268		s2io_open(sp->dev);
5269	}
5270
5271	return 0;
5272}
5273
5274/**
5275 * s2io_ethtol_get_link_ksettings - Return link specific information.
5276 * @sp : private member of the device structure, pointer to the
5277 *      s2io_nic structure.
5278 * @cmd : pointer to the structure with parameters given by ethtool
5279 * to return link information.
5280 * Description:
5281 * Returns link specific information like speed, duplex etc.. to ethtool.
5282 * Return value :
5283 * return 0 on success.
5284 */
5285
5286static int
5287s2io_ethtool_get_link_ksettings(struct net_device *dev,
5288				struct ethtool_link_ksettings *cmd)
5289{
5290	struct s2io_nic *sp = netdev_priv(dev);
 
 
 
5291
5292	ethtool_link_ksettings_zero_link_mode(cmd, supported);
5293	ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
5294	ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
5295
5296	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
5297	ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full);
5298	ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
5299
5300	cmd->base.port = PORT_FIBRE;
5301
5302	if (netif_carrier_ok(sp->dev)) {
5303		cmd->base.speed = SPEED_10000;
5304		cmd->base.duplex = DUPLEX_FULL;
5305	} else {
5306		cmd->base.speed = SPEED_UNKNOWN;
5307		cmd->base.duplex = DUPLEX_UNKNOWN;
5308	}
5309
5310	cmd->base.autoneg = AUTONEG_DISABLE;
5311	return 0;
5312}
5313
5314/**
5315 * s2io_ethtool_gdrvinfo - Returns driver specific information.
5316 * @sp : private member of the device structure, which is a pointer to the
5317 * s2io_nic structure.
5318 * @info : pointer to the structure with parameters given by ethtool to
5319 * return driver information.
5320 * Description:
5321 * Returns driver specefic information like name, version etc.. to ethtool.
5322 * Return value:
5323 *  void
5324 */
5325
5326static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5327				  struct ethtool_drvinfo *info)
5328{
5329	struct s2io_nic *sp = netdev_priv(dev);
5330
5331	strlcpy(info->driver, s2io_driver_name, sizeof(info->driver));
5332	strlcpy(info->version, s2io_driver_version, sizeof(info->version));
5333	strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
 
 
5334}
5335
5336/**
5337 *  s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
5338 *  @sp: private member of the device structure, which is a pointer to the
5339 *  s2io_nic structure.
5340 *  @regs : pointer to the structure with parameters given by ethtool for
5341 *  dumping the registers.
5342 *  @reg_space: The input argument into which all the registers are dumped.
5343 *  Description:
5344 *  Dumps the entire register space of xFrame NIC into the user given
5345 *  buffer area.
5346 * Return value :
5347 * void .
5348 */
5349
5350static void s2io_ethtool_gregs(struct net_device *dev,
5351			       struct ethtool_regs *regs, void *space)
5352{
5353	int i;
5354	u64 reg;
5355	u8 *reg_space = (u8 *)space;
5356	struct s2io_nic *sp = netdev_priv(dev);
5357
5358	regs->len = XENA_REG_SPACE;
5359	regs->version = sp->pdev->subsystem_device;
5360
5361	for (i = 0; i < regs->len; i += 8) {
5362		reg = readq(sp->bar0 + i);
5363		memcpy((reg_space + i), &reg, 8);
5364	}
5365}
5366
5367/*
5368 *  s2io_set_led - control NIC led
5369 */
5370static void s2io_set_led(struct s2io_nic *sp, bool on)
5371{
5372	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5373	u16 subid = sp->pdev->subsystem_device;
5374	u64 val64;
5375
5376	if ((sp->device_type == XFRAME_II_DEVICE) ||
5377	    ((subid & 0xFF) >= 0x07)) {
5378		val64 = readq(&bar0->gpio_control);
5379		if (on)
5380			val64 |= GPIO_CTRL_GPIO_0;
5381		else
5382			val64 &= ~GPIO_CTRL_GPIO_0;
5383
5384		writeq(val64, &bar0->gpio_control);
5385	} else {
5386		val64 = readq(&bar0->adapter_control);
5387		if (on)
5388			val64 |= ADAPTER_LED_ON;
5389		else
5390			val64 &= ~ADAPTER_LED_ON;
5391
5392		writeq(val64, &bar0->adapter_control);
5393	}
5394
5395}
5396
5397/**
5398 * s2io_ethtool_set_led - To physically identify the nic on the system.
5399 * @dev : network device
5400 * @state: led setting
5401 *
5402 * Description: Used to physically identify the NIC on the system.
5403 * The Link LED will blink for a time specified by the user for
5404 * identification.
5405 * NOTE: The Link has to be Up to be able to blink the LED. Hence
5406 * identification is possible only if it's link is up.
5407 */
5408
5409static int s2io_ethtool_set_led(struct net_device *dev,
5410				enum ethtool_phys_id_state state)
5411{
5412	struct s2io_nic *sp = netdev_priv(dev);
5413	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5414	u16 subid = sp->pdev->subsystem_device;
5415
5416	if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) {
5417		u64 val64 = readq(&bar0->adapter_control);
5418		if (!(val64 & ADAPTER_CNTL_EN)) {
5419			pr_err("Adapter Link down, cannot blink LED\n");
5420			return -EAGAIN;
5421		}
5422	}
5423
5424	switch (state) {
5425	case ETHTOOL_ID_ACTIVE:
5426		sp->adapt_ctrl_org = readq(&bar0->gpio_control);
5427		return 1;	/* cycle on/off once per second */
5428
5429	case ETHTOOL_ID_ON:
5430		s2io_set_led(sp, true);
5431		break;
5432
5433	case ETHTOOL_ID_OFF:
5434		s2io_set_led(sp, false);
5435		break;
5436
5437	case ETHTOOL_ID_INACTIVE:
5438		if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid))
5439			writeq(sp->adapt_ctrl_org, &bar0->gpio_control);
5440	}
5441
5442	return 0;
5443}
5444
5445static void s2io_ethtool_gringparam(struct net_device *dev,
5446				    struct ethtool_ringparam *ering)
5447{
5448	struct s2io_nic *sp = netdev_priv(dev);
5449	int i, tx_desc_count = 0, rx_desc_count = 0;
5450
5451	if (sp->rxd_mode == RXD_MODE_1) {
5452		ering->rx_max_pending = MAX_RX_DESC_1;
5453		ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5454	} else {
5455		ering->rx_max_pending = MAX_RX_DESC_2;
5456		ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5457	}
5458
5459	ering->tx_max_pending = MAX_TX_DESC;
5460
5461	for (i = 0; i < sp->config.rx_ring_num; i++)
5462		rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5463	ering->rx_pending = rx_desc_count;
5464	ering->rx_jumbo_pending = rx_desc_count;
5465
5466	for (i = 0; i < sp->config.tx_fifo_num; i++)
5467		tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5468	ering->tx_pending = tx_desc_count;
5469	DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
5470}
5471
5472/**
5473 * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
5474 * @sp : private member of the device structure, which is a pointer to the
5475 *	s2io_nic structure.
5476 * @ep : pointer to the structure with pause parameters given by ethtool.
5477 * Description:
5478 * Returns the Pause frame generation and reception capability of the NIC.
5479 * Return value:
5480 *  void
5481 */
5482static void s2io_ethtool_getpause_data(struct net_device *dev,
5483				       struct ethtool_pauseparam *ep)
5484{
5485	u64 val64;
5486	struct s2io_nic *sp = netdev_priv(dev);
5487	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5488
5489	val64 = readq(&bar0->rmac_pause_cfg);
5490	if (val64 & RMAC_PAUSE_GEN_ENABLE)
5491		ep->tx_pause = true;
5492	if (val64 & RMAC_PAUSE_RX_ENABLE)
5493		ep->rx_pause = true;
5494	ep->autoneg = false;
5495}
5496
5497/**
5498 * s2io_ethtool_setpause_data -  set/reset pause frame generation.
5499 * @sp : private member of the device structure, which is a pointer to the
5500 *      s2io_nic structure.
5501 * @ep : pointer to the structure with pause parameters given by ethtool.
5502 * Description:
5503 * It can be used to set or reset Pause frame generation or reception
5504 * support of the NIC.
5505 * Return value:
5506 * int, returns 0 on Success
5507 */
5508
5509static int s2io_ethtool_setpause_data(struct net_device *dev,
5510				      struct ethtool_pauseparam *ep)
5511{
5512	u64 val64;
5513	struct s2io_nic *sp = netdev_priv(dev);
5514	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5515
5516	val64 = readq(&bar0->rmac_pause_cfg);
5517	if (ep->tx_pause)
5518		val64 |= RMAC_PAUSE_GEN_ENABLE;
5519	else
5520		val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5521	if (ep->rx_pause)
5522		val64 |= RMAC_PAUSE_RX_ENABLE;
5523	else
5524		val64 &= ~RMAC_PAUSE_RX_ENABLE;
5525	writeq(val64, &bar0->rmac_pause_cfg);
5526	return 0;
5527}
5528
5529/**
5530 * read_eeprom - reads 4 bytes of data from user given offset.
5531 * @sp : private member of the device structure, which is a pointer to the
5532 *      s2io_nic structure.
5533 * @off : offset at which the data must be written
5534 * @data : Its an output parameter where the data read at the given
5535 *	offset is stored.
5536 * Description:
5537 * Will read 4 bytes of data from the user given offset and return the
5538 * read data.
5539 * NOTE: Will allow to read only part of the EEPROM visible through the
5540 *   I2C bus.
5541 * Return value:
5542 *  -1 on failure and 0 on success.
5543 */
5544
5545#define S2IO_DEV_ID		5
5546static int read_eeprom(struct s2io_nic *sp, int off, u64 *data)
5547{
5548	int ret = -1;
5549	u32 exit_cnt = 0;
5550	u64 val64;
5551	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5552
5553	if (sp->device_type == XFRAME_I_DEVICE) {
5554		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
5555			I2C_CONTROL_ADDR(off) |
5556			I2C_CONTROL_BYTE_CNT(0x3) |
5557			I2C_CONTROL_READ |
5558			I2C_CONTROL_CNTL_START;
5559		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5560
5561		while (exit_cnt < 5) {
5562			val64 = readq(&bar0->i2c_control);
5563			if (I2C_CONTROL_CNTL_END(val64)) {
5564				*data = I2C_CONTROL_GET_DATA(val64);
5565				ret = 0;
5566				break;
5567			}
5568			msleep(50);
5569			exit_cnt++;
5570		}
5571	}
5572
5573	if (sp->device_type == XFRAME_II_DEVICE) {
5574		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5575			SPI_CONTROL_BYTECNT(0x3) |
5576			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
5577		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5578		val64 |= SPI_CONTROL_REQ;
5579		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5580		while (exit_cnt < 5) {
5581			val64 = readq(&bar0->spi_control);
5582			if (val64 & SPI_CONTROL_NACK) {
5583				ret = 1;
5584				break;
5585			} else if (val64 & SPI_CONTROL_DONE) {
5586				*data = readq(&bar0->spi_data);
5587				*data &= 0xffffff;
5588				ret = 0;
5589				break;
5590			}
5591			msleep(50);
5592			exit_cnt++;
5593		}
5594	}
5595	return ret;
5596}
5597
5598/**
5599 *  write_eeprom - actually writes the relevant part of the data value.
5600 *  @sp : private member of the device structure, which is a pointer to the
5601 *       s2io_nic structure.
5602 *  @off : offset at which the data must be written
5603 *  @data : The data that is to be written
5604 *  @cnt : Number of bytes of the data that are actually to be written into
5605 *  the Eeprom. (max of 3)
5606 * Description:
5607 *  Actually writes the relevant part of the data value into the Eeprom
5608 *  through the I2C bus.
5609 * Return value:
5610 *  0 on success, -1 on failure.
5611 */
5612
5613static int write_eeprom(struct s2io_nic *sp, int off, u64 data, int cnt)
5614{
5615	int exit_cnt = 0, ret = -1;
5616	u64 val64;
5617	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5618
5619	if (sp->device_type == XFRAME_I_DEVICE) {
5620		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
5621			I2C_CONTROL_ADDR(off) |
5622			I2C_CONTROL_BYTE_CNT(cnt) |
5623			I2C_CONTROL_SET_DATA((u32)data) |
5624			I2C_CONTROL_CNTL_START;
5625		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5626
5627		while (exit_cnt < 5) {
5628			val64 = readq(&bar0->i2c_control);
5629			if (I2C_CONTROL_CNTL_END(val64)) {
5630				if (!(val64 & I2C_CONTROL_NACK))
5631					ret = 0;
5632				break;
5633			}
5634			msleep(50);
5635			exit_cnt++;
5636		}
5637	}
5638
5639	if (sp->device_type == XFRAME_II_DEVICE) {
5640		int write_cnt = (cnt == 8) ? 0 : cnt;
5641		writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);
5642
5643		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5644			SPI_CONTROL_BYTECNT(write_cnt) |
5645			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
5646		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5647		val64 |= SPI_CONTROL_REQ;
5648		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5649		while (exit_cnt < 5) {
5650			val64 = readq(&bar0->spi_control);
5651			if (val64 & SPI_CONTROL_NACK) {
5652				ret = 1;
5653				break;
5654			} else if (val64 & SPI_CONTROL_DONE) {
5655				ret = 0;
5656				break;
5657			}
5658			msleep(50);
5659			exit_cnt++;
5660		}
5661	}
5662	return ret;
5663}
/*
 * s2io_vpd_read - read the adapter's VPD (product name and serial number)
 * through the PCI VPD capability and fill in nic->product_name and
 * nic->serial_num.  Falls back to fixed strings when the read fails.
 */
static void s2io_vpd_read(struct s2io_nic *nic)
{
	u8 *vpd_data;
	u8 data;
	int i = 0, cnt, len, fail = 0;
	int vpd_addr = 0x80;
	struct swStat *swstats = &nic->mac_control.stats_info->sw_stat;

	/* Default strings; overwritten below if the VPD read succeeds.
	 * vpd_addr is the config-space offset of the VPD capability,
	 * which differs between the two board generations.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		strcpy(nic->product_name, "Xframe II 10GbE network adapter");
		vpd_addr = 0x80;
	} else {
		strcpy(nic->product_name, "Xframe I 10GbE network adapter");
		vpd_addr = 0x50;
	}
	strcpy(nic->serial_num, "NOT AVAILABLE");

	vpd_data = kmalloc(256, GFP_KERNEL);
	if (!vpd_data) {
		swstats->mem_alloc_fail_cnt++;
		return;
	}
	swstats->mem_allocated += 256;

	/* Read 256 bytes of VPD, 4 at a time: write the address to the
	 * capability's address register, clear the flag byte, then poll
	 * (5 x 2ms) for the hardware to set it, signalling data ready.
	 */
	for (i = 0; i < 256; i += 4) {
		pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
		pci_read_config_byte(nic->pdev,  (vpd_addr + 2), &data);
		pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
		for (cnt = 0; cnt < 5; cnt++) {
			msleep(2);
			pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
			if (data == 0x80)
				break;
		}
		if (cnt >= 5) {
			DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
			fail = 1;
			break;
		}
		pci_read_config_dword(nic->pdev,  (vpd_addr + 4),
				      (u32 *)&vpd_data[i]);
	}

	if (!fail) {
		/* read serial number of adapter */
		for (cnt = 0; cnt < 252; cnt++) {
			/* Scan for an "SN" tag followed by a length byte;
			 * the length check also keeps the copy in bounds.
			 */
			if ((vpd_data[cnt] == 'S') &&
			    (vpd_data[cnt+1] == 'N')) {
				len = vpd_data[cnt+2];
				if (len < min(VPD_STRING_LEN, 256-cnt-2)) {
					memcpy(nic->serial_num,
					       &vpd_data[cnt + 3],
					       len);
					memset(nic->serial_num+len,
					       0,
					       VPD_STRING_LEN-len);
					break;
				}
			}
		}
	}

	/* The product name string starts at byte 3, its length at byte 1
	 * (NOTE(review): presumably the VPD-R identifier string — layout
	 * not validated beyond the length bound here).
	 */
	if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
		len = vpd_data[1];
		memcpy(nic->product_name, &vpd_data[3], len);
		nic->product_name[len] = 0;
	}
	kfree(vpd_data);
	swstats->mem_freed += 256;
}
5734
5735/**
5736 *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
5737 *  @sp : private member of the device structure, which is a pointer to the
5738 *  s2io_nic structure.
5739 *  @eeprom : pointer to the user level structure provided by ethtool,
5740 *  containing all relevant information.
5741 *  @data_buf : user defined value to be written into Eeprom.
5742 *  Description: Reads the values stored in the Eeprom at given offset
5743 *  for a given length. Stores these values int the input argument data
5744 *  buffer 'data_buf' and returns these to the caller (ethtool.)
5745 *  Return value:
5746 *  int  0 on success
5747 */
5748
5749static int s2io_ethtool_geeprom(struct net_device *dev,
5750				struct ethtool_eeprom *eeprom, u8 * data_buf)
5751{
5752	u32 i, valid;
5753	u64 data;
5754	struct s2io_nic *sp = netdev_priv(dev);
5755
5756	eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5757
5758	if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5759		eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5760
5761	for (i = 0; i < eeprom->len; i += 4) {
5762		if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5763			DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5764			return -EFAULT;
5765		}
5766		valid = INV(data);
5767		memcpy((data_buf + i), &valid, 4);
5768	}
5769	return 0;
5770}
5771
5772/**
5773 *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5774 *  @sp : private member of the device structure, which is a pointer to the
5775 *  s2io_nic structure.
5776 *  @eeprom : pointer to the user level structure provided by ethtool,
5777 *  containing all relevant information.
5778 *  @data_buf ; user defined value to be written into Eeprom.
5779 *  Description:
5780 *  Tries to write the user provided value in the Eeprom, at the offset
5781 *  given by the user.
5782 *  Return value:
5783 *  0 on success, -EFAULT on failure.
5784 */
5785
5786static int s2io_ethtool_seeprom(struct net_device *dev,
5787				struct ethtool_eeprom *eeprom,
5788				u8 *data_buf)
5789{
5790	int len = eeprom->len, cnt = 0;
5791	u64 valid = 0, data;
5792	struct s2io_nic *sp = netdev_priv(dev);
5793
5794	if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5795		DBG_PRINT(ERR_DBG,
5796			  "ETHTOOL_WRITE_EEPROM Err: "
5797			  "Magic value is wrong, it is 0x%x should be 0x%x\n",
5798			  (sp->pdev->vendor | (sp->pdev->device << 16)),
5799			  eeprom->magic);
5800		return -EFAULT;
5801	}
5802
5803	while (len) {
5804		data = (u32)data_buf[cnt] & 0x000000FF;
5805		if (data)
5806			valid = (u32)(data << 24);
5807		else
5808			valid = data;
5809
5810		if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5811			DBG_PRINT(ERR_DBG,
5812				  "ETHTOOL_WRITE_EEPROM Err: "
5813				  "Cannot write into the specified offset\n");
5814			return -EFAULT;
5815		}
5816		cnt++;
5817		len--;
5818	}
5819
5820	return 0;
5821}
5822
5823/**
5824 * s2io_register_test - reads and writes into all clock domains.
5825 * @sp : private member of the device structure, which is a pointer to the
5826 * s2io_nic structure.
5827 * @data : variable that returns the result of each of the test conducted b
5828 * by the driver.
5829 * Description:
5830 * Read and write into all clock domains. The NIC has 3 clock domains,
5831 * see that registers in all the three regions are accessible.
5832 * Return value:
5833 * 0 on success.
5834 */
5835
5836static int s2io_register_test(struct s2io_nic *sp, uint64_t *data)
5837{
5838	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5839	u64 val64 = 0, exp_val;
5840	int fail = 0;
5841
5842	val64 = readq(&bar0->pif_rd_swapper_fb);
5843	if (val64 != 0x123456789abcdefULL) {
5844		fail = 1;
5845		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 1);
5846	}
5847
5848	val64 = readq(&bar0->rmac_pause_cfg);
5849	if (val64 != 0xc000ffff00000000ULL) {
5850		fail = 1;
5851		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 2);
5852	}
5853
5854	val64 = readq(&bar0->rx_queue_cfg);
5855	if (sp->device_type == XFRAME_II_DEVICE)
5856		exp_val = 0x0404040404040404ULL;
5857	else
5858		exp_val = 0x0808080808080808ULL;
5859	if (val64 != exp_val) {
5860		fail = 1;
5861		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 3);
5862	}
5863
5864	val64 = readq(&bar0->xgxs_efifo_cfg);
5865	if (val64 != 0x000000001923141EULL) {
5866		fail = 1;
5867		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 4);
5868	}
5869
5870	val64 = 0x5A5A5A5A5A5A5A5AULL;
5871	writeq(val64, &bar0->xmsi_data);
5872	val64 = readq(&bar0->xmsi_data);
5873	if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5874		fail = 1;
5875		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 1);
5876	}
5877
5878	val64 = 0xA5A5A5A5A5A5A5A5ULL;
5879	writeq(val64, &bar0->xmsi_data);
5880	val64 = readq(&bar0->xmsi_data);
5881	if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5882		fail = 1;
5883		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 2);
5884	}
5885
5886	*data = fail;
5887	return fail;
5888}
5889
5890/**
5891 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5892 * @sp : private member of the device structure, which is a pointer to the
5893 * s2io_nic structure.
5894 * @data:variable that returns the result of each of the test conducted by
5895 * the driver.
5896 * Description:
5897 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5898 * register.
5899 * Return value:
5900 * 0 on success.
5901 */
5902
5903static int s2io_eeprom_test(struct s2io_nic *sp, uint64_t *data)
5904{
5905	int fail = 0;
5906	u64 ret_data, org_4F0, org_7F0;
5907	u8 saved_4F0 = 0, saved_7F0 = 0;
5908	struct net_device *dev = sp->dev;
5909
5910	/* Test Write Error at offset 0 */
5911	/* Note that SPI interface allows write access to all areas
5912	 * of EEPROM. Hence doing all negative testing only for Xframe I.
5913	 */
5914	if (sp->device_type == XFRAME_I_DEVICE)
5915		if (!write_eeprom(sp, 0, 0, 3))
5916			fail = 1;
5917
5918	/* Save current values at offsets 0x4F0 and 0x7F0 */
5919	if (!read_eeprom(sp, 0x4F0, &org_4F0))
5920		saved_4F0 = 1;
5921	if (!read_eeprom(sp, 0x7F0, &org_7F0))
5922		saved_7F0 = 1;
5923
5924	/* Test Write at offset 4f0 */
5925	if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5926		fail = 1;
5927	if (read_eeprom(sp, 0x4F0, &ret_data))
5928		fail = 1;
5929
5930	if (ret_data != 0x012345) {
5931		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5932			  "Data written %llx Data read %llx\n",
5933			  dev->name, (unsigned long long)0x12345,
5934			  (unsigned long long)ret_data);
5935		fail = 1;
5936	}
5937
5938	/* Reset the EEPROM data go FFFF */
5939	write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5940
5941	/* Test Write Request Error at offset 0x7c */
5942	if (sp->device_type == XFRAME_I_DEVICE)
5943		if (!write_eeprom(sp, 0x07C, 0, 3))
5944			fail = 1;
5945
5946	/* Test Write Request at offset 0x7f0 */
5947	if (write_eeprom(sp, 0x7F0, 0x012345, 3))
5948		fail = 1;
5949	if (read_eeprom(sp, 0x7F0, &ret_data))
5950		fail = 1;
5951
5952	if (ret_data != 0x012345) {
5953		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5954			  "Data written %llx Data read %llx\n",
5955			  dev->name, (unsigned long long)0x12345,
5956			  (unsigned long long)ret_data);
5957		fail = 1;
5958	}
5959
5960	/* Reset the EEPROM data go FFFF */
5961	write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
5962
5963	if (sp->device_type == XFRAME_I_DEVICE) {
5964		/* Test Write Error at offset 0x80 */
5965		if (!write_eeprom(sp, 0x080, 0, 3))
5966			fail = 1;
5967
5968		/* Test Write Error at offset 0xfc */
5969		if (!write_eeprom(sp, 0x0FC, 0, 3))
5970			fail = 1;
5971
5972		/* Test Write Error at offset 0x100 */
5973		if (!write_eeprom(sp, 0x100, 0, 3))
5974			fail = 1;
5975
5976		/* Test Write Error at offset 4ec */
5977		if (!write_eeprom(sp, 0x4EC, 0, 3))
5978			fail = 1;
5979	}
5980
5981	/* Restore values at offsets 0x4F0 and 0x7F0 */
5982	if (saved_4F0)
5983		write_eeprom(sp, 0x4F0, org_4F0, 3);
5984	if (saved_7F0)
5985		write_eeprom(sp, 0x7F0, org_7F0, 3);
5986
5987	*data = fail;
5988	return fail;
5989}
5990
5991/**
5992 * s2io_bist_test - invokes the MemBist test of the card .
5993 * @sp : private member of the device structure, which is a pointer to the
5994 * s2io_nic structure.
5995 * @data:variable that returns the result of each of the test conducted by
5996 * the driver.
5997 * Description:
5998 * This invokes the MemBist test of the card. We give around
5999 * 2 secs time for the Test to complete. If it's still not complete
6000 * within this peiod, we consider that the test failed.
6001 * Return value:
6002 * 0 on success and -1 on failure.
6003 */
6004
6005static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
6006{
6007	u8 bist = 0;
6008	int cnt = 0, ret = -1;
6009
6010	pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6011	bist |= PCI_BIST_START;
6012	pci_write_config_word(sp->pdev, PCI_BIST, bist);
6013
6014	while (cnt < 20) {
6015		pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6016		if (!(bist & PCI_BIST_START)) {
6017			*data = (bist & PCI_BIST_CODE_MASK);
6018			ret = 0;
6019			break;
6020		}
6021		msleep(100);
6022		cnt++;
6023	}
6024
6025	return ret;
6026}
6027
6028/**
6029 * s2io_link_test - verifies the link state of the nic
6030 * @sp ; private member of the device structure, which is a pointer to the
6031 * s2io_nic structure.
6032 * @data: variable that returns the result of each of the test conducted by
6033 * the driver.
6034 * Description:
6035 * The function verifies the link state of the NIC and updates the input
6036 * argument 'data' appropriately.
6037 * Return value:
6038 * 0 on success.
6039 */
6040
6041static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
6042{
6043	struct XENA_dev_config __iomem *bar0 = sp->bar0;
6044	u64 val64;
6045
6046	val64 = readq(&bar0->adapter_status);
6047	if (!(LINK_IS_UP(val64)))
6048		*data = 1;
6049	else
6050		*data = 0;
6051
6052	return *data;
6053}
6054
6055/**
6056 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6057 * @sp: private member of the device structure, which is a pointer to the
6058 * s2io_nic structure.
6059 * @data: variable that returns the result of each of the test
6060 * conducted by the driver.
6061 * Description:
6062 *  This is one of the offline test that tests the read and write
6063 *  access to the RldRam chip on the NIC.
6064 * Return value:
6065 *  0 on success.
6066 */
6067
6068static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data)
6069{
6070	struct XENA_dev_config __iomem *bar0 = sp->bar0;
6071	u64 val64;
6072	int cnt, iteration = 0, test_fail = 0;
6073
6074	val64 = readq(&bar0->adapter_control);
6075	val64 &= ~ADAPTER_ECC_EN;
6076	writeq(val64, &bar0->adapter_control);
6077
6078	val64 = readq(&bar0->mc_rldram_test_ctrl);
6079	val64 |= MC_RLDRAM_TEST_MODE;
6080	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6081
6082	val64 = readq(&bar0->mc_rldram_mrs);
6083	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
6084	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6085
6086	val64 |= MC_RLDRAM_MRS_ENABLE;
6087	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6088
6089	while (iteration < 2) {
6090		val64 = 0x55555555aaaa0000ULL;
6091		if (iteration == 1)
6092			val64 ^= 0xFFFFFFFFFFFF0000ULL;
6093		writeq(val64, &bar0->mc_rldram_test_d0);
6094
6095		val64 = 0xaaaa5a5555550000ULL;
6096		if (iteration == 1)
6097			val64 ^= 0xFFFFFFFFFFFF0000ULL;
6098		writeq(val64, &bar0->mc_rldram_test_d1);
6099
6100		val64 = 0x55aaaaaaaa5a0000ULL;
6101		if (iteration == 1)
6102			val64 ^= 0xFFFFFFFFFFFF0000ULL;
6103		writeq(val64, &bar0->mc_rldram_test_d2);
6104
6105		val64 = (u64) (0x0000003ffffe0100ULL);
6106		writeq(val64, &bar0->mc_rldram_test_add);
6107
6108		val64 = MC_RLDRAM_TEST_MODE |
6109			MC_RLDRAM_TEST_WRITE |
6110			MC_RLDRAM_TEST_GO;
6111		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6112
6113		for (cnt = 0; cnt < 5; cnt++) {
6114			val64 = readq(&bar0->mc_rldram_test_ctrl);
6115			if (val64 & MC_RLDRAM_TEST_DONE)
6116				break;
6117			msleep(200);
6118		}
6119
6120		if (cnt == 5)
6121			break;
6122
6123		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
6124		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6125
6126		for (cnt = 0; cnt < 5; cnt++) {
6127			val64 = readq(&bar0->mc_rldram_test_ctrl);
6128			if (val64 & MC_RLDRAM_TEST_DONE)
6129				break;
6130			msleep(500);
6131		}
6132
6133		if (cnt == 5)
6134			break;
6135
6136		val64 = readq(&bar0->mc_rldram_test_ctrl);
6137		if (!(val64 & MC_RLDRAM_TEST_PASS))
6138			test_fail = 1;
6139
6140		iteration++;
6141	}
6142
6143	*data = test_fail;
6144
6145	/* Bring the adapter out of test mode */
6146	SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
6147
6148	return test_fail;
6149}
6150
6151/**
 *  s2io_ethtool_test - conducts 6 tests to determine the health of card.
6153 *  @sp : private member of the device structure, which is a pointer to the
6154 *  s2io_nic structure.
6155 *  @ethtest : pointer to a ethtool command specific structure that will be
6156 *  returned to the user.
6157 *  @data : variable that returns the result of each of the test
6158 * conducted by the driver.
6159 * Description:
6160 *  This function conducts 6 tests ( 4 offline and 2 online) to determine
6161 *  the health of the card.
6162 * Return value:
6163 *  void
6164 */
6165
6166static void s2io_ethtool_test(struct net_device *dev,
6167			      struct ethtool_test *ethtest,
6168			      uint64_t *data)
6169{
6170	struct s2io_nic *sp = netdev_priv(dev);
6171	int orig_state = netif_running(sp->dev);
6172
6173	if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
6174		/* Offline Tests. */
6175		if (orig_state)
6176			s2io_close(sp->dev);
6177
6178		if (s2io_register_test(sp, &data[0]))
6179			ethtest->flags |= ETH_TEST_FL_FAILED;
6180
6181		s2io_reset(sp);
6182
6183		if (s2io_rldram_test(sp, &data[3]))
6184			ethtest->flags |= ETH_TEST_FL_FAILED;
6185
6186		s2io_reset(sp);
6187
6188		if (s2io_eeprom_test(sp, &data[1]))
6189			ethtest->flags |= ETH_TEST_FL_FAILED;
6190
6191		if (s2io_bist_test(sp, &data[4]))
6192			ethtest->flags |= ETH_TEST_FL_FAILED;
6193
6194		if (orig_state)
6195			s2io_open(sp->dev);
6196
6197		data[2] = 0;
6198	} else {
6199		/* Online Tests. */
6200		if (!orig_state) {
6201			DBG_PRINT(ERR_DBG, "%s: is not up, cannot run test\n",
6202				  dev->name);
6203			data[0] = -1;
6204			data[1] = -1;
6205			data[2] = -1;
6206			data[3] = -1;
6207			data[4] = -1;
6208		}
6209
6210		if (s2io_link_test(sp, &data[2]))
6211			ethtest->flags |= ETH_TEST_FL_FAILED;
6212
6213		data[0] = 0;
6214		data[1] = 0;
6215		data[3] = 0;
6216		data[4] = 0;
6217	}
6218}
6219
/**
 *  s2io_get_ethtool_stats - collects driver statistics for ethtool -S.
 *  @dev: device pointer.
 *  @estats: ethtool stats request structure (entry count comes from
 *  s2io_get_sset_count).
 *  @tmp_stats: array receiving the values, in the same order as the
 *  ethtool stats string tables.
 *  Description:
 *  Copies the little-endian hardware statistics block into @tmp_stats.
 *  Many 32-bit hardware counters carry a separate 32-bit overflow word;
 *  those pairs are folded into one 64-bit value here.  The hardware
 *  stats are followed by Hercules-only enhanced stats (when present)
 *  and the driver's software/XPAK statistics.
 */
static void s2io_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *estats,
				   u64 *tmp_stats)
{
	int i = 0, k;
	struct s2io_nic *sp = netdev_priv(dev);
	struct stat_block *stats = sp->mac_control.stats_info;
	struct swStat *swstats = &stats->sw_stat;
	struct xpakStat *xstats = &stats->xpak_stat;

	/* Refresh the DMA'd hardware statistics block before copying. */
	s2io_updt_stats(sp);

	/* Tx MAC statistics. */
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_frms_oflow) << 32  |
		le32_to_cpu(stats->tmac_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
		le32_to_cpu(stats->tmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_mcst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_bcst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_bcst_frms);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_pause_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stats->tmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_ucst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_nucst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_any_err_frms);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_vld_ip_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_vld_ip_oflow) << 32 |
		le32_to_cpu(stats->tmac_vld_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stats->tmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_icmp_oflow) << 32 |
		le32_to_cpu(stats->tmac_icmp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_rst_tcp_oflow) << 32 |
		le32_to_cpu(stats->tmac_rst_tcp);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_tcp);
	tmp_stats[i++] = (u64)le32_to_cpu(stats->tmac_udp_oflow) << 32 |
		le32_to_cpu(stats->tmac_udp);

	/* Rx MAC statistics. */
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_vld_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
		le32_to_cpu(stats->rmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_fcs_err_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_vld_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_vld_bcst_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_vld_bcst_frms);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_in_rng_len_err_frms);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_out_rng_len_err_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_long_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_pause_ctrl_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_unsup_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stats->rmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_accepted_ucst_frms_oflow) << 32
		| le32_to_cpu(stats->rmac_accepted_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_accepted_nucst_frms_oflow)
		<< 32 | le32_to_cpu(stats->rmac_accepted_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_discarded_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_discarded_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_drop_events_oflow)
		<< 32 | le32_to_cpu(stats->rmac_drop_events);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_usized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_osized_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_osized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_frag_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_frag_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_jabber_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_jabber_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_64_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_65_127_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_128_255_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_256_511_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_512_1023_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_1024_1518_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_ip_oflow) << 32 |
		le32_to_cpu(stats->rmac_ip);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ip_octets);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_hdr_err_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stats->rmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_icmp_oflow) << 32 |
		le32_to_cpu(stats->rmac_icmp);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_tcp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_udp_oflow) << 32 |
		le32_to_cpu(stats->rmac_udp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_err_drp_udp_oflow) << 32 |
		le32_to_cpu(stats->rmac_err_drp_udp);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_err_sym);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q0);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q1);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q2);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q3);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q4);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q5);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q6);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q7);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q0);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q1);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q2);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q3);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q4);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q5);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q6);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q7);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_pause_cnt_oflow) << 32 |
		le32_to_cpu(stats->rmac_pause_cnt);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_data_err_cnt);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_ctrl_err_cnt);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_accepted_ip_oflow) << 32 |
		le32_to_cpu(stats->rmac_accepted_ip);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_err_tcp);

	/* PCI read/write request and retry counters. */
	tmp_stats[i++] = le32_to_cpu(stats->rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_rd_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_disc_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_wr_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txp_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rxd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rxd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txf_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rxf_wr_cnt);

	/* Enhanced statistics exist only for Hercules */
	if (sp->device_type == XFRAME_II_DEVICE) {
		tmp_stats[i++] =
			le64_to_cpu(stats->rmac_ttl_1519_4095_frms);
		tmp_stats[i++] =
			le64_to_cpu(stats->rmac_ttl_4096_8191_frms);
		tmp_stats[i++] =
			le64_to_cpu(stats->rmac_ttl_8192_max_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_gt_max_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_osized_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_jabber_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_gt_max_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_vlan_frms);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_len_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_fcs_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_pf_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_da_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_red_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_rts_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_ingm_full_discard);
		tmp_stats[i++] = le32_to_cpu(stats->link_fault_cnt);
	}

	/* Driver software statistics.  The leading zero keeps this block
	 * aligned with the driver stats string table.
	 */
	tmp_stats[i++] = 0;
	tmp_stats[i++] = swstats->single_ecc_errs;
	tmp_stats[i++] = swstats->double_ecc_errs;
	tmp_stats[i++] = swstats->parity_err_cnt;
	tmp_stats[i++] = swstats->serious_err_cnt;
	tmp_stats[i++] = swstats->soft_reset_cnt;
	tmp_stats[i++] = swstats->fifo_full_cnt;
	for (k = 0; k < MAX_RX_RINGS; k++)
		tmp_stats[i++] = swstats->ring_full_cnt[k];
	/* XPAK transceiver alarm/warning counters. */
	tmp_stats[i++] = xstats->alarm_transceiver_temp_high;
	tmp_stats[i++] = xstats->alarm_transceiver_temp_low;
	tmp_stats[i++] = xstats->alarm_laser_bias_current_high;
	tmp_stats[i++] = xstats->alarm_laser_bias_current_low;
	tmp_stats[i++] = xstats->alarm_laser_output_power_high;
	tmp_stats[i++] = xstats->alarm_laser_output_power_low;
	tmp_stats[i++] = xstats->warn_transceiver_temp_high;
	tmp_stats[i++] = xstats->warn_transceiver_temp_low;
	tmp_stats[i++] = xstats->warn_laser_bias_current_high;
	tmp_stats[i++] = xstats->warn_laser_bias_current_low;
	tmp_stats[i++] = xstats->warn_laser_output_power_high;
	tmp_stats[i++] = xstats->warn_laser_output_power_low;
	tmp_stats[i++] = swstats->clubbed_frms_cnt;
	tmp_stats[i++] = swstats->sending_both;
	tmp_stats[i++] = swstats->outof_sequence_pkts;
	tmp_stats[i++] = swstats->flush_max_pkts;
	if (swstats->num_aggregations) {
		u64 tmp = swstats->sum_avg_pkts_aggregated;
		int count = 0;
		/*
		 * Since 64-bit divide does not work on all platforms,
		 * do repeated subtraction.
		 */
		while (tmp >= swstats->num_aggregations) {
			tmp -= swstats->num_aggregations;
			count++;
		}
		tmp_stats[i++] = count;
	} else
		tmp_stats[i++] = 0;
	tmp_stats[i++] = swstats->mem_alloc_fail_cnt;
	tmp_stats[i++] = swstats->pci_map_fail_cnt;
	tmp_stats[i++] = swstats->watchdog_timer_cnt;
	tmp_stats[i++] = swstats->mem_allocated;
	tmp_stats[i++] = swstats->mem_freed;
	tmp_stats[i++] = swstats->link_up_cnt;
	tmp_stats[i++] = swstats->link_down_cnt;
	tmp_stats[i++] = swstats->link_up_time;
	tmp_stats[i++] = swstats->link_down_time;

	/* Per-cause Tx error counters. */
	tmp_stats[i++] = swstats->tx_buf_abort_cnt;
	tmp_stats[i++] = swstats->tx_desc_abort_cnt;
	tmp_stats[i++] = swstats->tx_parity_err_cnt;
	tmp_stats[i++] = swstats->tx_link_loss_cnt;
	tmp_stats[i++] = swstats->tx_list_proc_err_cnt;

	/* Per-cause Rx error counters. */
	tmp_stats[i++] = swstats->rx_parity_err_cnt;
	tmp_stats[i++] = swstats->rx_abort_cnt;
	tmp_stats[i++] = swstats->rx_parity_abort_cnt;
	tmp_stats[i++] = swstats->rx_rda_fail_cnt;
	tmp_stats[i++] = swstats->rx_unkn_prot_cnt;
	tmp_stats[i++] = swstats->rx_fcs_err_cnt;
	tmp_stats[i++] = swstats->rx_buf_size_err_cnt;
	tmp_stats[i++] = swstats->rx_rxd_corrupt_cnt;
	tmp_stats[i++] = swstats->rx_unkn_err_cnt;
	tmp_stats[i++] = swstats->tda_err_cnt;
	tmp_stats[i++] = swstats->pfc_err_cnt;
	tmp_stats[i++] = swstats->pcc_err_cnt;
	tmp_stats[i++] = swstats->tti_err_cnt;
	tmp_stats[i++] = swstats->tpa_err_cnt;
	tmp_stats[i++] = swstats->sm_err_cnt;
	tmp_stats[i++] = swstats->lso_err_cnt;
	tmp_stats[i++] = swstats->mac_tmac_err_cnt;
	tmp_stats[i++] = swstats->mac_rmac_err_cnt;
	tmp_stats[i++] = swstats->xgxs_txgxs_err_cnt;
	tmp_stats[i++] = swstats->xgxs_rxgxs_err_cnt;
	tmp_stats[i++] = swstats->rc_err_cnt;
	tmp_stats[i++] = swstats->prc_pcix_err_cnt;
	tmp_stats[i++] = swstats->rpa_err_cnt;
	tmp_stats[i++] = swstats->rda_err_cnt;
	tmp_stats[i++] = swstats->rti_err_cnt;
	tmp_stats[i++] = swstats->mc_err_cnt;
}
6496
/* Report the size of the register dump returned by s2io_ethtool_gregs(). */
static int s2io_ethtool_get_regs_len(struct net_device *dev)
{
	return XENA_REG_SPACE;
}
6501
6502
/* Report the size of the on-board EEPROM exposed through ethtool. */
static int s2io_get_eeprom_len(struct net_device *dev)
{
	return XENA_EEPROM_SPACE;
}
6507
6508static int s2io_get_sset_count(struct net_device *dev, int sset)
6509{
6510	struct s2io_nic *sp = netdev_priv(dev);
6511
6512	switch (sset) {
6513	case ETH_SS_TEST:
6514		return S2IO_TEST_LEN;
6515	case ETH_SS_STATS:
6516		switch (sp->device_type) {
6517		case XFRAME_I_DEVICE:
6518			return XFRAME_I_STAT_LEN;
6519		case XFRAME_II_DEVICE:
6520			return XFRAME_II_STAT_LEN;
6521		default:
6522			return 0;
6523		}
6524	default:
6525		return -EOPNOTSUPP;
6526	}
6527}
6528
6529static void s2io_ethtool_get_strings(struct net_device *dev,
6530				     u32 stringset, u8 *data)
6531{
6532	int stat_size = 0;
6533	struct s2io_nic *sp = netdev_priv(dev);
6534
6535	switch (stringset) {
6536	case ETH_SS_TEST:
6537		memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6538		break;
6539	case ETH_SS_STATS:
6540		stat_size = sizeof(ethtool_xena_stats_keys);
6541		memcpy(data, &ethtool_xena_stats_keys, stat_size);
6542		if (sp->device_type == XFRAME_II_DEVICE) {
6543			memcpy(data + stat_size,
6544			       &ethtool_enhanced_stats_keys,
6545			       sizeof(ethtool_enhanced_stats_keys));
6546			stat_size += sizeof(ethtool_enhanced_stats_keys);
6547		}
6548
6549		memcpy(data + stat_size, &ethtool_driver_stats_keys,
6550		       sizeof(ethtool_driver_stats_keys));
6551	}
6552}
6553
6554static int s2io_set_features(struct net_device *dev, netdev_features_t features)
6555{
6556	struct s2io_nic *sp = netdev_priv(dev);
6557	netdev_features_t changed = (features ^ dev->features) & NETIF_F_LRO;
6558
6559	if (changed && netif_running(dev)) {
6560		int rc;
6561
6562		s2io_stop_all_tx_queue(sp);
6563		s2io_card_down(sp);
6564		dev->features = features;
6565		rc = s2io_card_up(sp);
6566		if (rc)
6567			s2io_reset(sp);
6568		else
6569			s2io_start_all_tx_queue(sp);
6570
6571		return rc ? rc : 1;
6572	}
6573
6574	return 0;
6575}
6576
/* ethtool entry points for this driver. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo = s2io_ethtool_gdrvinfo,
	.get_regs_len = s2io_ethtool_get_regs_len,
	.get_regs = s2io_ethtool_gregs,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = s2io_get_eeprom_len,
	.get_eeprom = s2io_ethtool_geeprom,
	.set_eeprom = s2io_ethtool_seeprom,
	.get_ringparam = s2io_ethtool_gringparam,
	.get_pauseparam = s2io_ethtool_getpause_data,
	.set_pauseparam = s2io_ethtool_setpause_data,
	.self_test = s2io_ethtool_test,
	.get_strings = s2io_ethtool_get_strings,
	.set_phys_id = s2io_ethtool_set_led,
	.get_ethtool_stats = s2io_get_ethtool_stats,
	.get_sset_count = s2io_get_sset_count,
	.get_link_ksettings = s2io_ethtool_get_link_ksettings,
	.set_link_ksettings = s2io_ethtool_set_link_ksettings,
};
6596
6597/**
6598 *  s2io_ioctl - Entry point for the Ioctl
6599 *  @dev :  Device pointer.
 *  @ifr :  An IOCTL specific structure, that can contain a pointer to
6601 *  a proprietary structure used to pass information to the driver.
6602 *  @cmd :  This is used to distinguish between the different commands that
6603 *  can be passed to the IOCTL functions.
6604 *  Description:
 *  Currently no special functionality is supported in IOCTL, hence
 *  this function always returns -EOPNOTSUPP.
6607 */
6608
static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	/* No private ioctls are implemented. */
	return -EOPNOTSUPP;
}
6613
6614/**
6615 *  s2io_change_mtu - entry point to change MTU size for the device.
6616 *   @dev : device pointer.
6617 *   @new_mtu : the new MTU size for the device.
6618 *   Description: A driver entry point to change MTU size for the device.
6619 *   Before changing the MTU the device must be stopped.
6620 *  Return value:
6621 *   0 on success and an appropriate (-)ve integer as defined in errno.h
6622 *   file on failure.
6623 */
6624
6625static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6626{
6627	struct s2io_nic *sp = netdev_priv(dev);
6628	int ret = 0;
6629
 
 
 
 
 
6630	dev->mtu = new_mtu;
6631	if (netif_running(dev)) {
6632		s2io_stop_all_tx_queue(sp);
6633		s2io_card_down(sp);
6634		ret = s2io_card_up(sp);
6635		if (ret) {
6636			DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6637				  __func__);
6638			return ret;
6639		}
6640		s2io_wake_all_tx_queue(sp);
6641	} else { /* Device is down */
6642		struct XENA_dev_config __iomem *bar0 = sp->bar0;
6643		u64 val64 = new_mtu;
6644
6645		writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6646	}
6647
6648	return ret;
6649}
6650
6651/**
6652 * s2io_set_link - Set the LInk status
6653 * @data: long pointer to device private structue
6654 * Description: Sets the link status for the adapter
6655 */
6656
static void s2io_set_link(struct work_struct *work)
{
	struct s2io_nic *nic = container_of(work, struct s2io_nic,
					    set_link_task);
	struct net_device *dev = nic->dev;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64;
	u16 subid;

	rtnl_lock();

	/* Interface may have been brought down since the work was queued. */
	if (!netif_running(dev))
		goto out_unlock;

	if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
		/* The card is being reset, no point doing anything */
		goto out_unlock;
	}

	subid = nic->pdev->subsystem_device;
	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Allow a small delay for the NICs self initiated
		 * cleanup to complete.
		 */
		msleep(100);
	}

	val64 = readq(&bar0->adapter_status);
	if (LINK_IS_UP(val64)) {
		/* Enable the adapter if it is not already enabled. */
		if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
			if (verify_xena_quiescence(nic)) {
				val64 = readq(&bar0->adapter_control);
				val64 |= ADAPTER_CNTL_EN;
				writeq(val64, &bar0->adapter_control);
				/*
				 * Some boards drive the link LED through
				 * GPIO 0 instead of the adapter_control
				 * LED bit; the readback flushes the write.
				 */
				if (CARDS_WITH_FAULTY_LINK_INDICATORS(
					    nic->device_type, subid)) {
					val64 = readq(&bar0->gpio_control);
					val64 |= GPIO_CTRL_GPIO_0;
					writeq(val64, &bar0->gpio_control);
					val64 = readq(&bar0->gpio_control);
				} else {
					val64 |= ADAPTER_LED_ON;
					writeq(val64, &bar0->adapter_control);
				}
				nic->device_enabled_once = true;
			} else {
				DBG_PRINT(ERR_DBG,
					  "%s: Error: device is not Quiescent\n",
					  dev->name);
				s2io_stop_all_tx_queue(nic);
			}
		}
		/* Turn the link LED on and report link up. */
		val64 = readq(&bar0->adapter_control);
		val64 |= ADAPTER_LED_ON;
		writeq(val64, &bar0->adapter_control);
		s2io_link(nic, LINK_UP);
	} else {
		/* Link down: clear the GPIO LED on affected boards. */
		if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
						      subid)) {
			val64 = readq(&bar0->gpio_control);
			val64 &= ~GPIO_CTRL_GPIO_0;
			writeq(val64, &bar0->gpio_control);
			val64 = readq(&bar0->gpio_control);
		}
		/* turn off LED */
		val64 = readq(&bar0->adapter_control);
		val64 = val64 & (~ADAPTER_LED_ON);
		writeq(val64, &bar0->adapter_control);
		s2io_link(nic, LINK_DOWN);
	}
	clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));

out_unlock:
	rtnl_unlock();
}
6733
/*
 * Repopulate the buffer pointers of one Rx descriptor so its ownership
 * bit can be handed back to the hardware during teardown.
 *
 * A descriptor whose Host_Control is non-zero already carries its skb
 * and is left alone.  Otherwise, if *skb is set the previously mapped
 * DMA addresses (*temp0..*temp2) are reused; if not, a fresh skb of
 * @size bytes is allocated and DMA-mapped, and its mapping is cached in
 * the temp variables for subsequent descriptors.
 *
 * Returns 0 on success, -ENOMEM on allocation or DMA-mapping failure
 * (any mappings made so far for this descriptor are unwound and the skb
 * is freed).
 */
static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
				  struct buffAdd *ba,
				  struct sk_buff **skb, u64 *temp0, u64 *temp1,
				  u64 *temp2, int size)
{
	struct net_device *dev = sp->dev;
	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

	if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
		struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
		/* allocate skb */
		if (*skb) {
			DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
			/*
			 * As Rx frame are not going to be processed,
			 * using same mapped address for the Rxd
			 * buffer pointer
			 */
			rxdp1->Buffer0_ptr = *temp0;
		} else {
			*skb = netdev_alloc_skb(dev, size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG,
					  "%s: Out of memory to allocate %s\n",
					  dev->name, "1 buf mode SKBs");
				stats->mem_alloc_fail_cnt++;
				return -ENOMEM ;
			}
			stats->mem_allocated += (*skb)->truesize;
			/* storing the mapped addr in a temp variable
			 * such it will be used for next rxd whose
			 * Host Control is NULL
			 */
			rxdp1->Buffer0_ptr = *temp0 =
				dma_map_single(&sp->pdev->dev, (*skb)->data,
					       size - NET_IP_ALIGN,
					       DMA_FROM_DEVICE);
			if (dma_mapping_error(&sp->pdev->dev, rxdp1->Buffer0_ptr))
				goto memalloc_failed;
			rxdp->Host_Control = (unsigned long) (*skb);
		}
	} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
		struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
		/* Two buffer Mode */
		if (*skb) {
			/* Reuse the mappings cached from an earlier rxd. */
			rxdp3->Buffer2_ptr = *temp2;
			rxdp3->Buffer0_ptr = *temp0;
			rxdp3->Buffer1_ptr = *temp1;
		} else {
			*skb = netdev_alloc_skb(dev, size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG,
					  "%s: Out of memory to allocate %s\n",
					  dev->name,
					  "2 buf mode SKBs");
				stats->mem_alloc_fail_cnt++;
				return -ENOMEM;
			}
			stats->mem_allocated += (*skb)->truesize;
			/* Buffer 2 holds the frame payload (mtu + 4). */
			rxdp3->Buffer2_ptr = *temp2 =
				dma_map_single(&sp->pdev->dev, (*skb)->data,
					       dev->mtu + 4, DMA_FROM_DEVICE);
			if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer2_ptr))
				goto memalloc_failed;
			/* Buffer 0 holds the header area. */
			rxdp3->Buffer0_ptr = *temp0 =
				dma_map_single(&sp->pdev->dev, ba->ba_0,
					       BUF0_LEN, DMA_FROM_DEVICE);
			if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer0_ptr)) {
				dma_unmap_single(&sp->pdev->dev,
						 (dma_addr_t)rxdp3->Buffer2_ptr,
						 dev->mtu + 4,
						 DMA_FROM_DEVICE);
				goto memalloc_failed;
			}
			rxdp->Host_Control = (unsigned long) (*skb);

			/* Buffer-1 will be dummy buffer not used */
			rxdp3->Buffer1_ptr = *temp1 =
				dma_map_single(&sp->pdev->dev, ba->ba_1,
					       BUF1_LEN, DMA_FROM_DEVICE);
			if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer1_ptr)) {
				dma_unmap_single(&sp->pdev->dev,
						 (dma_addr_t)rxdp3->Buffer0_ptr,
						 BUF0_LEN, DMA_FROM_DEVICE);
				dma_unmap_single(&sp->pdev->dev,
						 (dma_addr_t)rxdp3->Buffer2_ptr,
						 dev->mtu + 4,
						 DMA_FROM_DEVICE);
				goto memalloc_failed;
			}
		}
	}
	return 0;

memalloc_failed:
	stats->pci_map_fail_cnt++;
	stats->mem_freed += (*skb)->truesize;
	dev_kfree_skb(*skb);
	return -ENOMEM;
}
6834
6835static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6836				int size)
6837{
6838	struct net_device *dev = sp->dev;
6839	if (sp->rxd_mode == RXD_MODE_1) {
6840		rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
6841	} else if (sp->rxd_mode == RXD_MODE_3B) {
6842		rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6843		rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6844		rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4);
6845	}
6846}
6847
/*
 * Walk every Rx descriptor of every ring, ensure it has valid buffer
 * pointers and sizes, and hand its ownership bit back to the hardware.
 * Used during card-down to replenish the rings without processing the
 * frames (see do_s2io_card_down).
 *
 * NOTE(review): this always returns 0, even when
 * set_rxd_buffer_pointer() hits -ENOMEM and the walk stops early —
 * presumably intentional best-effort replenish; confirm before relying
 * on the return value.
 */
static  int rxd_owner_bit_reset(struct s2io_nic *sp)
{
	int i, j, k, blk_cnt = 0, size;
	struct config_param *config = &sp->config;
	struct mac_info *mac_control = &sp->mac_control;
	struct net_device *dev = sp->dev;
	struct RxD_t *rxdp = NULL;
	struct sk_buff *skb = NULL;
	struct buffAdd *ba = NULL;
	u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;

	/* Calculate the size based on ring mode */
	size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
		HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
	if (sp->rxd_mode == RXD_MODE_1)
		size += NET_IP_ALIGN;
	else if (sp->rxd_mode == RXD_MODE_3B)
		size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;

	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1);

		for (j = 0; j < blk_cnt; j++) {
			for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
				rxdp = ring->rx_blocks[j].rxds[k].virt_addr;
				if (sp->rxd_mode == RXD_MODE_3B)
					ba = &ring->ba[j][k];
				if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
							   &temp0_64,
							   &temp1_64,
							   &temp2_64,
							   size) == -ENOMEM) {
					return 0;
				}

				set_rxd_buffer_size(sp, rxdp, size);
				/* Buffer writes must land before the
				 * ownership flip is visible to the NIC.
				 */
				dma_wmb();
				/* flip the Ownership bit to Hardware */
				rxdp->Control_1 |= RXD_OWN_XENA;
			}
		}
	}
	return 0;

}
6896
/*
 * Register the driver's interrupt handlers.
 *
 * Tries MSI-X first when configured: one vector per ring
 * (s2io_msix_ring_handle) plus an alarm/TX vector
 * (s2io_msix_fifo_handle).  Any failure along the way falls back to
 * legacy INTA with the shared s2io_isr handler.
 *
 * Returns 0 on success, -1 if even the INTA registration fails.
 */
static int s2io_add_isr(struct s2io_nic *sp)
{
	int ret = 0;
	struct net_device *dev = sp->dev;
	int err = 0;

	if (sp->config.intr_type == MSI_X)
		ret = s2io_enable_msi_x(sp);
	if (ret) {
		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
		sp->config.intr_type = INTA;
	}

	/*
	 * Store the values of the MSIX table in
	 * the struct s2io_nic structure
	 */
	store_xmsi_data(sp);

	/* After proper initialization of H/W, register ISR */
	if (sp->config.intr_type == MSI_X) {
		int i, msix_rx_cnt = 0;

		for (i = 0; i < sp->num_entries; i++) {
			if (sp->s2io_entries[i].in_use == MSIX_FLG) {
				if (sp->s2io_entries[i].type ==
				    MSIX_RING_TYPE) {
					snprintf(sp->desc[i],
						sizeof(sp->desc[i]),
						"%s:MSI-X-%d-RX",
						dev->name, i);
					err = request_irq(sp->entries[i].vector,
							  s2io_msix_ring_handle,
							  0,
							  sp->desc[i],
							  sp->s2io_entries[i].arg);
				} else if (sp->s2io_entries[i].type ==
					   MSIX_ALARM_TYPE) {
					snprintf(sp->desc[i],
						sizeof(sp->desc[i]),
						"%s:MSI-X-%d-TX",
						dev->name, i);
					err = request_irq(sp->entries[i].vector,
							  s2io_msix_fifo_handle,
							  0,
							  sp->desc[i],
							  sp->s2io_entries[i].arg);

				}
				/* if either data or addr is zero print it. */
				if (!(sp->msix_info[i].addr &&
				      sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG,
						  "%s @Addr:0x%llx Data:0x%llx\n",
						  sp->desc[i],
						  (unsigned long long)
						  sp->msix_info[i].addr,
						  (unsigned long long)
						  ntohl(sp->msix_info[i].data));
				} else
					msix_rx_cnt++;
				if (err) {
					/* Undo any vectors registered so
					 * far and fall back to INTA.
					 */
					remove_msix_isr(sp);

					DBG_PRINT(ERR_DBG,
						  "%s:MSI-X-%d registration "
						  "failed\n", dev->name, i);

					DBG_PRINT(ERR_DBG,
						  "%s: Defaulting to INTA\n",
						  dev->name);
					sp->config.intr_type = INTA;
					break;
				}
				sp->s2io_entries[i].in_use =
					MSIX_REGISTERED_SUCCESS;
			}
		}
		if (!err) {
			pr_info("MSI-X-RX %d entries enabled\n", --msix_rx_cnt);
			DBG_PRINT(INFO_DBG,
				  "MSI-X-TX entries enabled through alarm vector\n");
		}
	}
	if (sp->config.intr_type == INTA) {
		err = request_irq(sp->pdev->irq, s2io_isr, IRQF_SHARED,
				  sp->name, dev);
		if (err) {
			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
				  dev->name);
			return -1;
		}
	}
	return 0;
}
6992
6993static void s2io_rem_isr(struct s2io_nic *sp)
6994{
6995	if (sp->config.intr_type == MSI_X)
6996		remove_msix_isr(sp);
6997	else
6998		remove_inta_isr(sp);
6999}
7000
/*
 * Bring the card down: stop the alarm timer, NAPI, traffic and ISRs,
 * wait for the adapter to quiesce, then optionally reset it and free
 * all Tx/Rx buffers.  @do_io selects whether hardware register I/O
 * (stop_nic/quiescence polling/reset) is performed — callers on dead
 * hardware pass 0 to skip it.
 */
static void do_s2io_card_down(struct s2io_nic *sp, int do_io)
{
	int cnt = 0;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	register u64 val64 = 0;
	struct config_param *config;
	config = &sp->config;

	if (!is_s2io_card_up(sp))
		return;

	del_timer_sync(&sp->alarm_timer);
	/* If s2io_set_link task is executing, wait till it completes. */
	while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state)))
		msleep(50);
	clear_bit(__S2IO_STATE_CARD_UP, &sp->state);

	/* Disable napi */
	if (sp->config.napi) {
		int off = 0;
		if (config->intr_type ==  MSI_X) {
			/* MSI-X uses one NAPI context per Rx ring. */
			for (; off < sp->config.rx_ring_num; off++)
				napi_disable(&sp->mac_control.rings[off].napi);
		}
		else
			napi_disable(&sp->napi);
	}

	/* disable Tx and Rx traffic on the NIC */
	if (do_io)
		stop_nic(sp);

	s2io_rem_isr(sp);

	/* stop the tx queue, indicate link down */
	s2io_link(sp, LINK_DOWN);

	/* Check if the device is Quiescent and then Reset the NIC */
	while (do_io) {
		/* As per the HW requirement we need to replenish the
		 * receive buffer to avoid the ring bump. Since there is
		 * no intention of processing the Rx frame at this point we
		 * are just setting the ownership bit of rxd in Each Rx
		 * ring to HW and set the appropriate buffer size
		 * based on the ring mode
		 */
		rxd_owner_bit_reset(sp);

		val64 = readq(&bar0->adapter_status);
		if (verify_xena_quiescence(sp)) {
			if (verify_pcc_quiescent(sp, sp->device_enabled_once))
				break;
		}

		msleep(50);
		cnt++;
		/* Give up after ~500 ms; proceed with the reset anyway. */
		if (cnt == 10) {
			DBG_PRINT(ERR_DBG, "Device not Quiescent - "
				  "adapter status reads 0x%llx\n",
				  (unsigned long long)val64);
			break;
		}
	}
	if (do_io)
		s2io_reset(sp);

	/* Free all Tx buffers */
	free_tx_buffers(sp);

	/* Free all Rx buffers */
	free_rx_buffers(sp);

	clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
}
7075
/* Normal shutdown path: bring the card down with full hardware I/O. */
static void s2io_card_down(struct s2io_nic *sp)
{
	do_s2io_card_down(sp, 1);
}
7080
7081static int s2io_card_up(struct s2io_nic *sp)
7082{
7083	int i, ret = 0;
7084	struct config_param *config;
7085	struct mac_info *mac_control;
7086	struct net_device *dev = sp->dev;
7087	u16 interruptible;
7088
7089	/* Initialize the H/W I/O registers */
7090	ret = init_nic(sp);
7091	if (ret != 0) {
7092		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
7093			  dev->name);
7094		if (ret != -EIO)
7095			s2io_reset(sp);
7096		return ret;
7097	}
7098
7099	/*
7100	 * Initializing the Rx buffers. For now we are considering only 1
7101	 * Rx ring and initializing buffers into 30 Rx blocks
7102	 */
7103	config = &sp->config;
7104	mac_control = &sp->mac_control;
7105
7106	for (i = 0; i < config->rx_ring_num; i++) {
7107		struct ring_info *ring = &mac_control->rings[i];
7108
7109		ring->mtu = dev->mtu;
7110		ring->lro = !!(dev->features & NETIF_F_LRO);
7111		ret = fill_rx_buffers(sp, ring, 1);
7112		if (ret) {
7113			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
7114				  dev->name);
7115			s2io_reset(sp);
7116			free_rx_buffers(sp);
7117			return -ENOMEM;
7118		}
7119		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
7120			  ring->rx_bufs_left);
7121	}
7122
7123	/* Initialise napi */
7124	if (config->napi) {
7125		if (config->intr_type ==  MSI_X) {
7126			for (i = 0; i < sp->config.rx_ring_num; i++)
7127				napi_enable(&sp->mac_control.rings[i].napi);
7128		} else {
7129			napi_enable(&sp->napi);
7130		}
7131	}
7132
7133	/* Maintain the state prior to the open */
7134	if (sp->promisc_flg)
7135		sp->promisc_flg = 0;
7136	if (sp->m_cast_flg) {
7137		sp->m_cast_flg = 0;
7138		sp->all_multi_pos = 0;
7139	}
7140
7141	/* Setting its receive mode */
7142	s2io_set_multicast(dev);
7143
7144	if (dev->features & NETIF_F_LRO) {
7145		/* Initialize max aggregatable pkts per session based on MTU */
7146		sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
7147		/* Check if we can use (if specified) user provided value */
7148		if (lro_max_pkts < sp->lro_max_aggr_per_sess)
7149			sp->lro_max_aggr_per_sess = lro_max_pkts;
7150	}
7151
7152	/* Enable Rx Traffic and interrupts on the NIC */
7153	if (start_nic(sp)) {
7154		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
7155		s2io_reset(sp);
7156		free_rx_buffers(sp);
7157		return -ENODEV;
7158	}
7159
7160	/* Add interrupt service routine */
7161	if (s2io_add_isr(sp) != 0) {
7162		if (sp->config.intr_type == MSI_X)
7163			s2io_rem_isr(sp);
7164		s2io_reset(sp);
7165		free_rx_buffers(sp);
7166		return -ENODEV;
7167	}
7168
7169	timer_setup(&sp->alarm_timer, s2io_alarm_handle, 0);
7170	mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
7171
7172	set_bit(__S2IO_STATE_CARD_UP, &sp->state);
7173
7174	/*  Enable select interrupts */
7175	en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
7176	if (sp->config.intr_type != INTA) {
7177		interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
7178		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7179	} else {
7180		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
7181		interruptible |= TX_PIC_INTR;
7182		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7183	}
7184
7185	return 0;
7186}
7187
7188/**
7189 * s2io_restart_nic - Resets the NIC.
7190 * @data : long pointer to the device private structure
7191 * Description:
7192 * This function is scheduled to be run by the s2io_tx_watchdog
7193 * function after 0.5 secs to reset the NIC. The idea is to reduce
7194 * the run time of the watch dog routine which is run holding a
7195 * spin lock.
7196 */
7197
7198static void s2io_restart_nic(struct work_struct *work)
7199{
7200	struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
7201	struct net_device *dev = sp->dev;
7202
7203	rtnl_lock();
7204
7205	if (!netif_running(dev))
7206		goto out_unlock;
7207
7208	s2io_card_down(sp);
7209	if (s2io_card_up(sp)) {
7210		DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name);
7211	}
7212	s2io_wake_all_tx_queue(sp);
7213	DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name);
7214out_unlock:
7215	rtnl_unlock();
7216}
7217
7218/**
7219 *  s2io_tx_watchdog - Watchdog for transmit side.
7220 *  @dev : Pointer to net device structure
7221 *  Description:
7222 *  This function is triggered if the Tx Queue is stopped
7223 *  for a pre-defined amount of time when the Interface is still up.
7224 *  If the Interface is jammed in such a situation, the hardware is
7225 *  reset (by s2io_close) and restarted again (by s2io_open) to
7226 *  overcome any problem that might have been caused in the hardware.
7227 *  Return value:
7228 *  void
7229 */
7230
7231static void s2io_tx_watchdog(struct net_device *dev, unsigned int txqueue)
7232{
7233	struct s2io_nic *sp = netdev_priv(dev);
7234	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7235
7236	if (netif_carrier_ok(dev)) {
7237		swstats->watchdog_timer_cnt++;
7238		schedule_work(&sp->rst_timer_task);
7239		swstats->soft_reset_cnt++;
7240	}
7241}
7242
7243/**
7244 *   rx_osm_handler - To perform some OS related operations on SKB.
7245 *   @sp: private member of the device structure,pointer to s2io_nic structure.
7246 *   @skb : the socket buffer pointer.
7247 *   @len : length of the packet
7248 *   @cksum : FCS checksum of the frame.
7249 *   @ring_no : the ring from which this RxD was extracted.
7250 *   Description:
7251 *   This function is called by the Rx interrupt serivce routine to perform
7252 *   some OS related operations on the SKB before passing it to the upper
7253 *   layers. It mainly checks if the checksum is OK, if so adds it to the
7254 *   SKBs cksum variable, increments the Rx packet count and passes the SKB
7255 *   to the upper layer. If the checksum is wrong, it increments the Rx
7256 *   packet error count, frees the SKB and returns error.
7257 *   Return value:
7258 *   SUCCESS on success and -1 on failure.
7259 */
7260static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7261{
7262	struct s2io_nic *sp = ring_data->nic;
7263	struct net_device *dev = ring_data->dev;
7264	struct sk_buff *skb = (struct sk_buff *)
7265		((unsigned long)rxdp->Host_Control);
7266	int ring_no = ring_data->ring_no;
7267	u16 l3_csum, l4_csum;
7268	unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
7269	struct lro *lro;
7270	u8 err_mask;
7271	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7272
7273	skb->dev = dev;
7274
7275	if (err) {
7276		/* Check for parity error */
7277		if (err & 0x1)
7278			swstats->parity_err_cnt++;
7279
7280		err_mask = err >> 48;
7281		switch (err_mask) {
7282		case 1:
7283			swstats->rx_parity_err_cnt++;
7284			break;
7285
7286		case 2:
7287			swstats->rx_abort_cnt++;
7288			break;
7289
7290		case 3:
7291			swstats->rx_parity_abort_cnt++;
7292			break;
7293
7294		case 4:
7295			swstats->rx_rda_fail_cnt++;
7296			break;
7297
7298		case 5:
7299			swstats->rx_unkn_prot_cnt++;
7300			break;
7301
7302		case 6:
7303			swstats->rx_fcs_err_cnt++;
7304			break;
7305
7306		case 7:
7307			swstats->rx_buf_size_err_cnt++;
7308			break;
7309
7310		case 8:
7311			swstats->rx_rxd_corrupt_cnt++;
7312			break;
7313
7314		case 15:
7315			swstats->rx_unkn_err_cnt++;
7316			break;
7317		}
7318		/*
7319		 * Drop the packet if bad transfer code. Exception being
7320		 * 0x5, which could be due to unsupported IPv6 extension header.
7321		 * In this case, we let stack handle the packet.
7322		 * Note that in this case, since checksum will be incorrect,
7323		 * stack will validate the same.
7324		 */
7325		if (err_mask != 0x5) {
7326			DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
7327				  dev->name, err_mask);
7328			dev->stats.rx_crc_errors++;
7329			swstats->mem_freed
7330				+= skb->truesize;
7331			dev_kfree_skb(skb);
7332			ring_data->rx_bufs_left -= 1;
7333			rxdp->Host_Control = 0;
7334			return 0;
7335		}
7336	}
7337
7338	rxdp->Host_Control = 0;
7339	if (sp->rxd_mode == RXD_MODE_1) {
7340		int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
7341
7342		skb_put(skb, len);
7343	} else if (sp->rxd_mode == RXD_MODE_3B) {
7344		int get_block = ring_data->rx_curr_get_info.block_index;
7345		int get_off = ring_data->rx_curr_get_info.offset;
7346		int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
7347		int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
7348		unsigned char *buff = skb_push(skb, buf0_len);
7349
7350		struct buffAdd *ba = &ring_data->ba[get_block][get_off];
7351		memcpy(buff, ba->ba_0, buf0_len);
7352		skb_put(skb, buf2_len);
7353	}
7354
7355	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
7356	    ((!ring_data->lro) ||
7357	     (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG))) &&
7358	    (dev->features & NETIF_F_RXCSUM)) {
7359		l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
7360		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
7361		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
7362			/*
7363			 * NIC verifies if the Checksum of the received
7364			 * frame is Ok or not and accordingly returns
7365			 * a flag in the RxD.
7366			 */
7367			skb->ip_summed = CHECKSUM_UNNECESSARY;
7368			if (ring_data->lro) {
7369				u32 tcp_len = 0;
7370				u8 *tcp;
7371				int ret = 0;
7372
7373				ret = s2io_club_tcp_session(ring_data,
7374							    skb->data, &tcp,
7375							    &tcp_len, &lro,
7376							    rxdp, sp);
7377				switch (ret) {
7378				case 3: /* Begin anew */
7379					lro->parent = skb;
7380					goto aggregate;
7381				case 1: /* Aggregate */
7382					lro_append_pkt(sp, lro, skb, tcp_len);
7383					goto aggregate;
7384				case 4: /* Flush session */
7385					lro_append_pkt(sp, lro, skb, tcp_len);
7386					queue_rx_frame(lro->parent,
7387						       lro->vlan_tag);
7388					clear_lro_session(lro);
7389					swstats->flush_max_pkts++;
7390					goto aggregate;
7391				case 2: /* Flush both */
7392					lro->parent->data_len = lro->frags_len;
7393					swstats->sending_both++;
7394					queue_rx_frame(lro->parent,
7395						       lro->vlan_tag);
7396					clear_lro_session(lro);
7397					goto send_up;
7398				case 0: /* sessions exceeded */
7399				case -1: /* non-TCP or not L2 aggregatable */
7400				case 5: /*
7401					 * First pkt in session not
7402					 * L3/L4 aggregatable
7403					 */
7404					break;
7405				default:
7406					DBG_PRINT(ERR_DBG,
7407						  "%s: Samadhana!!\n",
7408						  __func__);
7409					BUG();
7410				}
7411			}
7412		} else {
7413			/*
7414			 * Packet with erroneous checksum, let the
7415			 * upper layers deal with it.
7416			 */
7417			skb_checksum_none_assert(skb);
7418		}
7419	} else
7420		skb_checksum_none_assert(skb);
7421
7422	swstats->mem_freed += skb->truesize;
7423send_up:
7424	skb_record_rx_queue(skb, ring_no);
7425	queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
7426aggregate:
7427	sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
7428	return SUCCESS;
7429}
7430
7431/**
7432 *  s2io_link - stops/starts the Tx queue.
7433 *  @sp : private member of the device structure, which is a pointer to the
7434 *  s2io_nic structure.
7435 *  @link : inidicates whether link is UP/DOWN.
7436 *  Description:
7437 *  This function stops/starts the Tx queue depending on whether the link
7438 *  status of the NIC is is down or up. This is called by the Alarm
7439 *  interrupt handler whenever a link change interrupt comes up.
7440 *  Return value:
7441 *  void.
7442 */
7443
7444static void s2io_link(struct s2io_nic *sp, int link)
7445{
7446	struct net_device *dev = sp->dev;
7447	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7448
7449	if (link != sp->last_link_state) {
7450		init_tti(sp, link);
7451		if (link == LINK_DOWN) {
7452			DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7453			s2io_stop_all_tx_queue(sp);
7454			netif_carrier_off(dev);
7455			if (swstats->link_up_cnt)
7456				swstats->link_up_time =
7457					jiffies - sp->start_time;
7458			swstats->link_down_cnt++;
7459		} else {
7460			DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7461			if (swstats->link_down_cnt)
7462				swstats->link_down_time =
7463					jiffies - sp->start_time;
7464			swstats->link_up_cnt++;
7465			netif_carrier_on(dev);
7466			s2io_wake_all_tx_queue(sp);
7467		}
7468	}
7469	sp->last_link_state = link;
7470	sp->start_time = jiffies;
7471}
7472
7473/**
7474 *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7475 *  @sp : private member of the device structure, which is a pointer to the
7476 *  s2io_nic structure.
7477 *  Description:
7478 *  This function initializes a few of the PCI and PCI-X configuration registers
7479 *  with recommended values.
7480 *  Return value:
7481 *  void
7482 */
7483
7484static void s2io_init_pci(struct s2io_nic *sp)
7485{
7486	u16 pci_cmd = 0, pcix_cmd = 0;
7487
7488	/* Enable Data Parity Error Recovery in PCI-X command register. */
7489	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7490			     &(pcix_cmd));
7491	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7492			      (pcix_cmd | 1));
7493	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7494			     &(pcix_cmd));
7495
7496	/* Set the PErr Response bit in PCI command register. */
7497	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7498	pci_write_config_word(sp->pdev, PCI_COMMAND,
7499			      (pci_cmd | PCI_COMMAND_PARITY));
7500	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7501}
7502
7503static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7504			    u8 *dev_multiq)
7505{
7506	int i;
7507
7508	if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
7509		DBG_PRINT(ERR_DBG, "Requested number of tx fifos "
7510			  "(%d) not supported\n", tx_fifo_num);
7511
7512		if (tx_fifo_num < 1)
7513			tx_fifo_num = 1;
7514		else
7515			tx_fifo_num = MAX_TX_FIFOS;
7516
7517		DBG_PRINT(ERR_DBG, "Default to %d tx fifos\n", tx_fifo_num);
7518	}
7519
7520	if (multiq)
7521		*dev_multiq = multiq;
7522
7523	if (tx_steering_type && (1 == tx_fifo_num)) {
7524		if (tx_steering_type != TX_DEFAULT_STEERING)
7525			DBG_PRINT(ERR_DBG,
7526				  "Tx steering is not supported with "
7527				  "one fifo. Disabling Tx steering.\n");
7528		tx_steering_type = NO_STEERING;
7529	}
7530
7531	if ((tx_steering_type < NO_STEERING) ||
7532	    (tx_steering_type > TX_DEFAULT_STEERING)) {
7533		DBG_PRINT(ERR_DBG,
7534			  "Requested transmit steering not supported\n");
7535		DBG_PRINT(ERR_DBG, "Disabling transmit steering\n");
7536		tx_steering_type = NO_STEERING;
7537	}
7538
7539	if (rx_ring_num > MAX_RX_RINGS) {
7540		DBG_PRINT(ERR_DBG,
7541			  "Requested number of rx rings not supported\n");
7542		DBG_PRINT(ERR_DBG, "Default to %d rx rings\n",
7543			  MAX_RX_RINGS);
7544		rx_ring_num = MAX_RX_RINGS;
7545	}
7546
7547	if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7548		DBG_PRINT(ERR_DBG, "Wrong intr_type requested. "
7549			  "Defaulting to INTA\n");
7550		*dev_intr_type = INTA;
7551	}
7552
7553	if ((*dev_intr_type == MSI_X) &&
7554	    ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7555	     (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7556		DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. "
7557			  "Defaulting to INTA\n");
7558		*dev_intr_type = INTA;
7559	}
7560
7561	if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7562		DBG_PRINT(ERR_DBG, "Requested ring mode not supported\n");
7563		DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
7564		rx_ring_mode = 1;
7565	}
7566
7567	for (i = 0; i < MAX_RX_RINGS; i++)
7568		if (rx_ring_sz[i] > MAX_RX_BLOCKS_PER_RING) {
7569			DBG_PRINT(ERR_DBG, "Requested rx ring size not "
7570				  "supported\nDefaulting to %d\n",
7571				  MAX_RX_BLOCKS_PER_RING);
7572			rx_ring_sz[i] = MAX_RX_BLOCKS_PER_RING;
7573		}
7574
7575	return SUCCESS;
7576}
7577
7578/**
7579 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7580 * or Traffic class respectively.
7581 * @nic: device private variable
7582 * Description: The function configures the receive steering to
7583 * desired receive ring.
7584 * Return Value:  SUCCESS on success and
7585 * '-1' on failure (endian settings incorrect).
7586 */
7587static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7588{
7589	struct XENA_dev_config __iomem *bar0 = nic->bar0;
7590	register u64 val64 = 0;
7591
7592	if (ds_codepoint > 63)
7593		return FAILURE;
7594
7595	val64 = RTS_DS_MEM_DATA(ring);
7596	writeq(val64, &bar0->rts_ds_mem_data);
7597
7598	val64 = RTS_DS_MEM_CTRL_WE |
7599		RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7600		RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7601
7602	writeq(val64, &bar0->rts_ds_mem_ctrl);
7603
7604	return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7605				     RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7606				     S2IO_BIT_RESET);
7607}
7608
7609static const struct net_device_ops s2io_netdev_ops = {
7610	.ndo_open	        = s2io_open,
7611	.ndo_stop	        = s2io_close,
7612	.ndo_get_stats	        = s2io_get_stats,
7613	.ndo_start_xmit    	= s2io_xmit,
7614	.ndo_validate_addr	= eth_validate_addr,
7615	.ndo_set_rx_mode	= s2io_set_multicast,
7616	.ndo_do_ioctl	   	= s2io_ioctl,
7617	.ndo_set_mac_address    = s2io_set_mac_addr,
7618	.ndo_change_mtu	   	= s2io_change_mtu,
7619	.ndo_set_features	= s2io_set_features,
7620	.ndo_tx_timeout	   	= s2io_tx_watchdog,
7621#ifdef CONFIG_NET_POLL_CONTROLLER
7622	.ndo_poll_controller    = s2io_netpoll,
7623#endif
7624};
7625
7626/**
7627 *  s2io_init_nic - Initialization of the adapter .
7628 *  @pdev : structure containing the PCI related information of the device.
7629 *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7630 *  Description:
7631 *  The function initializes an adapter identified by the pci_dec structure.
7632 *  All OS related initialization including memory and device structure and
7633 *  initlaization of the device private variable is done. Also the swapper
7634 *  control register is initialized to enable read and write into the I/O
7635 *  registers of the device.
7636 *  Return value:
7637 *  returns 0 on success and negative on failure.
7638 */
7639
7640static int
7641s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7642{
7643	struct s2io_nic *sp;
7644	struct net_device *dev;
7645	int i, j, ret;
7646	int dma_flag = false;
7647	u32 mac_up, mac_down;
7648	u64 val64 = 0, tmp64 = 0;
7649	struct XENA_dev_config __iomem *bar0 = NULL;
7650	u16 subid;
7651	struct config_param *config;
7652	struct mac_info *mac_control;
7653	int mode;
7654	u8 dev_intr_type = intr_type;
7655	u8 dev_multiq = 0;
7656
7657	ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7658	if (ret)
7659		return ret;
7660
7661	ret = pci_enable_device(pdev);
7662	if (ret) {
7663		DBG_PRINT(ERR_DBG,
7664			  "%s: pci_enable_device failed\n", __func__);
7665		return ret;
7666	}
7667
7668	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
7669		DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__);
7670		dma_flag = true;
7671		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
7672			DBG_PRINT(ERR_DBG,
7673				  "Unable to obtain 64bit DMA for coherent allocations\n");
 
7674			pci_disable_device(pdev);
7675			return -ENOMEM;
7676		}
7677	} else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
7678		DBG_PRINT(INIT_DBG, "%s: Using 32bit DMA\n", __func__);
7679	} else {
7680		pci_disable_device(pdev);
7681		return -ENOMEM;
7682	}
7683	ret = pci_request_regions(pdev, s2io_driver_name);
7684	if (ret) {
7685		DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x\n",
7686			  __func__, ret);
7687		pci_disable_device(pdev);
7688		return -ENODEV;
7689	}
7690	if (dev_multiq)
7691		dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
7692	else
7693		dev = alloc_etherdev(sizeof(struct s2io_nic));
7694	if (dev == NULL) {
7695		pci_disable_device(pdev);
7696		pci_release_regions(pdev);
7697		return -ENODEV;
7698	}
7699
7700	pci_set_master(pdev);
7701	pci_set_drvdata(pdev, dev);
7702	SET_NETDEV_DEV(dev, &pdev->dev);
7703
7704	/*  Private member variable initialized to s2io NIC structure */
7705	sp = netdev_priv(dev);
7706	sp->dev = dev;
7707	sp->pdev = pdev;
7708	sp->high_dma_flag = dma_flag;
7709	sp->device_enabled_once = false;
7710	if (rx_ring_mode == 1)
7711		sp->rxd_mode = RXD_MODE_1;
7712	if (rx_ring_mode == 2)
7713		sp->rxd_mode = RXD_MODE_3B;
7714
7715	sp->config.intr_type = dev_intr_type;
7716
7717	if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7718	    (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7719		sp->device_type = XFRAME_II_DEVICE;
7720	else
7721		sp->device_type = XFRAME_I_DEVICE;
7722
7723
7724	/* Initialize some PCI/PCI-X fields of the NIC. */
7725	s2io_init_pci(sp);
7726
7727	/*
7728	 * Setting the device configuration parameters.
7729	 * Most of these parameters can be specified by the user during
7730	 * module insertion as they are module loadable parameters. If
7731	 * these parameters are not not specified during load time, they
7732	 * are initialized with default values.
7733	 */
7734	config = &sp->config;
7735	mac_control = &sp->mac_control;
7736
7737	config->napi = napi;
7738	config->tx_steering_type = tx_steering_type;
7739
7740	/* Tx side parameters. */
7741	if (config->tx_steering_type == TX_PRIORITY_STEERING)
7742		config->tx_fifo_num = MAX_TX_FIFOS;
7743	else
7744		config->tx_fifo_num = tx_fifo_num;
7745
7746	/* Initialize the fifos used for tx steering */
7747	if (config->tx_fifo_num < 5) {
7748		if (config->tx_fifo_num  == 1)
7749			sp->total_tcp_fifos = 1;
7750		else
7751			sp->total_tcp_fifos = config->tx_fifo_num - 1;
7752		sp->udp_fifo_idx = config->tx_fifo_num - 1;
7753		sp->total_udp_fifos = 1;
7754		sp->other_fifo_idx = sp->total_tcp_fifos - 1;
7755	} else {
7756		sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
7757				       FIFO_OTHER_MAX_NUM);
7758		sp->udp_fifo_idx = sp->total_tcp_fifos;
7759		sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
7760		sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
7761	}
7762
7763	config->multiq = dev_multiq;
7764	for (i = 0; i < config->tx_fifo_num; i++) {
7765		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7766
7767		tx_cfg->fifo_len = tx_fifo_len[i];
7768		tx_cfg->fifo_priority = i;
7769	}
7770
7771	/* mapping the QoS priority to the configured fifos */
7772	for (i = 0; i < MAX_TX_FIFOS; i++)
7773		config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
7774
7775	/* map the hashing selector table to the configured fifos */
7776	for (i = 0; i < config->tx_fifo_num; i++)
7777		sp->fifo_selector[i] = fifo_selector[i];
7778
7779
7780	config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7781	for (i = 0; i < config->tx_fifo_num; i++) {
7782		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7783
7784		tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7785		if (tx_cfg->fifo_len < 65) {
7786			config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7787			break;
7788		}
7789	}
7790	/* + 2 because one Txd for skb->data and one Txd for UFO */
7791	config->max_txds = MAX_SKB_FRAGS + 2;
7792
7793	/* Rx side parameters. */
7794	config->rx_ring_num = rx_ring_num;
7795	for (i = 0; i < config->rx_ring_num; i++) {
7796		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7797		struct ring_info *ring = &mac_control->rings[i];
7798
7799		rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
7800		rx_cfg->ring_priority = i;
7801		ring->rx_bufs_left = 0;
7802		ring->rxd_mode = sp->rxd_mode;
7803		ring->rxd_count = rxd_count[sp->rxd_mode];
7804		ring->pdev = sp->pdev;
7805		ring->dev = sp->dev;
7806	}
7807
7808	for (i = 0; i < rx_ring_num; i++) {
7809		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7810
7811		rx_cfg->ring_org = RING_ORG_BUFF1;
7812		rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7813	}
7814
7815	/*  Setting Mac Control parameters */
7816	mac_control->rmac_pause_time = rmac_pause_time;
7817	mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7818	mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7819
7820
7821	/*  initialize the shared memory used by the NIC and the host */
7822	if (init_shared_mem(sp)) {
7823		DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name);
7824		ret = -ENOMEM;
7825		goto mem_alloc_failed;
7826	}
7827
7828	sp->bar0 = pci_ioremap_bar(pdev, 0);
7829	if (!sp->bar0) {
7830		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7831			  dev->name);
7832		ret = -ENOMEM;
7833		goto bar0_remap_failed;
7834	}
7835
7836	sp->bar1 = pci_ioremap_bar(pdev, 2);
7837	if (!sp->bar1) {
7838		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7839			  dev->name);
7840		ret = -ENOMEM;
7841		goto bar1_remap_failed;
7842	}
7843
7844	/* Initializing the BAR1 address as the start of the FIFO pointer. */
7845	for (j = 0; j < MAX_TX_FIFOS; j++) {
7846		mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000);
7847	}
7848
7849	/*  Driver entry points */
7850	dev->netdev_ops = &s2io_netdev_ops;
7851	dev->ethtool_ops = &netdev_ethtool_ops;
7852	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
7853		NETIF_F_TSO | NETIF_F_TSO6 |
7854		NETIF_F_RXCSUM | NETIF_F_LRO;
7855	dev->features |= dev->hw_features |
7856		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
 
 
 
 
 
7857	if (sp->high_dma_flag == true)
7858		dev->features |= NETIF_F_HIGHDMA;
7859	dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7860	INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7861	INIT_WORK(&sp->set_link_task, s2io_set_link);
7862
7863	pci_save_state(sp->pdev);
7864
7865	/* Setting swapper control on the NIC, for proper reset operation */
7866	if (s2io_set_swapper(sp)) {
7867		DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
7868			  dev->name);
7869		ret = -EAGAIN;
7870		goto set_swap_failed;
7871	}
7872
7873	/* Verify if the Herc works on the slot its placed into */
7874	if (sp->device_type & XFRAME_II_DEVICE) {
7875		mode = s2io_verify_pci_mode(sp);
7876		if (mode < 0) {
7877			DBG_PRINT(ERR_DBG, "%s: Unsupported PCI bus mode\n",
7878				  __func__);
7879			ret = -EBADSLT;
7880			goto set_swap_failed;
7881		}
7882	}
7883
7884	if (sp->config.intr_type == MSI_X) {
7885		sp->num_entries = config->rx_ring_num + 1;
7886		ret = s2io_enable_msi_x(sp);
7887
7888		if (!ret) {
7889			ret = s2io_test_msi(sp);
7890			/* rollback MSI-X, will re-enable during add_isr() */
7891			remove_msix_isr(sp);
7892		}
7893		if (ret) {
7894
7895			DBG_PRINT(ERR_DBG,
7896				  "MSI-X requested but failed to enable\n");
7897			sp->config.intr_type = INTA;
7898		}
7899	}
7900
7901	if (config->intr_type ==  MSI_X) {
7902		for (i = 0; i < config->rx_ring_num ; i++) {
7903			struct ring_info *ring = &mac_control->rings[i];
7904
7905			netif_napi_add(dev, &ring->napi, s2io_poll_msix, 64);
7906		}
7907	} else {
7908		netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
7909	}
7910
7911	/* Not needed for Herc */
7912	if (sp->device_type & XFRAME_I_DEVICE) {
7913		/*
7914		 * Fix for all "FFs" MAC address problems observed on
7915		 * Alpha platforms
7916		 */
7917		fix_mac_address(sp);
7918		s2io_reset(sp);
7919	}
7920
7921	/*
7922	 * MAC address initialization.
7923	 * For now only one mac address will be read and used.
7924	 */
7925	bar0 = sp->bar0;
7926	val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
7927		RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
7928	writeq(val64, &bar0->rmac_addr_cmd_mem);
7929	wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
7930			      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
7931			      S2IO_BIT_RESET);
7932	tmp64 = readq(&bar0->rmac_addr_data0_mem);
7933	mac_down = (u32)tmp64;
7934	mac_up = (u32) (tmp64 >> 32);
7935
7936	sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7937	sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7938	sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
7939	sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
7940	sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
7941	sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
7942
7943	/*  Set the factory defined MAC address initially   */
7944	dev->addr_len = ETH_ALEN;
7945	memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
7946
7947	/* initialize number of multicast & unicast MAC entries variables */
7948	if (sp->device_type == XFRAME_I_DEVICE) {
7949		config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
7950		config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
7951		config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
7952	} else if (sp->device_type == XFRAME_II_DEVICE) {
7953		config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
7954		config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
7955		config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
7956	}
7957
7958	/* MTU range: 46 - 9600 */
7959	dev->min_mtu = MIN_MTU;
7960	dev->max_mtu = S2IO_JUMBO_SIZE;
7961
7962	/* store mac addresses from CAM to s2io_nic structure */
7963	do_s2io_store_unicast_mc(sp);
7964
7965	/* Configure MSIX vector for number of rings configured plus one */
7966	if ((sp->device_type == XFRAME_II_DEVICE) &&
7967	    (config->intr_type == MSI_X))
7968		sp->num_entries = config->rx_ring_num + 1;
7969
7970	/* Store the values of the MSIX table in the s2io_nic structure */
7971	store_xmsi_data(sp);
7972	/* reset Nic and bring it to known state */
7973	s2io_reset(sp);
7974
7975	/*
7976	 * Initialize link state flags
7977	 * and the card state parameter
7978	 */
7979	sp->state = 0;
7980
7981	/* Initialize spinlocks */
7982	for (i = 0; i < sp->config.tx_fifo_num; i++) {
7983		struct fifo_info *fifo = &mac_control->fifos[i];
7984
7985		spin_lock_init(&fifo->tx_lock);
7986	}
7987
7988	/*
7989	 * SXE-002: Configure link and activity LED to init state
7990	 * on driver load.
7991	 */
7992	subid = sp->pdev->subsystem_device;
7993	if ((subid & 0xFF) >= 0x07) {
7994		val64 = readq(&bar0->gpio_control);
7995		val64 |= 0x0000800000000000ULL;
7996		writeq(val64, &bar0->gpio_control);
7997		val64 = 0x0411040400000000ULL;
7998		writeq(val64, (void __iomem *)bar0 + 0x2700);
7999		val64 = readq(&bar0->gpio_control);
8000	}
8001
8002	sp->rx_csum = 1;	/* Rx chksum verify enabled by default */
8003
8004	if (register_netdev(dev)) {
8005		DBG_PRINT(ERR_DBG, "Device registration failed\n");
8006		ret = -ENODEV;
8007		goto register_failed;
8008	}
8009	s2io_vpd_read(sp);
8010	DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n");
8011	DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name,
8012		  sp->product_name, pdev->revision);
8013	DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
8014		  s2io_driver_version);
8015	DBG_PRINT(ERR_DBG, "%s: MAC Address: %pM\n", dev->name, dev->dev_addr);
8016	DBG_PRINT(ERR_DBG, "Serial number: %s\n", sp->serial_num);
8017	if (sp->device_type & XFRAME_II_DEVICE) {
8018		mode = s2io_print_pci_mode(sp);
8019		if (mode < 0) {
8020			ret = -EBADSLT;
8021			unregister_netdev(dev);
8022			goto set_swap_failed;
8023		}
8024	}
8025	switch (sp->rxd_mode) {
8026	case RXD_MODE_1:
8027		DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
8028			  dev->name);
8029		break;
8030	case RXD_MODE_3B:
8031		DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
8032			  dev->name);
8033		break;
8034	}
8035
8036	switch (sp->config.napi) {
8037	case 0:
8038		DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
8039		break;
8040	case 1:
8041		DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
8042		break;
8043	}
8044
8045	DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
8046		  sp->config.tx_fifo_num);
8047
8048	DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
8049		  sp->config.rx_ring_num);
8050
8051	switch (sp->config.intr_type) {
8052	case INTA:
8053		DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
8054		break;
8055	case MSI_X:
8056		DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
8057		break;
8058	}
8059	if (sp->config.multiq) {
8060		for (i = 0; i < sp->config.tx_fifo_num; i++) {
8061			struct fifo_info *fifo = &mac_control->fifos[i];
8062
8063			fifo->multiq = config->multiq;
8064		}
8065		DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
8066			  dev->name);
8067	} else
8068		DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
8069			  dev->name);
8070
8071	switch (sp->config.tx_steering_type) {
8072	case NO_STEERING:
8073		DBG_PRINT(ERR_DBG, "%s: No steering enabled for transmit\n",
8074			  dev->name);
8075		break;
8076	case TX_PRIORITY_STEERING:
8077		DBG_PRINT(ERR_DBG,
8078			  "%s: Priority steering enabled for transmit\n",
8079			  dev->name);
8080		break;
8081	case TX_DEFAULT_STEERING:
8082		DBG_PRINT(ERR_DBG,
8083			  "%s: Default steering enabled for transmit\n",
8084			  dev->name);
8085	}
8086
8087	DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
8088		  dev->name);
 
 
 
 
8089	/* Initialize device name */
8090	snprintf(sp->name, sizeof(sp->name), "%s Neterion %s", dev->name,
8091		 sp->product_name);
8092
8093	if (vlan_tag_strip)
8094		sp->vlan_strip_flag = 1;
8095	else
8096		sp->vlan_strip_flag = 0;
8097
8098	/*
8099	 * Make Link state as off at this point, when the Link change
8100	 * interrupt comes the state will be automatically changed to
8101	 * the right state.
8102	 */
8103	netif_carrier_off(dev);
8104
8105	return 0;
8106
8107register_failed:
8108set_swap_failed:
8109	iounmap(sp->bar1);
8110bar1_remap_failed:
8111	iounmap(sp->bar0);
8112bar0_remap_failed:
8113mem_alloc_failed:
8114	free_shared_mem(sp);
8115	pci_disable_device(pdev);
8116	pci_release_regions(pdev);
8117	free_netdev(dev);
8118
8119	return ret;
8120}
8121
8122/**
8123 * s2io_rem_nic - Free the PCI device
8124 * @pdev: structure containing the PCI related information of the device.
8125 * Description: This function is called by the PCI subsystem to release a
8126 * PCI device and free up all resources held by the device. This could
8127 * be in response to a Hot plug event or when the driver is to be removed
8128 * from memory.
8129 */
8130
8131static void s2io_rem_nic(struct pci_dev *pdev)
8132{
8133	struct net_device *dev = pci_get_drvdata(pdev);
8134	struct s2io_nic *sp;
8135
8136	if (dev == NULL) {
8137		DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
8138		return;
8139	}
8140
8141	sp = netdev_priv(dev);
8142
8143	cancel_work_sync(&sp->rst_timer_task);
8144	cancel_work_sync(&sp->set_link_task);
8145
8146	unregister_netdev(dev);
8147
8148	free_shared_mem(sp);
8149	iounmap(sp->bar0);
8150	iounmap(sp->bar1);
8151	pci_release_regions(pdev);
8152	free_netdev(dev);
8153	pci_disable_device(pdev);
8154}
8155
/* Register the PCI driver and generate the module init/exit boilerplate. */
module_pci_driver(s2io_driver);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8157
8158static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8159				struct tcphdr **tcp, struct RxD_t *rxdp,
8160				struct s2io_nic *sp)
8161{
8162	int ip_off;
8163	u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8164
8165	if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8166		DBG_PRINT(INIT_DBG,
8167			  "%s: Non-TCP frames not supported for LRO\n",
8168			  __func__);
8169		return -1;
8170	}
8171
8172	/* Checking for DIX type or DIX type with VLAN */
8173	if ((l2_type == 0) || (l2_type == 4)) {
8174		ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8175		/*
8176		 * If vlan stripping is disabled and the frame is VLAN tagged,
8177		 * shift the offset by the VLAN header size bytes.
8178		 */
8179		if ((!sp->vlan_strip_flag) &&
8180		    (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
8181			ip_off += HEADER_VLAN_SIZE;
8182	} else {
8183		/* LLC, SNAP etc are considered non-mergeable */
8184		return -1;
8185	}
8186
8187	*ip = (struct iphdr *)(buffer + ip_off);
8188	ip_len = (u8)((*ip)->ihl);
8189	ip_len <<= 2;
8190	*tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8191
8192	return 0;
8193}
8194
8195static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8196				  struct tcphdr *tcp)
8197{
8198	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8199	if ((lro->iph->saddr != ip->saddr) ||
8200	    (lro->iph->daddr != ip->daddr) ||
8201	    (lro->tcph->source != tcp->source) ||
8202	    (lro->tcph->dest != tcp->dest))
8203		return -1;
8204	return 0;
8205}
8206
8207static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8208{
8209	return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
8210}
8211
8212static void initiate_new_session(struct lro *lro, u8 *l2h,
8213				 struct iphdr *ip, struct tcphdr *tcp,
8214				 u32 tcp_pyld_len, u16 vlan_tag)
8215{
8216	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8217	lro->l2h = l2h;
8218	lro->iph = ip;
8219	lro->tcph = tcp;
8220	lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
8221	lro->tcp_ack = tcp->ack_seq;
8222	lro->sg_num = 1;
8223	lro->total_len = ntohs(ip->tot_len);
8224	lro->frags_len = 0;
8225	lro->vlan_tag = vlan_tag;
8226	/*
8227	 * Check if we saw TCP timestamp.
8228	 * Other consistency checks have already been done.
8229	 */
8230	if (tcp->doff == 8) {
8231		__be32 *ptr;
8232		ptr = (__be32 *)(tcp+1);
8233		lro->saw_ts = 1;
8234		lro->cur_tsval = ntohl(*(ptr+1));
8235		lro->cur_tsecr = *(ptr+2);
8236	}
8237	lro->in_use = 1;
8238}
8239
8240static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
8241{
8242	struct iphdr *ip = lro->iph;
8243	struct tcphdr *tcp = lro->tcph;
8244	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8245
8246	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8247
8248	/* Update L3 header */
8249	csum_replace2(&ip->check, ip->tot_len, htons(lro->total_len));
8250	ip->tot_len = htons(lro->total_len);
8251
8252	/* Update L4 header */
8253	tcp->ack_seq = lro->tcp_ack;
8254	tcp->window = lro->window;
8255
8256	/* Update tsecr field if this session has timestamps enabled */
8257	if (lro->saw_ts) {
8258		__be32 *ptr = (__be32 *)(tcp + 1);
8259		*(ptr+2) = lro->cur_tsecr;
8260	}
8261
8262	/* Update counters required for calculation of
8263	 * average no. of packets aggregated.
8264	 */
8265	swstats->sum_avg_pkts_aggregated += lro->sg_num;
8266	swstats->num_aggregations++;
8267}
8268
8269static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
8270			     struct tcphdr *tcp, u32 l4_pyld)
8271{
8272	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8273	lro->total_len += l4_pyld;
8274	lro->frags_len += l4_pyld;
8275	lro->tcp_next_seq += l4_pyld;
8276	lro->sg_num++;
8277
8278	/* Update ack seq no. and window ad(from this pkt) in LRO object */
8279	lro->tcp_ack = tcp->ack_seq;
8280	lro->window = tcp->window;
8281
8282	if (lro->saw_ts) {
8283		__be32 *ptr;
8284		/* Update tsecr and tsval from this packet */
8285		ptr = (__be32 *)(tcp+1);
8286		lro->cur_tsval = ntohl(*(ptr+1));
8287		lro->cur_tsecr = *(ptr + 2);
8288	}
8289}
8290
8291static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
8292				    struct tcphdr *tcp, u32 tcp_pyld_len)
8293{
8294	u8 *ptr;
8295
8296	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8297
8298	if (!tcp_pyld_len) {
8299		/* Runt frame or a pure ack */
8300		return -1;
8301	}
8302
8303	if (ip->ihl != 5) /* IP has options */
8304		return -1;
8305
8306	/* If we see CE codepoint in IP header, packet is not mergeable */
8307	if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
8308		return -1;
8309
8310	/* If we see ECE or CWR flags in TCP header, packet is not mergeable */
8311	if (tcp->urg || tcp->psh || tcp->rst ||
8312	    tcp->syn || tcp->fin ||
8313	    tcp->ece || tcp->cwr || !tcp->ack) {
8314		/*
8315		 * Currently recognize only the ack control word and
8316		 * any other control field being set would result in
8317		 * flushing the LRO session
8318		 */
8319		return -1;
8320	}
8321
8322	/*
8323	 * Allow only one TCP timestamp option. Don't aggregate if
8324	 * any other options are detected.
8325	 */
8326	if (tcp->doff != 5 && tcp->doff != 8)
8327		return -1;
8328
8329	if (tcp->doff == 8) {
8330		ptr = (u8 *)(tcp + 1);
8331		while (*ptr == TCPOPT_NOP)
8332			ptr++;
8333		if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
8334			return -1;
8335
8336		/* Ensure timestamp value increases monotonically */
8337		if (l_lro)
8338			if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
8339				return -1;
8340
8341		/* timestamp echo reply should be non-zero */
8342		if (*((__be32 *)(ptr+6)) == 0)
8343			return -1;
8344	}
8345
8346	return 0;
8347}
8348
/*
 * Classify a received TCP segment against the per-ring LRO session table.
 *
 * Return codes consumed by the rx path:
 *   0 - no free session slot; frame goes up unmerged (*lro set to NULL)
 *   1 - segment aggregated into an existing session
 *   2 - matching session must be flushed (out-of-sequence or unmergeable)
 *   3 - a fresh session was initiated for this segment
 *   4 - aggregated and the session reached lro_max_aggr_per_sess; flush it
 *   5 - segment is not L3/L4 mergeable; deliver it without a session
 *   negative - L2-level rejection from check_L2_lro_capable()
 */
static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
				 u8 **tcp, u32 *tcp_len, struct lro **lro,
				 struct RxD_t *rxdp, struct s2io_nic *sp)
{
	struct iphdr *ip;
	struct tcphdr *tcph;
	int ret = 0, i;
	u16 vlan_tag = 0;
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

	/* Locate the IP/TCP headers; bail out on non-mergeable L2 framing. */
	ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
				   rxdp, sp);
	if (ret)
		return ret;

	DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr);

	vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
	tcph = (struct tcphdr *)*tcp;
	*tcp_len = get_l4_pyld_length(ip, tcph);
	/* First pass: look for an in-use session matching this 4-tuple. */
	for (i = 0; i < MAX_LRO_SESSIONS; i++) {
		struct lro *l_lro = &ring_data->lro0_n[i];
		if (l_lro->in_use) {
			if (check_for_socket_match(l_lro, ip, tcph))
				continue;
			/* Sock pair matched */
			*lro = l_lro;

			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
				DBG_PRINT(INFO_DBG, "%s: Out of sequence. "
					  "expected 0x%x, actual 0x%x\n",
					  __func__,
					  (*lro)->tcp_next_seq,
					  ntohl(tcph->seq));

				swstats->outof_sequence_pkts++;
				ret = 2;
				break;
			}

			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,
						      *tcp_len))
				ret = 1; /* Aggregate */
			else
				ret = 2; /* Flush both */
			break;
		}
	}

	if (ret == 0) {
		/* Before searching for available LRO objects,
		 * check if the pkt is L3/L4 aggregatable. If not
		 * don't create new LRO session. Just send this
		 * packet up.
		 */
		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len))
			return 5;

		/* Second pass: claim a free session slot for the new flow. */
		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
			struct lro *l_lro = &ring_data->lro0_n[i];
			if (!(l_lro->in_use)) {
				*lro = l_lro;
				ret = 3; /* Begin anew */
				break;
			}
		}
	}

	if (ret == 0) { /* sessions exceeded */
		DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
			  __func__);
		*lro = NULL;
		return ret;
	}

	/* Act on the classification decided above. */
	switch (ret) {
	case 3:
		initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
				     vlan_tag);
		break;
	case 2:
		update_L3L4_header(sp, *lro);
		break;
	case 1:
		aggregate_new_rx(*lro, ip, tcph, *tcp_len);
		if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
			update_L3L4_header(sp, *lro);
			ret = 4; /* Flush the LRO */
		}
		break;
	default:
		DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n", __func__);
		break;
	}

	return ret;
}
8446
8447static void clear_lro_session(struct lro *lro)
8448{
8449	static u16 lro_struct_size = sizeof(struct lro);
8450
8451	memset(lro, 0, lro_struct_size);
8452}
8453
8454static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
8455{
8456	struct net_device *dev = skb->dev;
8457	struct s2io_nic *sp = netdev_priv(dev);
8458
8459	skb->protocol = eth_type_trans(skb, dev);
8460	if (vlan_tag && sp->vlan_strip_flag)
8461		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
8462	if (sp->config.napi)
8463		netif_receive_skb(skb);
8464	else
8465		netif_rx(skb);
8466}
8467
/*
 * Chain a newly received segment's TCP payload onto the head skb of an
 * LRO session via its frag_list, updating the aggregate lengths.
 */
static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
			   struct sk_buff *skb, u32 tcp_len)
{
	struct sk_buff *first = lro->parent;
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

	/* Grow the head skb's accounting by this segment's payload. */
	first->len += tcp_len;
	first->data_len = lro->frags_len;
	/* Trim the new skb down to just the TCP payload. */
	skb_pull(skb, (skb->len - tcp_len));
	/* Append at the tail of the frag_list (start it if empty). */
	if (skb_shinfo(first)->frag_list)
		lro->last_frag->next = skb;
	else
		skb_shinfo(first)->frag_list = skb;
	first->truesize += skb->truesize;
	lro->last_frag = skb;
	swstats->clubbed_frms_cnt++;
}
8485
8486/**
8487 * s2io_io_error_detected - called when PCI error is detected
8488 * @pdev: Pointer to PCI device
8489 * @state: The current pci connection state
8490 *
8491 * This function is called after a PCI bus error affecting
8492 * this device has been detected.
8493 */
8494static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8495					       pci_channel_state_t state)
8496{
8497	struct net_device *netdev = pci_get_drvdata(pdev);
8498	struct s2io_nic *sp = netdev_priv(netdev);
8499
8500	netif_device_detach(netdev);
8501
8502	if (state == pci_channel_io_perm_failure)
8503		return PCI_ERS_RESULT_DISCONNECT;
8504
8505	if (netif_running(netdev)) {
8506		/* Bring down the card, while avoiding PCI I/O */
8507		do_s2io_card_down(sp, 0);
8508	}
8509	pci_disable_device(pdev);
8510
8511	return PCI_ERS_RESULT_NEED_RESET;
8512}
8513
8514/**
8515 * s2io_io_slot_reset - called after the pci bus has been reset.
8516 * @pdev: Pointer to PCI device
8517 *
8518 * Restart the card from scratch, as if from a cold-boot.
8519 * At this point, the card has experienced a hard reset,
8520 * followed by fixups by BIOS, and has its config space
8521 * set up identically to what it was at cold boot.
8522 */
8523static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8524{
8525	struct net_device *netdev = pci_get_drvdata(pdev);
8526	struct s2io_nic *sp = netdev_priv(netdev);
8527
8528	if (pci_enable_device(pdev)) {
8529		pr_err("Cannot re-enable PCI device after reset.\n");
8530		return PCI_ERS_RESULT_DISCONNECT;
8531	}
8532
8533	pci_set_master(pdev);
8534	s2io_reset(sp);
8535
8536	return PCI_ERS_RESULT_RECOVERED;
8537}
8538
8539/**
8540 * s2io_io_resume - called when traffic can start flowing again.
8541 * @pdev: Pointer to PCI device
8542 *
8543 * This callback is called when the error recovery driver tells
8544 * us that it is OK to resume normal operation.
8545 */
8546static void s2io_io_resume(struct pci_dev *pdev)
8547{
8548	struct net_device *netdev = pci_get_drvdata(pdev);
8549	struct s2io_nic *sp = netdev_priv(netdev);
8550
8551	if (netif_running(netdev)) {
8552		if (s2io_card_up(sp)) {
8553			pr_err("Can't bring device back up after reset.\n");
8554			return;
8555		}
8556
8557		if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8558			s2io_card_down(sp);
8559			pr_err("Can't restore mac addr after reset.\n");
8560			return;
8561		}
8562	}
8563
8564	netif_device_attach(netdev);
8565	netif_tx_wake_all_queues(netdev);
8566}