Linux Audio

Check our new training course

Loading...
v6.13.7
   1/************************************************************************
   2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
   3 * Copyright(c) 2002-2010 Exar Corp.
   4 *
   5 * This software may be used and distributed according to the terms of
   6 * the GNU General Public License (GPL), incorporated herein by reference.
   7 * Drivers based on or derived from this code fall under the GPL and must
   8 * retain the authorship, copyright and license notice.  This file is not
   9 * a complete program and may only be used when the entire operating
  10 * system is licensed under the GPL.
  11 * See the file COPYING in this distribution for more information.
  12 *
  13 * Credits:
  14 * Jeff Garzik		: For pointing out the improper error condition
  15 *			  check in the s2io_xmit routine and also some
  16 *			  issues in the Tx watch dog function. Also for
  17 *			  patiently answering all those innumerable
 *			  questions regarding the 2.6 porting issues.
  19 * Stephen Hemminger	: Providing proper 2.6 porting mechanism for some
  20 *			  macros available only in 2.6 Kernel.
  21 * Francois Romieu	: For pointing out all code part that were
  22 *			  deprecated and also styling related comments.
  23 * Grant Grundler	: For helping me get rid of some Architecture
  24 *			  dependent code.
  25 * Christopher Hellwig	: Some more 2.6 specific issues in the driver.
  26 *
  27 * The module loadable parameters that are supported by the driver and a brief
  28 * explanation of all the variables.
  29 *
  30 * rx_ring_num : This can be used to program the number of receive rings used
  31 * in the driver.
  32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
  33 *     This is also an array of size 8.
  34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
  35 *		values are 1, 2.
 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
  37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
  38 * Tx descriptors that can be associated with each corresponding FIFO.
  39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
  40 *     2(MSI_X). Default value is '2(MSI_X)'
  41 * lro_max_pkts: This parameter defines maximum number of packets can be
  42 *     aggregated as a single large packet
 * napi: This parameter is used to enable/disable NAPI (polling Rx)
  44 *     Possible values '1' for enable and '0' for disable. Default is '1'
  45 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
  46 *                 Possible values '1' for enable , '0' for disable.
  47 *                 Default is '2' - which means disable in promisc mode
  48 *                 and enable in non-promiscuous mode.
 * multiq: This parameter is used to enable/disable MULTIQUEUE support.
  50 *      Possible values '1' for enable and '0' for disable. Default is '0'
  51 ************************************************************************/
  52
  53#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  54
  55#include <linux/module.h>
  56#include <linux/types.h>
  57#include <linux/errno.h>
  58#include <linux/ioport.h>
  59#include <linux/pci.h>
  60#include <linux/dma-mapping.h>
  61#include <linux/kernel.h>
  62#include <linux/netdevice.h>
  63#include <linux/etherdevice.h>
  64#include <linux/mdio.h>
  65#include <linux/skbuff.h>
  66#include <linux/init.h>
  67#include <linux/delay.h>
  68#include <linux/stddef.h>
  69#include <linux/ioctl.h>
  70#include <linux/timex.h>
  71#include <linux/ethtool.h>
  72#include <linux/workqueue.h>
  73#include <linux/if_vlan.h>
  74#include <linux/ip.h>
  75#include <linux/tcp.h>
  76#include <linux/uaccess.h>
  77#include <linux/io.h>
  78#include <linux/io-64-nonatomic-lo-hi.h>
  79#include <linux/slab.h>
  80#include <linux/prefetch.h>
  81#include <net/tcp.h>
  82#include <net/checksum.h>
  83
  84#include <asm/div64.h>
  85#include <asm/irq.h>
  86
  87/* local include */
  88#include "s2io.h"
  89#include "s2io-regs.h"
  90
  91#define DRV_VERSION "2.0.26.28"
  92
  93/* S2io Driver name & version. */
  94static const char s2io_driver_name[] = "Neterion";
  95static const char s2io_driver_version[] = DRV_VERSION;
  96
  97static const int rxd_size[2] = {32, 48};
  98static const int rxd_count[2] = {127, 85};
  99
 100static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
 101{
 102	int ret;
 103
 104	ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
 105	       (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
 106
 107	return ret;
 108}
 109
/*
 * Cards with following subsystem_id have a link state indication
 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
 * macro below identifies these cards given the subsystem_id.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)		\
	(dev_type == XFRAME_I_DEVICE) ?					\
	((((subid >= 0x600B) && (subid <= 0x600D)) ||			\
	  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0

/* Link is considered up when the adapter status word reports neither a
 * remote nor a local RMAC fault.
 */
#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
				      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
 122
 123static inline int is_s2io_card_up(const struct s2io_nic *sp)
 124{
 125	return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
 126}
 127
 128/* Ethtool related variables and Macros. */
 129static const char s2io_gstrings[][ETH_GSTRING_LEN] = {
 130	"Register test\t(offline)",
 131	"Eeprom test\t(offline)",
 132	"Link test\t(online)",
 133	"RLDRAM test\t(offline)",
 134	"BIST Test\t(offline)"
 135};
 136
 137static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
 138	{"tmac_frms"},
 139	{"tmac_data_octets"},
 140	{"tmac_drop_frms"},
 141	{"tmac_mcst_frms"},
 142	{"tmac_bcst_frms"},
 143	{"tmac_pause_ctrl_frms"},
 144	{"tmac_ttl_octets"},
 145	{"tmac_ucst_frms"},
 146	{"tmac_nucst_frms"},
 147	{"tmac_any_err_frms"},
 148	{"tmac_ttl_less_fb_octets"},
 149	{"tmac_vld_ip_octets"},
 150	{"tmac_vld_ip"},
 151	{"tmac_drop_ip"},
 152	{"tmac_icmp"},
 153	{"tmac_rst_tcp"},
 154	{"tmac_tcp"},
 155	{"tmac_udp"},
 156	{"rmac_vld_frms"},
 157	{"rmac_data_octets"},
 158	{"rmac_fcs_err_frms"},
 159	{"rmac_drop_frms"},
 160	{"rmac_vld_mcst_frms"},
 161	{"rmac_vld_bcst_frms"},
 162	{"rmac_in_rng_len_err_frms"},
 163	{"rmac_out_rng_len_err_frms"},
 164	{"rmac_long_frms"},
 165	{"rmac_pause_ctrl_frms"},
 166	{"rmac_unsup_ctrl_frms"},
 167	{"rmac_ttl_octets"},
 168	{"rmac_accepted_ucst_frms"},
 169	{"rmac_accepted_nucst_frms"},
 170	{"rmac_discarded_frms"},
 171	{"rmac_drop_events"},
 172	{"rmac_ttl_less_fb_octets"},
 173	{"rmac_ttl_frms"},
 174	{"rmac_usized_frms"},
 175	{"rmac_osized_frms"},
 176	{"rmac_frag_frms"},
 177	{"rmac_jabber_frms"},
 178	{"rmac_ttl_64_frms"},
 179	{"rmac_ttl_65_127_frms"},
 180	{"rmac_ttl_128_255_frms"},
 181	{"rmac_ttl_256_511_frms"},
 182	{"rmac_ttl_512_1023_frms"},
 183	{"rmac_ttl_1024_1518_frms"},
 184	{"rmac_ip"},
 185	{"rmac_ip_octets"},
 186	{"rmac_hdr_err_ip"},
 187	{"rmac_drop_ip"},
 188	{"rmac_icmp"},
 189	{"rmac_tcp"},
 190	{"rmac_udp"},
 191	{"rmac_err_drp_udp"},
 192	{"rmac_xgmii_err_sym"},
 193	{"rmac_frms_q0"},
 194	{"rmac_frms_q1"},
 195	{"rmac_frms_q2"},
 196	{"rmac_frms_q3"},
 197	{"rmac_frms_q4"},
 198	{"rmac_frms_q5"},
 199	{"rmac_frms_q6"},
 200	{"rmac_frms_q7"},
 201	{"rmac_full_q0"},
 202	{"rmac_full_q1"},
 203	{"rmac_full_q2"},
 204	{"rmac_full_q3"},
 205	{"rmac_full_q4"},
 206	{"rmac_full_q5"},
 207	{"rmac_full_q6"},
 208	{"rmac_full_q7"},
 209	{"rmac_pause_cnt"},
 210	{"rmac_xgmii_data_err_cnt"},
 211	{"rmac_xgmii_ctrl_err_cnt"},
 212	{"rmac_accepted_ip"},
 213	{"rmac_err_tcp"},
 214	{"rd_req_cnt"},
 215	{"new_rd_req_cnt"},
 216	{"new_rd_req_rtry_cnt"},
 217	{"rd_rtry_cnt"},
 218	{"wr_rtry_rd_ack_cnt"},
 219	{"wr_req_cnt"},
 220	{"new_wr_req_cnt"},
 221	{"new_wr_req_rtry_cnt"},
 222	{"wr_rtry_cnt"},
 223	{"wr_disc_cnt"},
 224	{"rd_rtry_wr_ack_cnt"},
 225	{"txp_wr_cnt"},
 226	{"txd_rd_cnt"},
 227	{"txd_wr_cnt"},
 228	{"rxd_rd_cnt"},
 229	{"rxd_wr_cnt"},
 230	{"txf_rd_cnt"},
 231	{"rxf_wr_cnt"}
 232};
 233
 234static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
 235	{"rmac_ttl_1519_4095_frms"},
 236	{"rmac_ttl_4096_8191_frms"},
 237	{"rmac_ttl_8192_max_frms"},
 238	{"rmac_ttl_gt_max_frms"},
 239	{"rmac_osized_alt_frms"},
 240	{"rmac_jabber_alt_frms"},
 241	{"rmac_gt_max_alt_frms"},
 242	{"rmac_vlan_frms"},
 243	{"rmac_len_discard"},
 244	{"rmac_fcs_discard"},
 245	{"rmac_pf_discard"},
 246	{"rmac_da_discard"},
 247	{"rmac_red_discard"},
 248	{"rmac_rts_discard"},
 249	{"rmac_ingm_full_discard"},
 250	{"link_fault_cnt"}
 251};
 252
 253static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
 254	{"\n DRIVER STATISTICS"},
 255	{"single_bit_ecc_errs"},
 256	{"double_bit_ecc_errs"},
 257	{"parity_err_cnt"},
 258	{"serious_err_cnt"},
 259	{"soft_reset_cnt"},
 260	{"fifo_full_cnt"},
 261	{"ring_0_full_cnt"},
 262	{"ring_1_full_cnt"},
 263	{"ring_2_full_cnt"},
 264	{"ring_3_full_cnt"},
 265	{"ring_4_full_cnt"},
 266	{"ring_5_full_cnt"},
 267	{"ring_6_full_cnt"},
 268	{"ring_7_full_cnt"},
 269	{"alarm_transceiver_temp_high"},
 270	{"alarm_transceiver_temp_low"},
 271	{"alarm_laser_bias_current_high"},
 272	{"alarm_laser_bias_current_low"},
 273	{"alarm_laser_output_power_high"},
 274	{"alarm_laser_output_power_low"},
 275	{"warn_transceiver_temp_high"},
 276	{"warn_transceiver_temp_low"},
 277	{"warn_laser_bias_current_high"},
 278	{"warn_laser_bias_current_low"},
 279	{"warn_laser_output_power_high"},
 280	{"warn_laser_output_power_low"},
 281	{"lro_aggregated_pkts"},
 282	{"lro_flush_both_count"},
 283	{"lro_out_of_sequence_pkts"},
 284	{"lro_flush_due_to_max_pkts"},
 285	{"lro_avg_aggr_pkts"},
 286	{"mem_alloc_fail_cnt"},
 287	{"pci_map_fail_cnt"},
 288	{"watchdog_timer_cnt"},
 289	{"mem_allocated"},
 290	{"mem_freed"},
 291	{"link_up_cnt"},
 292	{"link_down_cnt"},
 293	{"link_up_time"},
 294	{"link_down_time"},
 295	{"tx_tcode_buf_abort_cnt"},
 296	{"tx_tcode_desc_abort_cnt"},
 297	{"tx_tcode_parity_err_cnt"},
 298	{"tx_tcode_link_loss_cnt"},
 299	{"tx_tcode_list_proc_err_cnt"},
 300	{"rx_tcode_parity_err_cnt"},
 301	{"rx_tcode_abort_cnt"},
 302	{"rx_tcode_parity_abort_cnt"},
 303	{"rx_tcode_rda_fail_cnt"},
 304	{"rx_tcode_unkn_prot_cnt"},
 305	{"rx_tcode_fcs_err_cnt"},
 306	{"rx_tcode_buf_size_err_cnt"},
 307	{"rx_tcode_rxd_corrupt_cnt"},
 308	{"rx_tcode_unkn_err_cnt"},
 309	{"tda_err_cnt"},
 310	{"pfc_err_cnt"},
 311	{"pcc_err_cnt"},
 312	{"tti_err_cnt"},
 313	{"tpa_err_cnt"},
 314	{"sm_err_cnt"},
 315	{"lso_err_cnt"},
 316	{"mac_tmac_err_cnt"},
 317	{"mac_rmac_err_cnt"},
 318	{"xgxs_txgxs_err_cnt"},
 319	{"xgxs_rxgxs_err_cnt"},
 320	{"rc_err_cnt"},
 321	{"prc_pcix_err_cnt"},
 322	{"rpa_err_cnt"},
 323	{"rda_err_cnt"},
 324	{"rti_err_cnt"},
 325	{"mc_err_cnt"}
 326};
 327
 328#define S2IO_XENA_STAT_LEN	ARRAY_SIZE(ethtool_xena_stats_keys)
 329#define S2IO_ENHANCED_STAT_LEN	ARRAY_SIZE(ethtool_enhanced_stats_keys)
 330#define S2IO_DRIVER_STAT_LEN	ARRAY_SIZE(ethtool_driver_stats_keys)
 331
 332#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
 333#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)
 334
 335#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
 336#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)
 337
 338#define S2IO_TEST_LEN	ARRAY_SIZE(s2io_gstrings)
 339#define S2IO_STRINGS_LEN	(S2IO_TEST_LEN * ETH_GSTRING_LEN)
 340
 341/* copy mac addr to def_mac_addr array */
 342static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
 343{
 344	sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
 345	sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
 346	sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
 347	sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
 348	sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
 349	sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
 350}
 351
 352/*
 353 * Constants to be programmed into the Xena's registers, to configure
 354 * the XAUI.
 355 */
 356
 357#define	END_SIGN	0x0
 358static const u64 herc_act_dtx_cfg[] = {
 359	/* Set address */
 360	0x8000051536750000ULL, 0x80000515367500E0ULL,
 361	/* Write data */
 362	0x8000051536750004ULL, 0x80000515367500E4ULL,
 363	/* Set address */
 364	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
 365	/* Write data */
 366	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
 367	/* Set address */
 368	0x801205150D440000ULL, 0x801205150D4400E0ULL,
 369	/* Write data */
 370	0x801205150D440004ULL, 0x801205150D4400E4ULL,
 371	/* Set address */
 372	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
 373	/* Write data */
 374	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
 375	/* Done */
 376	END_SIGN
 377};
 378
 379static const u64 xena_dtx_cfg[] = {
 380	/* Set address */
 381	0x8000051500000000ULL, 0x80000515000000E0ULL,
 382	/* Write data */
 383	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
 384	/* Set address */
 385	0x8001051500000000ULL, 0x80010515000000E0ULL,
 386	/* Write data */
 387	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
 388	/* Set address */
 389	0x8002051500000000ULL, 0x80020515000000E0ULL,
 390	/* Write data */
 391	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
 392	END_SIGN
 393};
 394
 395/*
 396 * Constants for Fixing the MacAddress problem seen mostly on
 397 * Alpha machines.
 398 */
 399static const u64 fix_mac[] = {
 400	0x0060000000000000ULL, 0x0060600000000000ULL,
 401	0x0040600000000000ULL, 0x0000600000000000ULL,
 402	0x0020600000000000ULL, 0x0060600000000000ULL,
 403	0x0020600000000000ULL, 0x0060600000000000ULL,
 404	0x0020600000000000ULL, 0x0060600000000000ULL,
 405	0x0020600000000000ULL, 0x0060600000000000ULL,
 406	0x0020600000000000ULL, 0x0060600000000000ULL,
 407	0x0020600000000000ULL, 0x0060600000000000ULL,
 408	0x0020600000000000ULL, 0x0060600000000000ULL,
 409	0x0020600000000000ULL, 0x0060600000000000ULL,
 410	0x0020600000000000ULL, 0x0060600000000000ULL,
 411	0x0020600000000000ULL, 0x0060600000000000ULL,
 412	0x0020600000000000ULL, 0x0000600000000000ULL,
 413	0x0040600000000000ULL, 0x0060600000000000ULL,
 414	END_SIGN
 415};
 416
 417MODULE_DESCRIPTION("Neterion 10GbE driver");
 418MODULE_LICENSE("GPL");
 419MODULE_VERSION(DRV_VERSION);
 420
 421
 422/* Module Loadable parameters. */
 423S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
 424S2IO_PARM_INT(rx_ring_num, 1);
 425S2IO_PARM_INT(multiq, 0);
 426S2IO_PARM_INT(rx_ring_mode, 1);
 427S2IO_PARM_INT(use_continuous_tx_intrs, 1);
 428S2IO_PARM_INT(rmac_pause_time, 0x100);
 429S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
 430S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
 431S2IO_PARM_INT(shared_splits, 0);
 432S2IO_PARM_INT(tmac_util_period, 5);
 433S2IO_PARM_INT(rmac_util_period, 5);
 434S2IO_PARM_INT(l3l4hdr_size, 128);
 435/* 0 is no steering, 1 is Priority steering, 2 is Default steering */
 436S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
 437/* Frequency of Rx desc syncs expressed as power of 2 */
 438S2IO_PARM_INT(rxsync_frequency, 3);
 439/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
 440S2IO_PARM_INT(intr_type, 2);
 441/* Large receive offload feature */
 442
 443/* Max pkts to be aggregated by LRO at one time. If not specified,
 444 * aggregation happens until we hit max IP pkt size(64K)
 445 */
 446S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
 447S2IO_PARM_INT(indicate_max_pkts, 0);
 448
 449S2IO_PARM_INT(napi, 1);
 450S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
 451
 452static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
 453{DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
 454static unsigned int rx_ring_sz[MAX_RX_RINGS] =
 455{[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
 456static unsigned int rts_frm_len[MAX_RX_RINGS] =
 457{[0 ...(MAX_RX_RINGS - 1)] = 0 };
 458
 459module_param_array(tx_fifo_len, uint, NULL, 0);
 460module_param_array(rx_ring_sz, uint, NULL, 0);
 461module_param_array(rts_frm_len, uint, NULL, 0);
 462
 463/*
 464 * S2IO device table.
 465 * This table lists all the devices that this driver supports.
 466 */
 467static const struct pci_device_id s2io_tbl[] = {
 468	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
 469	 PCI_ANY_ID, PCI_ANY_ID},
 470	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
 471	 PCI_ANY_ID, PCI_ANY_ID},
 472	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
 473	 PCI_ANY_ID, PCI_ANY_ID},
 474	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
 475	 PCI_ANY_ID, PCI_ANY_ID},
 476	{0,}
 477};
 478
 479MODULE_DEVICE_TABLE(pci, s2io_tbl);
 480
 481static const struct pci_error_handlers s2io_err_handler = {
 482	.error_detected = s2io_io_error_detected,
 483	.slot_reset = s2io_io_slot_reset,
 484	.resume = s2io_io_resume,
 485};
 486
 487static struct pci_driver s2io_driver = {
 488	.name = "S2IO",
 489	.id_table = s2io_tbl,
 490	.probe = s2io_init_nic,
 491	.remove = s2io_rem_nic,
 492	.err_handler = &s2io_err_handler,
 493};
 494
 495/* A simplifier macro used both by init and free shared_mem Fns(). */
 496#define TXD_MEM_PAGE_CNT(len, per_each) DIV_ROUND_UP(len, per_each)
 497
 498/* netqueue manipulation helper functions */
 499static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
 500{
 501	if (!sp->config.multiq) {
 502		int i;
 503
 504		for (i = 0; i < sp->config.tx_fifo_num; i++)
 505			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
 506	}
 507	netif_tx_stop_all_queues(sp->dev);
 508}
 509
 510static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
 511{
 512	if (!sp->config.multiq)
 513		sp->mac_control.fifos[fifo_no].queue_state =
 514			FIFO_QUEUE_STOP;
 515
 516	netif_tx_stop_all_queues(sp->dev);
 517}
 518
 519static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
 520{
 521	if (!sp->config.multiq) {
 522		int i;
 523
 524		for (i = 0; i < sp->config.tx_fifo_num; i++)
 525			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
 526	}
 527	netif_tx_start_all_queues(sp->dev);
 528}
 529
 530static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
 531{
 532	if (!sp->config.multiq) {
 533		int i;
 534
 535		for (i = 0; i < sp->config.tx_fifo_num; i++)
 536			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
 537	}
 538	netif_tx_wake_all_queues(sp->dev);
 539}
 540
 541static inline void s2io_wake_tx_queue(
 542	struct fifo_info *fifo, int cnt, u8 multiq)
 543{
 544
 545	if (multiq) {
 546		if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
 547			netif_wake_subqueue(fifo->dev, fifo->fifo_no);
 548	} else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
 549		if (netif_queue_stopped(fifo->dev)) {
 550			fifo->queue_state = FIFO_QUEUE_START;
 551			netif_wake_queue(fifo->dev);
 552		}
 553	}
 554}
 555
/**
 * init_shared_mem - Allocation and Initialization of Memory
 * @nic: Device private variable.
 * Description: The function allocates all the memory areas shared
 * between the NIC and the driver. This includes Tx descriptors,
 * Rx descriptors and the statistics block.
 * Note: on any allocation failure this function returns immediately
 * without unwinding; the caller is expected to invoke
 * free_shared_mem() to release whatever was allocated before the
 * failure (see the inline comments on the failure paths).
 * Return: SUCCESS, or FAILURE / -EINVAL / -ENOMEM on error.
 */

static int init_shared_mem(struct s2io_nic *nic)
{
	u32 size;
	void *tmp_v_addr, *tmp_v_addr_next;
	dma_addr_t tmp_p_addr, tmp_p_addr_next;
	struct RxD_block *pre_rxd_blk = NULL;
	int i, j, blk_cnt;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;
	unsigned long tmp;
	struct buffAdd *ba;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;
	unsigned long long mem_allocated = 0;	/* running total for sw stats */

	/* Allocation and initialization of TXDLs in FIFOs */
	/* Reject configurations requesting more TxDs than supported. */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size += tx_cfg->fifo_len;
	}
	if (size > MAX_AVAILABLE_TXDS) {
		DBG_PRINT(ERR_DBG,
			  "Too many TxDs requested: %d, max supported: %d\n",
			  size, MAX_AVAILABLE_TXDS);
		return -EINVAL;
	}

	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size = tx_cfg->fifo_len;
		/*
		 * Legal values are from 2 to 8192
		 */
		if (size < 2) {
			DBG_PRINT(ERR_DBG, "Fifo %d: Invalid length (%d) - "
				  "Valid lengths are 2 through 8192\n",
				  i, size);
			return -EINVAL;
		}
	}

	/* Number of TxD lists that fit in a single page. */
	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	/* One list_info_hold entry per TxD list, for every FIFO. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
		int fifo_len = tx_cfg->fifo_len;
		int list_holder_size = fifo_len * sizeof(struct list_info_hold);

		fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
		if (!fifo->list_info) {
			DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n");
			return -ENOMEM;
		}
		mem_allocated += list_holder_size;
	}
	/* Carve page-sized DMA allocations into per-TxD-list slices. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		fifo->tx_curr_put_info.offset = 0;
		fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
		fifo->tx_curr_get_info.offset = 0;
		fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
		fifo->fifo_no = i;
		fifo->nic = nic;
		fifo->max_txds = MAX_SKB_FRAGS + 2;
		fifo->dev = dev;

		for (j = 0; j < page_num; j++) {
			int k = 0;
			dma_addr_t tmp_p;
			void *tmp_v;
			tmp_v = dma_alloc_coherent(&nic->pdev->dev, PAGE_SIZE,
						   &tmp_p, GFP_KERNEL);
			if (!tmp_v) {
				DBG_PRINT(INFO_DBG,
					  "dma_alloc_coherent failed for TxDL\n");
				return -ENOMEM;
			}
			/* If we got a zero DMA address(can happen on
			 * certain platforms like PPC), reallocate.
			 * Store virtual address of page we don't want,
			 * to be freed later.
			 */
			if (!tmp_p) {
				mac_control->zerodma_virt_addr = tmp_v;
				DBG_PRINT(INIT_DBG,
					  "%s: Zero DMA address for TxDL. "
					  "Virtual address %p\n",
					  dev->name, tmp_v);
				tmp_v = dma_alloc_coherent(&nic->pdev->dev,
							   PAGE_SIZE, &tmp_p,
							   GFP_KERNEL);
				if (!tmp_v) {
					DBG_PRINT(INFO_DBG,
						  "dma_alloc_coherent failed for TxDL\n");
					return -ENOMEM;
				}
				mem_allocated += PAGE_SIZE;
			}
			while (k < lst_per_page) {
				int l = (j * lst_per_page) + k;
				if (l == tx_cfg->fifo_len)
					break;
				fifo->list_info[l].list_virt_addr =
					tmp_v + (k * lst_size);
				fifo->list_info[l].list_phy_addr =
					tmp_p + (k * lst_size);
				k++;
			}
		}
	}

	/* Per-FIFO scratch array, one u64 per TxD entry. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		size = tx_cfg->fifo_len;
		fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
		if (!fifo->ufo_in_band_v)
			return -ENOMEM;
		mem_allocated += (size * sizeof(u64));
	}

	/* Allocation and initialization of RXDs in Rings */
	size = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
			DBG_PRINT(ERR_DBG, "%s: Ring%d RxD count is not a "
				  "multiple of RxDs per Block\n",
				  dev->name, i);
			return FAILURE;
		}
		size += rx_cfg->num_rxd;
		ring->block_count = rx_cfg->num_rxd /
			(rxd_count[nic->rxd_mode] + 1);
		/* One RxD per block is consumed by the link descriptor. */
		ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
	}
	if (nic->rxd_mode == RXD_MODE_1)
		size = (size * (sizeof(struct RxD1)));
	else
		size = (size * (sizeof(struct RxD3)));

	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		ring->rx_curr_get_info.block_index = 0;
		ring->rx_curr_get_info.offset = 0;
		ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
		ring->rx_curr_put_info.block_index = 0;
		ring->rx_curr_put_info.offset = 0;
		ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
		ring->nic = nic;
		ring->ring_no = i;

		blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
		/*  Allocating all the Rx blocks */
		for (j = 0; j < blk_cnt; j++) {
			struct rx_block_info *rx_blocks;
			int l;

			rx_blocks = &ring->rx_blocks[j];
			size = SIZE_OF_BLOCK;	/* size is always page size */
			tmp_v_addr = dma_alloc_coherent(&nic->pdev->dev, size,
							&tmp_p_addr, GFP_KERNEL);
			if (tmp_v_addr == NULL) {
				/*
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was alloced till the
				 * failure happened.
				 */
				rx_blocks->block_virt_addr = tmp_v_addr;
				return -ENOMEM;
			}
			mem_allocated += size;

			size = sizeof(struct rxd_info) *
				rxd_count[nic->rxd_mode];
			rx_blocks->block_virt_addr = tmp_v_addr;
			rx_blocks->block_dma_addr = tmp_p_addr;
			rx_blocks->rxds = kmalloc(size,  GFP_KERNEL);
			if (!rx_blocks->rxds)
				return -ENOMEM;
			mem_allocated += size;
			/* Record the virtual/DMA address of each RxD slot. */
			for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
				rx_blocks->rxds[l].virt_addr =
					rx_blocks->block_virt_addr +
					(rxd_size[nic->rxd_mode] * l);
				rx_blocks->rxds[l].dma_addr =
					rx_blocks->block_dma_addr +
					(rxd_size[nic->rxd_mode] * l);
			}
		}
		/* Interlinking all Rx Blocks */
		/* Circular list: the last block links back to the first. */
		for (j = 0; j < blk_cnt; j++) {
			int next = (j + 1) % blk_cnt;
			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
			tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
			tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;

			pre_rxd_blk = tmp_v_addr;
			pre_rxd_blk->reserved_2_pNext_RxD_block =
				(unsigned long)tmp_v_addr_next;
			pre_rxd_blk->pNext_RxD_Blk_physical =
				(u64)tmp_p_addr_next;
		}
	}
	if (nic->rxd_mode == RXD_MODE_3B) {
		/*
		 * Allocation of Storages for buffer addresses in 2BUFF mode
		 * and the buffers as well.
		 */
		for (i = 0; i < config->rx_ring_num; i++) {
			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
			struct ring_info *ring = &mac_control->rings[i];

			blk_cnt = rx_cfg->num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			size = sizeof(struct buffAdd *) * blk_cnt;
			ring->ba = kmalloc(size, GFP_KERNEL);
			if (!ring->ba)
				return -ENOMEM;
			mem_allocated += size;
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;

				size = sizeof(struct buffAdd) *
					(rxd_count[nic->rxd_mode] + 1);
				ring->ba[j] = kmalloc(size, GFP_KERNEL);
				if (!ring->ba[j])
					return -ENOMEM;
				mem_allocated += size;
				/* Over-allocate by ALIGN_SIZE, then round the
				 * working pointers ba_0/ba_1 up to alignment;
				 * ba_*_org keep the kfree()-able originals.
				 */
				while (k != rxd_count[nic->rxd_mode]) {
					ba = &ring->ba[j][k];
					size = BUF0_LEN + ALIGN_SIZE;
					ba->ba_0_org = kmalloc(size, GFP_KERNEL);
					if (!ba->ba_0_org)
						return -ENOMEM;
					mem_allocated += size;
					tmp = (unsigned long)ba->ba_0_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long)ALIGN_SIZE);
					ba->ba_0 = (void *)tmp;

					size = BUF1_LEN + ALIGN_SIZE;
					ba->ba_1_org = kmalloc(size, GFP_KERNEL);
					if (!ba->ba_1_org)
						return -ENOMEM;
					mem_allocated += size;
					tmp = (unsigned long)ba->ba_1_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long)ALIGN_SIZE);
					ba->ba_1 = (void *)tmp;
					k++;
				}
			}
		}
	}

	/* Allocation and initialization of Statistics block */
	size = sizeof(struct stat_block);
	mac_control->stats_mem =
		dma_alloc_coherent(&nic->pdev->dev, size,
				   &mac_control->stats_mem_phy, GFP_KERNEL);

	if (!mac_control->stats_mem) {
		/*
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was alloced till the
		 * failure happened.
		 */
		return -ENOMEM;
	}
	mem_allocated += size;
	mac_control->stats_mem_sz = size;

	tmp_v_addr = mac_control->stats_mem;
	mac_control->stats_info = tmp_v_addr;
	/* Zero the statistics block before the NIC starts updating it. */
	memset(tmp_v_addr, 0, size);
	/* NOTE(review): tmp_p_addr here still holds the DMA address of the
	 * last Rx block allocated above, not the stats block -- confirm
	 * this is the intended value to log.
	 */
	DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n",
		dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr);
	mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
	return SUCCESS;
}
 862
/**
 * free_shared_mem - Free the allocated Memory
 * @nic:  Device private variable.
 * Description: This function is to free all memory locations allocated by
 * the init_shared_mem() function and return it to the kernel.
 * Tolerates partially-completed init_shared_mem() runs: it stops at the
 * first unallocated TxD list / Rx block and skips structures that were
 * never allocated.
 */

static void free_shared_mem(struct s2io_nic *nic)
{
	int i, j, blk_cnt, size;
	void *tmp_v_addr;
	dma_addr_t tmp_p_addr;
	int lst_size, lst_per_page;
	struct net_device *dev;
	int page_num = 0;
	struct config_param *config;
	struct mac_info *mac_control;
	struct stat_block *stats;
	struct swStat *swstats;

	if (!nic)
		return;

	dev = nic->dev;

	config = &nic->config;
	mac_control = &nic->mac_control;
	stats = mac_control->stats_info;
	/* NOTE(review): stats_info is NULL if init_shared_mem() failed
	 * before the statistics block was allocated; the swstats->mem_freed
	 * updates below would then dereference a near-NULL pointer --
	 * verify the early-failure path.
	 */
	swstats = &stats->sw_stat;

	lst_size = sizeof(struct TxD) * config->max_txds;
	lst_per_page = PAGE_SIZE / lst_size;

	/* Release the per-FIFO TxD list pages and their list_info arrays. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
		for (j = 0; j < page_num; j++) {
			int mem_blks = (j * lst_per_page);
			struct list_info_hold *fli;

			/* list_info was never allocated for this FIFO, so
			 * later FIFOs cannot have been allocated either.
			 */
			if (!fifo->list_info)
				return;

			fli = &fifo->list_info[mem_blks];
			if (!fli->list_virt_addr)
				break;
			dma_free_coherent(&nic->pdev->dev, PAGE_SIZE,
					  fli->list_virt_addr,
					  fli->list_phy_addr);
			swstats->mem_freed += PAGE_SIZE;
		}
		/* If we got a zero DMA address during allocation,
		 * free the page now
		 */
		if (mac_control->zerodma_virt_addr) {
			dma_free_coherent(&nic->pdev->dev, PAGE_SIZE,
					  mac_control->zerodma_virt_addr,
					  (dma_addr_t)0);
			DBG_PRINT(INIT_DBG,
				  "%s: Freeing TxDL with zero DMA address. "
				  "Virtual address %p\n",
				  dev->name, mac_control->zerodma_virt_addr);
			swstats->mem_freed += PAGE_SIZE;
		}
		kfree(fifo->list_info);
		swstats->mem_freed += tx_cfg->fifo_len *
			sizeof(struct list_info_hold);
	}

	/* Release the Rx block DMA pages and their rxd_info arrays. */
	size = SIZE_OF_BLOCK;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		blk_cnt = ring->block_count;
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
			if (tmp_v_addr == NULL)
				break;
			dma_free_coherent(&nic->pdev->dev, size, tmp_v_addr,
					  tmp_p_addr);
			swstats->mem_freed += size;
			kfree(ring->rx_blocks[j].rxds);
			swstats->mem_freed += sizeof(struct rxd_info) *
				rxd_count[nic->rxd_mode];
		}
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Freeing buffer storage addresses in 2BUFF mode. */
		for (i = 0; i < config->rx_ring_num; i++) {
			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
			struct ring_info *ring = &mac_control->rings[i];

			blk_cnt = rx_cfg->num_rxd /
				(rxd_count[nic->rxd_mode] + 1);
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				if (!ring->ba[j])
					continue;
				while (k != rxd_count[nic->rxd_mode]) {
					struct buffAdd *ba = &ring->ba[j][k];
					/* Free the original (unaligned)
					 * pointers, not ba_0/ba_1.
					 */
					kfree(ba->ba_0_org);
					swstats->mem_freed +=
						BUF0_LEN + ALIGN_SIZE;
					kfree(ba->ba_1_org);
					swstats->mem_freed +=
						BUF1_LEN + ALIGN_SIZE;
					k++;
				}
				kfree(ring->ba[j]);
				swstats->mem_freed += sizeof(struct buffAdd) *
					(rxd_count[nic->rxd_mode] + 1);
			}
			kfree(ring->ba);
			swstats->mem_freed += sizeof(struct buffAdd *) *
				blk_cnt;
		}
	}

	for (i = 0; i < nic->config.tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		if (fifo->ufo_in_band_v) {
			swstats->mem_freed += tx_cfg->fifo_len *
				sizeof(u64);
			kfree(fifo->ufo_in_band_v);
		}
	}

	if (mac_control->stats_mem) {
		swstats->mem_freed += mac_control->stats_mem_sz;
		dma_free_coherent(&nic->pdev->dev, mac_control->stats_mem_sz,
				  mac_control->stats_mem,
				  mac_control->stats_mem_phy);
	}
}
1003
1004/*
1005 * s2io_verify_pci_mode -
1006 */
1007
1008static int s2io_verify_pci_mode(struct s2io_nic *nic)
1009{
1010	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1011	register u64 val64 = 0;
1012	int     mode;
1013
1014	val64 = readq(&bar0->pci_mode);
1015	mode = (u8)GET_PCI_MODE(val64);
1016
1017	if (val64 & PCI_MODE_UNKNOWN_MODE)
1018		return -1;      /* Unknown PCI mode */
1019	return mode;
1020}
1021
1022#define NEC_VENID   0x1033
1023#define NEC_DEVID   0x0125
1024static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1025{
1026	struct pci_dev *tdev = NULL;
1027	for_each_pci_dev(tdev) {
1028		if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
1029			if (tdev->bus == s2io_pdev->bus->parent) {
1030				pci_dev_put(tdev);
1031				return 1;
1032			}
1033		}
1034	}
1035	return 0;
1036}
1037
1038static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
1039/*
1040 * s2io_print_pci_mode -
1041 */
1042static int s2io_print_pci_mode(struct s2io_nic *nic)
1043{
1044	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1045	register u64 val64 = 0;
1046	int	mode;
1047	struct config_param *config = &nic->config;
1048	const char *pcimode;
1049
1050	val64 = readq(&bar0->pci_mode);
1051	mode = (u8)GET_PCI_MODE(val64);
1052
1053	if (val64 & PCI_MODE_UNKNOWN_MODE)
1054		return -1;	/* Unknown PCI mode */
1055
1056	config->bus_speed = bus_speed[mode];
1057
1058	if (s2io_on_nec_bridge(nic->pdev)) {
1059		DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1060			  nic->dev->name);
1061		return mode;
1062	}
1063
1064	switch (mode) {
1065	case PCI_MODE_PCI_33:
1066		pcimode = "33MHz PCI bus";
1067		break;
1068	case PCI_MODE_PCI_66:
1069		pcimode = "66MHz PCI bus";
1070		break;
1071	case PCI_MODE_PCIX_M1_66:
1072		pcimode = "66MHz PCIX(M1) bus";
1073		break;
1074	case PCI_MODE_PCIX_M1_100:
1075		pcimode = "100MHz PCIX(M1) bus";
1076		break;
1077	case PCI_MODE_PCIX_M1_133:
1078		pcimode = "133MHz PCIX(M1) bus";
1079		break;
1080	case PCI_MODE_PCIX_M2_66:
1081		pcimode = "133MHz PCIX(M2) bus";
1082		break;
1083	case PCI_MODE_PCIX_M2_100:
1084		pcimode = "200MHz PCIX(M2) bus";
1085		break;
1086	case PCI_MODE_PCIX_M2_133:
1087		pcimode = "266MHz PCIX(M2) bus";
1088		break;
1089	default:
1090		pcimode = "unsupported bus!";
1091		mode = -1;
1092	}
1093
1094	DBG_PRINT(ERR_DBG, "%s: Device is on %d bit %s\n",
1095		  nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64, pcimode);
1096
1097	return mode;
1098}
1099
1100/**
1101 *  init_tti - Initialization transmit traffic interrupt scheme
1102 *  @nic: device private variable
1103 *  @link: link status (UP/DOWN) used to enable/disable continuous
1104 *  transmit interrupts
1105 *  @may_sleep: parameter indicates if sleeping when waiting for
1106 *  command complete
1107 *  Description: The function configures transmit traffic interrupts
1108 *  Return Value:  SUCCESS on success and
1109 *  '-1' on failure
1110 */
1111
static int init_tti(struct s2io_nic *nic, int link, bool may_sleep)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	int i;
	struct config_param *config = &nic->config;

	/* Program one TTI entry per Tx FIFO via the indirect
	 * data1/data2/command register interface.
	 */
	for (i = 0; i < config->tx_fifo_num; i++) {
		/*
		 * TTI Initialization. Default Tx timer gets us about
		 * 250 interrupts per sec. Continuous interrupts are enabled
		 * by default.
		 */
		if (nic->device_type == XFRAME_II_DEVICE) {
			/* Scale the timer with the measured bus speed */
			int count = (nic->config.bus_speed * 125)/2;
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
		} else
			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);

		val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
			TTI_DATA1_MEM_TX_URNG_B(0x10) |
			TTI_DATA1_MEM_TX_URNG_C(0x30) |
			TTI_DATA1_MEM_TX_TIMER_AC_EN;
		/* Continuous Tx interrupts only on FIFO 0 and only while
		 * the link is up.
		 */
		if (i == 0)
			if (use_continuous_tx_intrs && (link == LINK_UP))
				val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
		writeq(val64, &bar0->tti_data1_mem);

		if (nic->config.intr_type == MSI_X) {
			val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
				TTI_DATA2_MEM_TX_UFC_B(0x100) |
				TTI_DATA2_MEM_TX_UFC_C(0x200) |
				TTI_DATA2_MEM_TX_UFC_D(0x300);
		} else {
			/* With default Tx steering, the UDP FIFO range gets
			 * higher utilization-based frame counts.
			 */
			if ((nic->config.tx_steering_type ==
			     TX_DEFAULT_STEERING) &&
			    (config->tx_fifo_num > 1) &&
			    (i >= nic->udp_fifo_idx) &&
			    (i < (nic->udp_fifo_idx +
				  nic->total_udp_fifos)))
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
					TTI_DATA2_MEM_TX_UFC_B(0x80) |
					TTI_DATA2_MEM_TX_UFC_C(0x100) |
					TTI_DATA2_MEM_TX_UFC_D(0x120);
			else
				val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
					TTI_DATA2_MEM_TX_UFC_B(0x20) |
					TTI_DATA2_MEM_TX_UFC_C(0x40) |
					TTI_DATA2_MEM_TX_UFC_D(0x80);
		}

		writeq(val64, &bar0->tti_data2_mem);

		/* Commit the entry for FIFO i and wait for the strobe
		 * bit to clear, indicating the command completed.
		 */
		val64 = TTI_CMD_MEM_WE |
			TTI_CMD_MEM_STROBE_NEW_CMD |
			TTI_CMD_MEM_OFFSET(i);
		writeq(val64, &bar0->tti_command_mem);

		if (wait_for_cmd_complete(&bar0->tti_command_mem,
					  TTI_CMD_MEM_STROBE_NEW_CMD,
					  S2IO_BIT_RESET, may_sleep) != SUCCESS)
			return FAILURE;
	}

	return SUCCESS;
}
1178
1179/**
1180 *  init_nic - Initialization of hardware
1181 *  @nic: device private variable
1182 *  Description: The function sequentially configures every block
1183 *  of the H/W from their reset values.
1184 *  Return Value:  SUCCESS on success and
1185 *  '-1' on failure (endian settings incorrect).
1186 */
1187
static int init_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	void __iomem *add;
	u32 time;
	int i, j;
	int dtx_cnt = 0;
	unsigned long long mem_share;
	int mem_size;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	/* to set the swapper control on the card */
	if (s2io_set_swapper(nic)) {
		DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
		return -EIO;
	}

	/*
	 * Herc requires EOI to be removed from reset before XGXS, so..
	 */
	if (nic->device_type & XFRAME_II_DEVICE) {
		val64 = 0xA500000000ULL;
		writeq(val64, &bar0->sw_reset);
		msleep(500);
		val64 = readq(&bar0->sw_reset);
	}

	/* Remove XGXS from reset state */
	val64 = 0;
	writeq(val64, &bar0->sw_reset);
	msleep(500);
	val64 = readq(&bar0->sw_reset);

	/* Ensure that it's safe to access registers by checking
	 * RIC_RUNNING bit is reset. Check is valid only for XframeII.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		/* Poll up to ~500ms (50 x 10ms) for RIC to go idle */
		for (i = 0; i < 50; i++) {
			val64 = readq(&bar0->adapter_status);
			if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
				break;
			msleep(10);
		}
		if (i == 50)
			return -ENODEV;
	}

	/*  Enable Receiving broadcasts */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_RMAC_BCAST_ENABLE;
	/* mac_cfg is key-protected: each 32-bit half is written after
	 * unlocking with the RMAC_CFG_KEY magic value.
	 */
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32)val64, add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));

	/* Read registers in all blocks */
	val64 = readq(&bar0->mac_int_mask);
	val64 = readq(&bar0->mc_int_mask);
	val64 = readq(&bar0->xgxs_int_mask);

	/*  Set MTU */
	val64 = dev->mtu;
	writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);

	/* Program the device transceiver (DTX) with the END_SIGN
	 * terminated table appropriate to the adapter generation.
	 */
	if (nic->device_type & XFRAME_II_DEVICE) {
		while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			if (dtx_cnt & 0x1)
				msleep(1); /* Necessary!! */
			dtx_cnt++;
		}
	} else {
		while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
			SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
					  &bar0->dtx_control, UF);
			val64 = readq(&bar0->dtx_control);
			dtx_cnt++;
		}
	}

	/*  Tx DMA Initialization */
	val64 = 0;
	writeq(val64, &bar0->tx_fifo_partition_0);
	writeq(val64, &bar0->tx_fifo_partition_1);
	writeq(val64, &bar0->tx_fifo_partition_2);
	writeq(val64, &bar0->tx_fifo_partition_3);

	/* Each partition register packs two FIFOs' length and priority;
	 * j tracks the position within the current register.
	 */
	for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
			vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);

		/* On the last FIFO, advance an even index so the switch
		 * below flushes the partially filled partition register.
		 */
		if (i == (config->tx_fifo_num - 1)) {
			if (i % 2 == 0)
				i++;
		}

		switch (i) {
		case 1:
			writeq(val64, &bar0->tx_fifo_partition_0);
			val64 = 0;
			j = 0;
			break;
		case 3:
			writeq(val64, &bar0->tx_fifo_partition_1);
			val64 = 0;
			j = 0;
			break;
		case 5:
			writeq(val64, &bar0->tx_fifo_partition_2);
			val64 = 0;
			j = 0;
			break;
		case 7:
			writeq(val64, &bar0->tx_fifo_partition_3);
			val64 = 0;
			j = 0;
			break;
		default:
			j++;
			break;
		}
	}

	/*
	 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
	 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
	 */
	if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4))
		writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);

	val64 = readq(&bar0->tx_fifo_partition_0);
	DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
		  &bar0->tx_fifo_partition_0, (unsigned long long)val64);

	/*
	 * Initialization of Tx_PA_CONFIG register to ignore packet
	 * integrity checking.
	 */
	val64 = readq(&bar0->tx_pa_cfg);
	val64 |= TX_PA_CFG_IGNORE_FRM_ERR |
		TX_PA_CFG_IGNORE_SNAP_OUI |
		TX_PA_CFG_IGNORE_LLC_CTRL |
		TX_PA_CFG_IGNORE_L2_ERR;
	writeq(val64, &bar0->tx_pa_cfg);

	/* Rx DMA initialization. */
	val64 = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];

		val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
	}
	writeq(val64, &bar0->rx_queue_priority);

	/*
	 * Allocating equal share of memory to all the
	 * configured Rings.
	 */
	val64 = 0;
	if (nic->device_type & XFRAME_II_DEVICE)
		mem_size = 32;
	else
		mem_size = 64;

	/* Ring 0 additionally absorbs the remainder of the division */
	for (i = 0; i < config->rx_ring_num; i++) {
		switch (i) {
		case 0:
			mem_share = (mem_size / config->rx_ring_num +
				     mem_size % config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
			continue;
		case 1:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
			continue;
		case 2:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
			continue;
		case 3:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
			continue;
		case 4:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
			continue;
		case 5:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
			continue;
		case 6:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
			continue;
		case 7:
			mem_share = (mem_size / config->rx_ring_num);
			val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
			continue;
		}
	}
	writeq(val64, &bar0->rx_queue_cfg);

	/*
	 * Filling Tx round robin registers
	 * as per the number of FIFOs for equal scheduling priority
	 */
	switch (config->tx_fifo_num) {
	case 1:
		val64 = 0x0;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 2:
		val64 = 0x0001000100010001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001000100000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 3:
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0200010200010200ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0102000102000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0200010200000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 4:
		val64 = 0x0001020300010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 5:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0304000102030400ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0400010203040001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0203040000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 6:
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0203040500010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0405000102030405ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0203040500000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 7:
		val64 = 0x0001020304050600ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		val64 = 0x0102030405060001ULL;
		writeq(val64, &bar0->tx_w_round_robin_1);
		val64 = 0x0203040506000102ULL;
		writeq(val64, &bar0->tx_w_round_robin_2);
		val64 = 0x0304050600010203ULL;
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0405060000000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	case 8:
		val64 = 0x0001020304050607ULL;
		writeq(val64, &bar0->tx_w_round_robin_0);
		writeq(val64, &bar0->tx_w_round_robin_1);
		writeq(val64, &bar0->tx_w_round_robin_2);
		writeq(val64, &bar0->tx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->tx_w_round_robin_4);
		break;
	}

	/* Enable all configured Tx FIFO partitions */
	val64 = readq(&bar0->tx_fifo_partition_0);
	val64 |= (TX_FIFO_PARTITION_EN);
	writeq(val64, &bar0->tx_fifo_partition_0);

	/* Filling the Rx round robin registers as per the
	 * number of Rings and steering based on QoS with
	 * equal priority.
	 */
	switch (config->rx_ring_num) {
	case 1:
		val64 = 0x0;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080808080808080ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 2:
		val64 = 0x0001000100010001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001000100000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080808040404040ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 3:
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0200010200010200ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0102000102000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020001020001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0200010200000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080804040402020ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 4:
		val64 = 0x0001020300010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201010ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 5:
		val64 = 0x0001020304000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0304000102030400ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0102030400010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0400010203040001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0203040000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020201008ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 6:
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0203040500010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0405000102030405ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0001020304050001ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0203040500000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080404020100804ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 7:
		val64 = 0x0001020304050600ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		val64 = 0x0102030405060001ULL;
		writeq(val64, &bar0->rx_w_round_robin_1);
		val64 = 0x0203040506000102ULL;
		writeq(val64, &bar0->rx_w_round_robin_2);
		val64 = 0x0304050600010203ULL;
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0405060000000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8080402010080402ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	case 8:
		val64 = 0x0001020304050607ULL;
		writeq(val64, &bar0->rx_w_round_robin_0);
		writeq(val64, &bar0->rx_w_round_robin_1);
		writeq(val64, &bar0->rx_w_round_robin_2);
		writeq(val64, &bar0->rx_w_round_robin_3);
		val64 = 0x0001020300000000ULL;
		writeq(val64, &bar0->rx_w_round_robin_4);

		val64 = 0x8040201008040201ULL;
		writeq(val64, &bar0->rts_qos_steering);
		break;
	}

	/* UDP Fix */
	val64 = 0;
	for (i = 0; i < 8; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the default rts frame length for the rings configured */
	val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
	for (i = 0 ; i < config->rx_ring_num ; i++)
		writeq(val64, &bar0->rts_frm_len_n[i]);

	/* Set the frame length for the configured rings
	 * desired by the user
	 */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* If rts_frm_len[i] == 0 then it is assumed that user not
		 * specified frame length steering.
		 * If the user provides the frame length then program
		 * the rts_frm_len register for those values or else
		 * leave it as it is.
		 */
		if (rts_frm_len[i] != 0) {
			writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
			       &bar0->rts_frm_len_n[i]);
		}
	}

	/* Disable differentiated services steering logic */
	for (i = 0; i < 64; i++) {
		if (rts_ds_steer(nic, i, 0) == FAILURE) {
			DBG_PRINT(ERR_DBG,
				  "%s: rts_ds_steer failed on codepoint %d\n",
				  dev->name, i);
			return -ENODEV;
		}
	}

	/* Program statistics memory */
	writeq(mac_control->stats_mem_phy, &bar0->stat_addr);

	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = STAT_BC(0x320);
		writeq(val64, &bar0->stat_byte_cnt);
	}

	/*
	 * Initializing the sampling rate for the device to calculate the
	 * bandwidth utilization.
	 */
	val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
		MAC_RX_LINK_UTIL_VAL(rmac_util_period);
	writeq(val64, &bar0->mac_link_util);

	/*
	 * Initializing the Transmit and Receive Traffic Interrupt
	 * Scheme.
	 */

	/* Initialize TTI */
	if (SUCCESS != init_tti(nic, nic->last_link_state, true))
		return -ENODEV;

	/* RTI Initialization */
	if (nic->device_type == XFRAME_II_DEVICE) {
		/*
		 * Programmed to generate Apprx 500 Intrs per
		 * second
		 */
		int count = (nic->config.bus_speed * 125)/4;
		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
	} else
		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
	val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
		RTI_DATA1_MEM_RX_URNG_B(0x10) |
		RTI_DATA1_MEM_RX_URNG_C(0x30) |
		RTI_DATA1_MEM_RX_TIMER_AC_EN;

	writeq(val64, &bar0->rti_data1_mem);

	val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
		RTI_DATA2_MEM_RX_UFC_B(0x2) ;
	if (nic->config.intr_type == MSI_X)
		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
			  RTI_DATA2_MEM_RX_UFC_D(0x40));
	else
		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
			  RTI_DATA2_MEM_RX_UFC_D(0x80));
	writeq(val64, &bar0->rti_data2_mem);

	/* Commit one RTI entry per configured Rx ring */
	for (i = 0; i < config->rx_ring_num; i++) {
		val64 = RTI_CMD_MEM_WE |
			RTI_CMD_MEM_STROBE_NEW_CMD |
			RTI_CMD_MEM_OFFSET(i);
		writeq(val64, &bar0->rti_command_mem);

		/*
		 * Once the operation completes, the Strobe bit of the
		 * command register will be reset. We poll for this
		 * particular condition. We wait for a maximum of 500ms
		 * for the operation to complete, if it's not complete
		 * by then we return error.
		 */
		time = 0;
		while (true) {
			val64 = readq(&bar0->rti_command_mem);
			if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
				break;

			if (time > 10) {
				DBG_PRINT(ERR_DBG, "%s: RTI init failed\n",
					  dev->name);
				return -ENODEV;
			}
			time++;
			msleep(50);
		}
	}

	/*
	 * Initializing proper values as Pause threshold into all
	 * the 8 Queues on Rx side.
	 */
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);

	/* Disable RMAC PAD STRIPPING */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64), add);
	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	writel((u32) (val64 >> 32), (add + 4));
	val64 = readq(&bar0->mac_cfg);

	/* Enable FCS stripping by adapter */
	add = &bar0->mac_cfg;
	val64 = readq(&bar0->mac_cfg);
	val64 |= MAC_CFG_RMAC_STRIP_FCS;
	/* Xframe II takes a direct 64-bit write; Xframe I needs the
	 * key-protected two-halves sequence.
	 */
	if (nic->device_type == XFRAME_II_DEVICE)
		writeq(val64, &bar0->mac_cfg);
	else {
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64), add);
		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
		writel((u32) (val64 >> 32), (add + 4));
	}

	/*
	 * Set the time value to be inserted in the pause frame
	 * generated by xena.
	 */
	val64 = readq(&bar0->rmac_pause_cfg);
	val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
	val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
	writeq(val64, &bar0->rmac_pause_cfg);

	/*
	 * Set the Threshold Limit for Generating the pause frame
	 * If the amount of data in any Queue exceeds ratio of
	 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
	 * pause frame is generated
	 */
	val64 = 0;
	for (i = 0; i < 4; i++) {
		val64 |= (((u64)0xFF00 |
			   nic->mac_control.mc_pause_threshold_q0q3)
			  << (i * 2 * 8));
	}
	writeq(val64, &bar0->mc_pause_thresh_q0q3);

	val64 = 0;
	for (i = 0; i < 4; i++) {
		val64 |= (((u64)0xFF00 |
			   nic->mac_control.mc_pause_threshold_q4q7)
			  << (i * 2 * 8));
	}
	writeq(val64, &bar0->mc_pause_thresh_q4q7);

	/*
	 * TxDMA will stop Read request if the number of read split has
	 * exceeded the limit pointed by shared_splits
	 */
	val64 = readq(&bar0->pic_control);
	val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
	writeq(val64, &bar0->pic_control);

	if (nic->config.bus_speed == 266) {
		writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
		writeq(0x0, &bar0->read_retry_delay);
		writeq(0x0, &bar0->write_retry_delay);
	}

	/*
	 * Programming the Herc to split every write transaction
	 * that does not start on an ADB to reduce disconnects.
	 */
	if (nic->device_type == XFRAME_II_DEVICE) {
		val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
			MISC_LINK_STABILITY_PRD(3);
		writeq(val64, &bar0->misc_control);
		val64 = readq(&bar0->pic_control2);
		val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
		writeq(val64, &bar0->pic_control2);
	}
	if (strstr(nic->product_name, "CX4")) {
		val64 = TMAC_AVG_IPG(0x17);
		writeq(val64, &bar0->tmac_avg_ipg);
	}

	return SUCCESS;
}
1818#define LINK_UP_DOWN_INTERRUPT		1
1819#define MAC_RMAC_ERR_TIMER		2
1820
1821static int s2io_link_fault_indication(struct s2io_nic *nic)
1822{
1823	if (nic->device_type == XFRAME_II_DEVICE)
1824		return LINK_UP_DOWN_INTERRUPT;
1825	else
1826		return MAC_RMAC_ERR_TIMER;
1827}
1828
1829/**
1830 *  do_s2io_write_bits -  update alarm bits in alarm register
1831 *  @value: alarm bits
1832 *  @flag: interrupt status
1833 *  @addr: address value
1834 *  Description: update alarm bits in alarm register
1835 *  Return Value:
1836 *  NONE.
1837 */
1838static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1839{
1840	u64 temp64;
1841
1842	temp64 = readq(addr);
1843
1844	if (flag == ENABLE_INTRS)
1845		temp64 &= ~((u64)value);
1846	else
1847		temp64 |= ((u64)value);
1848	writeq(temp64, addr);
1849}
1850
/**
 *  en_dis_err_alarms - enable or disable error/alarm interrupts
 *  @nic: device private variable
 *  @mask: bitmask selecting which interrupt blocks to program
 *  (TX_DMA_INTR, TX_MAC_INTR, TX_XGXS_INTR, RX_DMA_INTR, ...)
 *  @flag: ENABLE_INTRS or DISABLE_INTRS
 *  Description: Programs the per-block error mask registers through
 *  do_s2io_write_bits() and accumulates the corresponding top-level
 *  bits into nic->general_int_mask.
 *  Return Value: NONE.
 */
static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 gen_int_mask = 0;
	u64 interruptible;

	/* Mask everything at the top level while individual blocks
	 * are reprogrammed.
	 */
	writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
	if (mask & TX_DMA_INTR) {
		gen_int_mask |= TXDMA_INT_M;

		do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
				   TXDMA_PCC_INT | TXDMA_TTI_INT |
				   TXDMA_LSO_INT | TXDMA_TPA_INT |
				   TXDMA_SM_INT, flag, &bar0->txdma_int_mask);

		do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
				   PFC_MISC_0_ERR | PFC_MISC_1_ERR |
				   PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
				   &bar0->pfc_err_mask);

		do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
				   TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
				   TDA_PCIX_ERR, flag, &bar0->tda_err_mask);

		do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
				   PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
				   PCC_N_SERR | PCC_6_COF_OV_ERR |
				   PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
				   PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
				   PCC_TXB_ECC_SG_ERR,
				   flag, &bar0->pcc_err_mask);

		do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
				   TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);

		do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
				   LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
				   LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
				   flag, &bar0->lso_err_mask);

		do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
				   flag, &bar0->tpa_err_mask);

		do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
	}

	if (mask & TX_MAC_INTR) {
		gen_int_mask |= TXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
				   &bar0->mac_int_mask);
		do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
				   TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
				   TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
				   flag, &bar0->mac_tmac_err_mask);
	}

	if (mask & TX_XGXS_INTR) {
		gen_int_mask |= TXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
				   &bar0->xgxs_int_mask);
		do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
				   TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
				   flag, &bar0->xgxs_txgxs_err_mask);
	}

	if (mask & RX_DMA_INTR) {
		gen_int_mask |= RXDMA_INT_M;
		do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
				   RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
				   flag, &bar0->rxdma_int_mask);
		do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
				   RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
				   RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
				   RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
		do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
				   PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
				   PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
				   &bar0->prc_pcix_err_mask);
		do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
				   RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
				   &bar0->rpa_err_mask);
		do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
				   RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
				   RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
				   RDA_FRM_ECC_SG_ERR |
				   RDA_MISC_ERR|RDA_PCIX_ERR,
				   flag, &bar0->rda_err_mask);
		do_s2io_write_bits(RTI_SM_ERR_ALARM |
				   RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
				   flag, &bar0->rti_err_mask);
	}

	if (mask & RX_MAC_INTR) {
		gen_int_mask |= RXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
				   &bar0->mac_int_mask);
		interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
				 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
				 RMAC_DOUBLE_ECC_ERR);
		/* Link-state-change only matters when faults are reported
		 * via the RMAC error timer rather than a dedicated interrupt.
		 */
		if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
			interruptible |= RMAC_LINK_STATE_CHANGE_INT;
		do_s2io_write_bits(interruptible,
				   flag, &bar0->mac_rmac_err_mask);
	}

	if (mask & RX_XGXS_INTR) {
		gen_int_mask |= RXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
				   &bar0->xgxs_int_mask);
		do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
				   &bar0->xgxs_rxgxs_err_mask);
	}

	if (mask & MC_INTR) {
		gen_int_mask |= MC_INT_M;
		do_s2io_write_bits(MC_INT_MASK_MC_INT,
				   flag, &bar0->mc_int_mask);
		do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
				   MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
				   &bar0->mc_err_mask);
	}
	nic->general_int_mask = gen_int_mask;

	/* Remove this line when alarm interrupts are enabled */
	nic->general_int_mask = 0;
}
1977
1978/**
1979 *  en_dis_able_nic_intrs - Enable or Disable the interrupts
1980 *  @nic: device private variable,
1981 *  @mask: A mask indicating which Intr block must be modified and,
1982 *  @flag: A flag indicating whether to enable or disable the Intrs.
1983 *  Description: This function will either disable or enable the interrupts
1984 *  depending on the flag argument. The mask argument can be used to
1985 *  enable/disable any Intr block.
1986 *  Return Value: NONE.
1987 */
1988
1989static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1990{
1991	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1992	register u64 temp64 = 0, intr_mask = 0;
1993
1994	intr_mask = nic->general_int_mask;
1995
1996	/*  Top level interrupt classification */
1997	/*  PIC Interrupts */
1998	if (mask & TX_PIC_INTR) {
1999		/*  Enable PIC Intrs in the general intr mask register */
2000		intr_mask |= TXPIC_INT_M;
2001		if (flag == ENABLE_INTRS) {
2002			/*
2003			 * If Hercules adapter enable GPIO otherwise
2004			 * disable all PCIX, Flash, MDIO, IIC and GPIO
2005			 * interrupts for now.
2006			 * TODO
2007			 */
2008			if (s2io_link_fault_indication(nic) ==
2009			    LINK_UP_DOWN_INTERRUPT) {
2010				do_s2io_write_bits(PIC_INT_GPIO, flag,
2011						   &bar0->pic_int_mask);
2012				do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
2013						   &bar0->gpio_int_mask);
2014			} else
2015				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2016		} else if (flag == DISABLE_INTRS) {
2017			/*
2018			 * Disable PIC Intrs in the general
2019			 * intr mask register
2020			 */
2021			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2022		}
2023	}
2024
2025	/*  Tx traffic interrupts */
2026	if (mask & TX_TRAFFIC_INTR) {
2027		intr_mask |= TXTRAFFIC_INT_M;
2028		if (flag == ENABLE_INTRS) {
2029			/*
2030			 * Enable all the Tx side interrupts
2031			 * writing 0 Enables all 64 TX interrupt levels
2032			 */
2033			writeq(0x0, &bar0->tx_traffic_mask);
2034		} else if (flag == DISABLE_INTRS) {
2035			/*
2036			 * Disable Tx Traffic Intrs in the general intr mask
2037			 * register.
2038			 */
2039			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
2040		}
2041	}
2042
2043	/*  Rx traffic interrupts */
2044	if (mask & RX_TRAFFIC_INTR) {
2045		intr_mask |= RXTRAFFIC_INT_M;
2046		if (flag == ENABLE_INTRS) {
2047			/* writing 0 Enables all 8 RX interrupt levels */
2048			writeq(0x0, &bar0->rx_traffic_mask);
2049		} else if (flag == DISABLE_INTRS) {
2050			/*
2051			 * Disable Rx Traffic Intrs in the general intr mask
2052			 * register.
2053			 */
2054			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
2055		}
2056	}
2057
2058	temp64 = readq(&bar0->general_int_mask);
2059	if (flag == ENABLE_INTRS)
2060		temp64 &= ~((u64)intr_mask);
2061	else
2062		temp64 = DISABLE_ALL_INTRS;
2063	writeq(temp64, &bar0->general_int_mask);
2064
2065	nic->general_int_mask = readq(&bar0->general_int_mask);
2066}
2067
2068/**
2069 *  verify_pcc_quiescent- Checks for PCC quiescent state
2070 *  @sp : private member of the device structure, which is a pointer to the
2071 *  s2io_nic structure.
2072 *  @flag: boolean controlling function path
2073 *  Return: 1 If PCC is quiescence
2074 *          0 If PCC is not quiescence
2075 */
2076static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2077{
2078	int ret = 0, herc;
2079	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2080	u64 val64 = readq(&bar0->adapter_status);
2081
2082	herc = (sp->device_type == XFRAME_II_DEVICE);
2083
2084	if (flag == false) {
2085		if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2086			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2087				ret = 1;
2088		} else {
2089			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2090				ret = 1;
2091		}
2092	} else {
2093		if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2094			if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2095			     ADAPTER_STATUS_RMAC_PCC_IDLE))
2096				ret = 1;
2097		} else {
2098			if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2099			     ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2100				ret = 1;
2101		}
2102	}
2103
2104	return ret;
2105}
2106/**
2107 *  verify_xena_quiescence - Checks whether the H/W is ready
2108 *  @sp : private member of the device structure, which is a pointer to the
2109 *  s2io_nic structure.
2110 *  Description: Returns whether the H/W is ready to go or not. Depending
2111 *  on whether adapter enable bit was written or not the comparison
2112 *  differs and the calling function passes the input argument flag to
2113 *  indicate this.
2114 *  Return: 1 If xena is quiescence
2115 *          0 If Xena is not quiescence
2116 */
2117
2118static int verify_xena_quiescence(struct s2io_nic *sp)
2119{
2120	int  mode;
2121	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2122	u64 val64 = readq(&bar0->adapter_status);
2123	mode = s2io_verify_pci_mode(sp);
2124
2125	if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2126		DBG_PRINT(ERR_DBG, "TDMA is not ready!\n");
2127		return 0;
2128	}
2129	if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2130		DBG_PRINT(ERR_DBG, "RDMA is not ready!\n");
2131		return 0;
2132	}
2133	if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2134		DBG_PRINT(ERR_DBG, "PFC is not ready!\n");
2135		return 0;
2136	}
2137	if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2138		DBG_PRINT(ERR_DBG, "TMAC BUF is not empty!\n");
2139		return 0;
2140	}
2141	if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2142		DBG_PRINT(ERR_DBG, "PIC is not QUIESCENT!\n");
2143		return 0;
2144	}
2145	if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2146		DBG_PRINT(ERR_DBG, "MC_DRAM is not ready!\n");
2147		return 0;
2148	}
2149	if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2150		DBG_PRINT(ERR_DBG, "MC_QUEUES is not ready!\n");
2151		return 0;
2152	}
2153	if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2154		DBG_PRINT(ERR_DBG, "M_PLL is not locked!\n");
2155		return 0;
2156	}
2157
2158	/*
2159	 * In PCI 33 mode, the P_PLL is not used, and therefore,
2160	 * the P_PLL_LOCK bit in the adapter_status register will
2161	 * not be asserted.
2162	 */
2163	if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2164	    sp->device_type == XFRAME_II_DEVICE &&
2165	    mode != PCI_MODE_PCI_33) {
2166		DBG_PRINT(ERR_DBG, "P_PLL is not locked!\n");
2167		return 0;
2168	}
2169	if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2170	      ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2171		DBG_PRINT(ERR_DBG, "RC_PRC is not QUIESCENT!\n");
2172		return 0;
2173	}
2174	return 1;
2175}
2176
2177/**
2178 * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
2179 * @sp: Pointer to device specifc structure
2180 * Description :
2181 * New procedure to clear mac address reading  problems on Alpha platforms
2182 *
2183 */
2184
2185static void fix_mac_address(struct s2io_nic *sp)
2186{
2187	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2188	int i = 0;
2189
2190	while (fix_mac[i] != END_SIGN) {
2191		writeq(fix_mac[i++], &bar0->gpio_control);
2192		udelay(10);
2193		(void) readq(&bar0->gpio_control);
2194	}
2195}
2196
2197/**
2198 *  start_nic - Turns the device on
2199 *  @nic : device private variable.
2200 *  Description:
2201 *  This function actually turns the device on. Before this  function is
2202 *  called,all Registers are configured from their reset states
2203 *  and shared memory is allocated but the NIC is still quiescent. On
2204 *  calling this function, the device interrupts are cleared and the NIC is
2205 *  literally switched on by writing into the adapter control register.
2206 *  Return Value:
2207 *  SUCCESS on success and -1 on failure.
2208 */
2209
2210static int start_nic(struct s2io_nic *nic)
2211{
2212	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2213	struct net_device *dev = nic->dev;
2214	register u64 val64 = 0;
2215	u16 subid, i;
2216	struct config_param *config = &nic->config;
2217	struct mac_info *mac_control = &nic->mac_control;
2218
2219	/*  PRC Initialization and configuration */
2220	for (i = 0; i < config->rx_ring_num; i++) {
2221		struct ring_info *ring = &mac_control->rings[i];
2222
2223		writeq((u64)ring->rx_blocks[0].block_dma_addr,
2224		       &bar0->prc_rxd0_n[i]);
2225
2226		val64 = readq(&bar0->prc_ctrl_n[i]);
2227		if (nic->rxd_mode == RXD_MODE_1)
2228			val64 |= PRC_CTRL_RC_ENABLED;
2229		else
2230			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
2231		if (nic->device_type == XFRAME_II_DEVICE)
2232			val64 |= PRC_CTRL_GROUP_READS;
2233		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2234		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
2235		writeq(val64, &bar0->prc_ctrl_n[i]);
2236	}
2237
2238	if (nic->rxd_mode == RXD_MODE_3B) {
2239		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
2240		val64 = readq(&bar0->rx_pa_cfg);
2241		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2242		writeq(val64, &bar0->rx_pa_cfg);
2243	}
2244
2245	if (vlan_tag_strip == 0) {
2246		val64 = readq(&bar0->rx_pa_cfg);
2247		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
2248		writeq(val64, &bar0->rx_pa_cfg);
2249		nic->vlan_strip_flag = 0;
2250	}
2251
2252	/*
2253	 * Enabling MC-RLDRAM. After enabling the device, we timeout
2254	 * for around 100ms, which is approximately the time required
2255	 * for the device to be ready for operation.
2256	 */
2257	val64 = readq(&bar0->mc_rldram_mrs);
2258	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2259	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2260	val64 = readq(&bar0->mc_rldram_mrs);
2261
2262	msleep(100);	/* Delay by around 100 ms. */
2263
2264	/* Enabling ECC Protection. */
2265	val64 = readq(&bar0->adapter_control);
2266	val64 &= ~ADAPTER_ECC_EN;
2267	writeq(val64, &bar0->adapter_control);
2268
2269	/*
2270	 * Verify if the device is ready to be enabled, if so enable
2271	 * it.
2272	 */
2273	val64 = readq(&bar0->adapter_status);
2274	if (!verify_xena_quiescence(nic)) {
2275		DBG_PRINT(ERR_DBG, "%s: device is not ready, "
2276			  "Adapter status reads: 0x%llx\n",
2277			  dev->name, (unsigned long long)val64);
2278		return FAILURE;
2279	}
2280
2281	/*
2282	 * With some switches, link might be already up at this point.
2283	 * Because of this weird behavior, when we enable laser,
2284	 * we may not get link. We need to handle this. We cannot
2285	 * figure out which switch is misbehaving. So we are forced to
2286	 * make a global change.
2287	 */
2288
2289	/* Enabling Laser. */
2290	val64 = readq(&bar0->adapter_control);
2291	val64 |= ADAPTER_EOI_TX_ON;
2292	writeq(val64, &bar0->adapter_control);
2293
2294	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2295		/*
2296		 * Dont see link state interrupts initially on some switches,
2297		 * so directly scheduling the link state task here.
2298		 */
2299		schedule_work(&nic->set_link_task);
2300	}
2301	/* SXE-002: Initialize link and activity LED */
2302	subid = nic->pdev->subsystem_device;
2303	if (((subid & 0xFF) >= 0x07) &&
2304	    (nic->device_type == XFRAME_I_DEVICE)) {
2305		val64 = readq(&bar0->gpio_control);
2306		val64 |= 0x0000800000000000ULL;
2307		writeq(val64, &bar0->gpio_control);
2308		val64 = 0x0411040400000000ULL;
2309		writeq(val64, (void __iomem *)bar0 + 0x2700);
2310	}
2311
2312	return SUCCESS;
2313}
2314/**
2315 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2316 * @fifo_data: fifo data pointer
2317 * @txdlp: descriptor
2318 * @get_off: unused
2319 */
2320static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
2321					struct TxD *txdlp, int get_off)
2322{
2323	struct s2io_nic *nic = fifo_data->nic;
2324	struct sk_buff *skb;
2325	struct TxD *txds;
2326	u16 j, frg_cnt;
2327
2328	txds = txdlp;
2329	if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
2330		dma_unmap_single(&nic->pdev->dev,
2331				 (dma_addr_t)txds->Buffer_Pointer,
2332				 sizeof(u64), DMA_TO_DEVICE);
2333		txds++;
2334	}
2335
2336	skb = (struct sk_buff *)((unsigned long)txds->Host_Control);
2337	if (!skb) {
2338		memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2339		return NULL;
2340	}
2341	dma_unmap_single(&nic->pdev->dev, (dma_addr_t)txds->Buffer_Pointer,
2342			 skb_headlen(skb), DMA_TO_DEVICE);
2343	frg_cnt = skb_shinfo(skb)->nr_frags;
2344	if (frg_cnt) {
2345		txds++;
2346		for (j = 0; j < frg_cnt; j++, txds++) {
2347			const skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2348			if (!txds->Buffer_Pointer)
2349				break;
2350			dma_unmap_page(&nic->pdev->dev,
2351				       (dma_addr_t)txds->Buffer_Pointer,
2352				       skb_frag_size(frag), DMA_TO_DEVICE);
2353		}
2354	}
2355	memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2356	return skb;
2357}
2358
2359/**
2360 *  free_tx_buffers - Free all queued Tx buffers
2361 *  @nic : device private variable.
2362 *  Description:
2363 *  Free all queued Tx buffers.
2364 *  Return Value: void
2365 */
2366
2367static void free_tx_buffers(struct s2io_nic *nic)
2368{
2369	struct net_device *dev = nic->dev;
2370	struct sk_buff *skb;
2371	struct TxD *txdp;
2372	int i, j;
2373	int cnt = 0;
2374	struct config_param *config = &nic->config;
2375	struct mac_info *mac_control = &nic->mac_control;
2376	struct stat_block *stats = mac_control->stats_info;
2377	struct swStat *swstats = &stats->sw_stat;
2378
2379	for (i = 0; i < config->tx_fifo_num; i++) {
2380		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
2381		struct fifo_info *fifo = &mac_control->fifos[i];
2382		unsigned long flags;
2383
2384		spin_lock_irqsave(&fifo->tx_lock, flags);
2385		for (j = 0; j < tx_cfg->fifo_len; j++) {
2386			txdp = fifo->list_info[j].list_virt_addr;
2387			skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2388			if (skb) {
2389				swstats->mem_freed += skb->truesize;
2390				dev_kfree_skb_irq(skb);
2391				cnt++;
2392			}
2393		}
2394		DBG_PRINT(INTR_DBG,
2395			  "%s: forcibly freeing %d skbs on FIFO%d\n",
2396			  dev->name, cnt, i);
2397		fifo->tx_curr_get_info.offset = 0;
2398		fifo->tx_curr_put_info.offset = 0;
2399		spin_unlock_irqrestore(&fifo->tx_lock, flags);
2400	}
2401}
2402
2403/**
2404 *   stop_nic -  To stop the nic
2405 *   @nic : device private variable.
2406 *   Description:
2407 *   This function does exactly the opposite of what the start_nic()
2408 *   function does. This function is called to stop the device.
2409 *   Return Value:
2410 *   void.
2411 */
2412
2413static void stop_nic(struct s2io_nic *nic)
2414{
2415	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2416	register u64 val64 = 0;
2417	u16 interruptible;
2418
2419	/*  Disable all interrupts */
2420	en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2421	interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2422	interruptible |= TX_PIC_INTR;
2423	en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2424
2425	/* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2426	val64 = readq(&bar0->adapter_control);
2427	val64 &= ~(ADAPTER_CNTL_EN);
2428	writeq(val64, &bar0->adapter_control);
2429}
2430
2431/**
2432 *  fill_rx_buffers - Allocates the Rx side skbs
2433 *  @nic : device private variable.
2434 *  @ring: per ring structure
2435 *  @from_card_up: If this is true, we will map the buffer to get
2436 *     the dma address for buf0 and buf1 to give it to the card.
2437 *     Else we will sync the already mapped buffer to give it to the card.
2438 *  Description:
2439 *  The function allocates Rx side skbs and puts the physical
2440 *  address of these buffers into the RxD buffer pointers, so that the NIC
2441 *  can DMA the received frame into these locations.
2442 *  The NIC supports 3 receive modes, viz
2443 *  1. single buffer,
2444 *  2. three buffer and
2445 *  3. Five buffer modes.
2446 *  Each mode defines how many fragments the received frame will be split
2447 *  up into by the NIC. The frame is split into L3 header, L4 Header,
2448 *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2449 *  is split into 3 fragments. As of now only single buffer mode is
2450 *  supported.
2451 *   Return Value:
2452 *  SUCCESS on success or an appropriate -ve value on failure.
2453 */
2454static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
2455			   int from_card_up)
2456{
2457	struct sk_buff *skb;
2458	struct RxD_t *rxdp;
2459	int off, size, block_no, block_no1;
2460	u32 alloc_tab = 0;
2461	u32 alloc_cnt;
2462	u64 tmp;
2463	struct buffAdd *ba;
2464	struct RxD_t *first_rxdp = NULL;
2465	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2466	struct RxD1 *rxdp1;
2467	struct RxD3 *rxdp3;
2468	struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat;
2469
2470	alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;
2471
2472	block_no1 = ring->rx_curr_get_info.block_index;
2473	while (alloc_tab < alloc_cnt) {
2474		block_no = ring->rx_curr_put_info.block_index;
2475
2476		off = ring->rx_curr_put_info.offset;
2477
2478		rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;
2479
2480		if ((block_no == block_no1) &&
2481		    (off == ring->rx_curr_get_info.offset) &&
2482		    (rxdp->Host_Control)) {
2483			DBG_PRINT(INTR_DBG, "%s: Get and Put info equated\n",
2484				  ring->dev->name);
2485			goto end;
2486		}
2487		if (off && (off == ring->rxd_count)) {
2488			ring->rx_curr_put_info.block_index++;
2489			if (ring->rx_curr_put_info.block_index ==
2490			    ring->block_count)
2491				ring->rx_curr_put_info.block_index = 0;
2492			block_no = ring->rx_curr_put_info.block_index;
2493			off = 0;
2494			ring->rx_curr_put_info.offset = off;
2495			rxdp = ring->rx_blocks[block_no].block_virt_addr;
2496			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2497				  ring->dev->name, rxdp);
2498
2499		}
2500
2501		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2502		    ((ring->rxd_mode == RXD_MODE_3B) &&
2503		     (rxdp->Control_2 & s2BIT(0)))) {
2504			ring->rx_curr_put_info.offset = off;
2505			goto end;
2506		}
2507		/* calculate size of skb based on ring mode */
2508		size = ring->mtu +
2509			HEADER_ETHERNET_II_802_3_SIZE +
2510			HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2511		if (ring->rxd_mode == RXD_MODE_1)
2512			size += NET_IP_ALIGN;
2513		else
2514			size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2515
2516		/* allocate skb */
2517		skb = netdev_alloc_skb(nic->dev, size);
2518		if (!skb) {
2519			DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
2520				  ring->dev->name);
2521			if (first_rxdp) {
2522				dma_wmb();
2523				first_rxdp->Control_1 |= RXD_OWN_XENA;
2524			}
2525			swstats->mem_alloc_fail_cnt++;
2526
2527			return -ENOMEM ;
2528		}
2529		swstats->mem_allocated += skb->truesize;
2530
2531		if (ring->rxd_mode == RXD_MODE_1) {
2532			/* 1 buffer mode - normal operation mode */
2533			rxdp1 = (struct RxD1 *)rxdp;
2534			memset(rxdp, 0, sizeof(struct RxD1));
2535			skb_reserve(skb, NET_IP_ALIGN);
2536			rxdp1->Buffer0_ptr =
2537				dma_map_single(&ring->pdev->dev, skb->data,
2538					       size - NET_IP_ALIGN,
2539					       DMA_FROM_DEVICE);
2540			if (dma_mapping_error(&nic->pdev->dev, rxdp1->Buffer0_ptr))
2541				goto pci_map_failed;
2542
2543			rxdp->Control_2 =
2544				SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2545			rxdp->Host_Control = (unsigned long)skb;
2546		} else if (ring->rxd_mode == RXD_MODE_3B) {
2547			/*
2548			 * 2 buffer mode -
2549			 * 2 buffer mode provides 128
2550			 * byte aligned receive buffers.
2551			 */
2552
2553			rxdp3 = (struct RxD3 *)rxdp;
2554			/* save buffer pointers to avoid frequent dma mapping */
2555			Buffer0_ptr = rxdp3->Buffer0_ptr;
2556			Buffer1_ptr = rxdp3->Buffer1_ptr;
2557			memset(rxdp, 0, sizeof(struct RxD3));
2558			/* restore the buffer pointers for dma sync*/
2559			rxdp3->Buffer0_ptr = Buffer0_ptr;
2560			rxdp3->Buffer1_ptr = Buffer1_ptr;
2561
2562			ba = &ring->ba[block_no][off];
2563			skb_reserve(skb, BUF0_LEN);
2564			tmp = (u64)(unsigned long)skb->data;
2565			tmp += ALIGN_SIZE;
2566			tmp &= ~ALIGN_SIZE;
2567			skb->data = (void *) (unsigned long)tmp;
2568			skb_reset_tail_pointer(skb);
2569
2570			if (from_card_up) {
2571				rxdp3->Buffer0_ptr =
2572					dma_map_single(&ring->pdev->dev,
2573						       ba->ba_0, BUF0_LEN,
2574						       DMA_FROM_DEVICE);
2575				if (dma_mapping_error(&nic->pdev->dev, rxdp3->Buffer0_ptr))
2576					goto pci_map_failed;
2577			} else
2578				dma_sync_single_for_device(&ring->pdev->dev,
2579							   (dma_addr_t)rxdp3->Buffer0_ptr,
2580							   BUF0_LEN,
2581							   DMA_FROM_DEVICE);
2582
2583			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2584			if (ring->rxd_mode == RXD_MODE_3B) {
2585				/* Two buffer mode */
2586
2587				/*
2588				 * Buffer2 will have L3/L4 header plus
2589				 * L4 payload
2590				 */
2591				rxdp3->Buffer2_ptr = dma_map_single(&ring->pdev->dev,
2592								    skb->data,
2593								    ring->mtu + 4,
2594								    DMA_FROM_DEVICE);
2595
2596				if (dma_mapping_error(&nic->pdev->dev, rxdp3->Buffer2_ptr))
2597					goto pci_map_failed;
2598
2599				if (from_card_up) {
2600					rxdp3->Buffer1_ptr =
2601						dma_map_single(&ring->pdev->dev,
2602							       ba->ba_1,
2603							       BUF1_LEN,
2604							       DMA_FROM_DEVICE);
2605
2606					if (dma_mapping_error(&nic->pdev->dev,
2607							      rxdp3->Buffer1_ptr)) {
2608						dma_unmap_single(&ring->pdev->dev,
2609								 (dma_addr_t)(unsigned long)
2610								 skb->data,
2611								 ring->mtu + 4,
2612								 DMA_FROM_DEVICE);
2613						goto pci_map_failed;
2614					}
2615				}
2616				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2617				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2618					(ring->mtu + 4);
2619			}
2620			rxdp->Control_2 |= s2BIT(0);
2621			rxdp->Host_Control = (unsigned long) (skb);
2622		}
2623		if (alloc_tab & ((1 << rxsync_frequency) - 1))
2624			rxdp->Control_1 |= RXD_OWN_XENA;
2625		off++;
2626		if (off == (ring->rxd_count + 1))
2627			off = 0;
2628		ring->rx_curr_put_info.offset = off;
2629
2630		rxdp->Control_2 |= SET_RXD_MARKER;
2631		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2632			if (first_rxdp) {
2633				dma_wmb();
2634				first_rxdp->Control_1 |= RXD_OWN_XENA;
2635			}
2636			first_rxdp = rxdp;
2637		}
2638		ring->rx_bufs_left += 1;
2639		alloc_tab++;
2640	}
2641
2642end:
2643	/* Transfer ownership of first descriptor to adapter just before
2644	 * exiting. Before that, use memory barrier so that ownership
2645	 * and other fields are seen by adapter correctly.
2646	 */
2647	if (first_rxdp) {
2648		dma_wmb();
2649		first_rxdp->Control_1 |= RXD_OWN_XENA;
2650	}
2651
2652	return SUCCESS;
2653
2654pci_map_failed:
2655	swstats->pci_map_fail_cnt++;
2656	swstats->mem_freed += skb->truesize;
2657	dev_kfree_skb_irq(skb);
2658	return -ENOMEM;
2659}
2660
2661static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2662{
2663	struct net_device *dev = sp->dev;
2664	int j;
2665	struct sk_buff *skb;
2666	struct RxD_t *rxdp;
2667	struct RxD1 *rxdp1;
2668	struct RxD3 *rxdp3;
2669	struct mac_info *mac_control = &sp->mac_control;
2670	struct stat_block *stats = mac_control->stats_info;
2671	struct swStat *swstats = &stats->sw_stat;
2672
2673	for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2674		rxdp = mac_control->rings[ring_no].
2675			rx_blocks[blk].rxds[j].virt_addr;
2676		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2677		if (!skb)
2678			continue;
2679		if (sp->rxd_mode == RXD_MODE_1) {
2680			rxdp1 = (struct RxD1 *)rxdp;
2681			dma_unmap_single(&sp->pdev->dev,
2682					 (dma_addr_t)rxdp1->Buffer0_ptr,
2683					 dev->mtu +
2684					 HEADER_ETHERNET_II_802_3_SIZE +
2685					 HEADER_802_2_SIZE + HEADER_SNAP_SIZE,
2686					 DMA_FROM_DEVICE);
2687			memset(rxdp, 0, sizeof(struct RxD1));
2688		} else if (sp->rxd_mode == RXD_MODE_3B) {
2689			rxdp3 = (struct RxD3 *)rxdp;
2690			dma_unmap_single(&sp->pdev->dev,
2691					 (dma_addr_t)rxdp3->Buffer0_ptr,
2692					 BUF0_LEN, DMA_FROM_DEVICE);
2693			dma_unmap_single(&sp->pdev->dev,
2694					 (dma_addr_t)rxdp3->Buffer1_ptr,
2695					 BUF1_LEN, DMA_FROM_DEVICE);
2696			dma_unmap_single(&sp->pdev->dev,
2697					 (dma_addr_t)rxdp3->Buffer2_ptr,
2698					 dev->mtu + 4, DMA_FROM_DEVICE);
2699			memset(rxdp, 0, sizeof(struct RxD3));
2700		}
2701		swstats->mem_freed += skb->truesize;
2702		dev_kfree_skb(skb);
2703		mac_control->rings[ring_no].rx_bufs_left -= 1;
2704	}
2705}
2706
2707/**
2708 *  free_rx_buffers - Frees all Rx buffers
2709 *  @sp: device private variable.
2710 *  Description:
2711 *  This function will free all Rx buffers allocated by host.
2712 *  Return Value:
2713 *  NONE.
2714 */
2715
2716static void free_rx_buffers(struct s2io_nic *sp)
2717{
2718	struct net_device *dev = sp->dev;
2719	int i, blk = 0, buf_cnt = 0;
2720	struct config_param *config = &sp->config;
2721	struct mac_info *mac_control = &sp->mac_control;
2722
2723	for (i = 0; i < config->rx_ring_num; i++) {
2724		struct ring_info *ring = &mac_control->rings[i];
2725
2726		for (blk = 0; blk < rx_ring_sz[i]; blk++)
2727			free_rxd_blk(sp, i, blk);
2728
2729		ring->rx_curr_put_info.block_index = 0;
2730		ring->rx_curr_get_info.block_index = 0;
2731		ring->rx_curr_put_info.offset = 0;
2732		ring->rx_curr_get_info.offset = 0;
2733		ring->rx_bufs_left = 0;
2734		DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
2735			  dev->name, buf_cnt, i);
2736	}
2737}
2738
2739static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
2740{
2741	if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2742		DBG_PRINT(INFO_DBG, "%s: Out of memory in Rx Intr!!\n",
2743			  ring->dev->name);
2744	}
2745	return 0;
2746}
2747
2748/**
2749 * s2io_poll_msix - Rx interrupt handler for NAPI support
2750 * @napi : pointer to the napi structure.
2751 * @budget : The number of packets that were budgeted to be processed
2752 * during  one pass through the 'Poll" function.
2753 * Description:
2754 * Comes into picture only if NAPI support has been incorporated. It does
2755 * the same thing that rx_intr_handler does, but not in a interrupt context
2756 * also It will process only a given number of packets.
2757 * Return value:
2758 * 0 on success and 1 if there are No Rx packets to be processed.
2759 */
2760
2761static int s2io_poll_msix(struct napi_struct *napi, int budget)
2762{
2763	struct ring_info *ring = container_of(napi, struct ring_info, napi);
2764	struct net_device *dev = ring->dev;
2765	int pkts_processed = 0;
2766	u8 __iomem *addr = NULL;
2767	u8 val8 = 0;
2768	struct s2io_nic *nic = netdev_priv(dev);
2769	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2770	int budget_org = budget;
2771
2772	if (unlikely(!is_s2io_card_up(nic)))
2773		return 0;
2774
2775	pkts_processed = rx_intr_handler(ring, budget);
2776	s2io_chk_rx_buffers(nic, ring);
2777
2778	if (pkts_processed < budget_org) {
2779		napi_complete_done(napi, pkts_processed);
2780		/*Re Enable MSI-Rx Vector*/
2781		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
2782		addr += 7 - ring->ring_no;
2783		val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
2784		writeb(val8, addr);
2785		val8 = readb(addr);
2786	}
2787	return pkts_processed;
2788}
2789
2790static int s2io_poll_inta(struct napi_struct *napi, int budget)
2791{
2792	struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2793	int pkts_processed = 0;
2794	int ring_pkts_processed, i;
2795	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2796	int budget_org = budget;
2797	struct config_param *config = &nic->config;
2798	struct mac_info *mac_control = &nic->mac_control;
2799
2800	if (unlikely(!is_s2io_card_up(nic)))
2801		return 0;
2802
2803	for (i = 0; i < config->rx_ring_num; i++) {
2804		struct ring_info *ring = &mac_control->rings[i];
2805		ring_pkts_processed = rx_intr_handler(ring, budget);
2806		s2io_chk_rx_buffers(nic, ring);
2807		pkts_processed += ring_pkts_processed;
2808		budget -= ring_pkts_processed;
2809		if (budget <= 0)
2810			break;
2811	}
2812	if (pkts_processed < budget_org) {
2813		napi_complete_done(napi, pkts_processed);
2814		/* Re enable the Rx interrupts for the ring */
2815		writeq(0, &bar0->rx_traffic_mask);
2816		readl(&bar0->rx_traffic_mask);
2817	}
2818	return pkts_processed;
2819}
2820
2821#ifdef CONFIG_NET_POLL_CONTROLLER
2822/**
2823 * s2io_netpoll - netpoll event handler entry point
2824 * @dev : pointer to the device structure.
2825 * Description:
2826 * 	This function will be called by upper layer to check for events on the
2827 * interface in situations where interrupts are disabled. It is used for
2828 * specific in-kernel networking tasks, such as remote consoles and kernel
2829 * debugging over the network (example netdump in RedHat).
2830 */
2831static void s2io_netpoll(struct net_device *dev)
2832{
2833	struct s2io_nic *nic = netdev_priv(dev);
2834	const int irq = nic->pdev->irq;
2835	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2836	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2837	int i;
2838	struct config_param *config = &nic->config;
2839	struct mac_info *mac_control = &nic->mac_control;
2840
2841	if (pci_channel_offline(nic->pdev))
2842		return;
2843
2844	disable_irq(irq);
2845
2846	writeq(val64, &bar0->rx_traffic_int);
2847	writeq(val64, &bar0->tx_traffic_int);
2848
2849	/* we need to free up the transmitted skbufs or else netpoll will
2850	 * run out of skbs and will fail and eventually netpoll application such
2851	 * as netdump will fail.
2852	 */
2853	for (i = 0; i < config->tx_fifo_num; i++)
2854		tx_intr_handler(&mac_control->fifos[i]);
2855
2856	/* check for received packet and indicate up to network */
2857	for (i = 0; i < config->rx_ring_num; i++) {
2858		struct ring_info *ring = &mac_control->rings[i];
2859
2860		rx_intr_handler(ring, 0);
2861	}
2862
2863	for (i = 0; i < config->rx_ring_num; i++) {
2864		struct ring_info *ring = &mac_control->rings[i];
2865
2866		if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2867			DBG_PRINT(INFO_DBG,
2868				  "%s: Out of memory in Rx Netpoll!!\n",
2869				  dev->name);
2870			break;
2871		}
2872	}
2873	enable_irq(irq);
2874}
2875#endif
2876
2877/**
2878 *  rx_intr_handler - Rx interrupt handler
2879 *  @ring_data: per ring structure.
2880 *  @budget: budget for napi processing.
2881 *  Description:
2882 *  If the interrupt is because of a received frame or if the
2883 *  receive ring contains fresh as yet un-processed frames,this function is
2884 *  called. It picks out the RxD at which place the last Rx processing had
2885 *  stopped and sends the skb to the OSM's Rx handler and then increments
2886 *  the offset.
2887 *  Return Value:
2888 *  No. of napi packets processed.
2889 */
2890static int rx_intr_handler(struct ring_info *ring_data, int budget)
2891{
2892	int get_block, put_block;
2893	struct rx_curr_get_info get_info, put_info;
2894	struct RxD_t *rxdp;
2895	struct sk_buff *skb;
2896	int pkt_cnt = 0, napi_pkts = 0;
2897	int i;
2898	struct RxD1 *rxdp1;
2899	struct RxD3 *rxdp3;
2900
2901	if (budget <= 0)
2902		return napi_pkts;
2903
2904	get_info = ring_data->rx_curr_get_info;
2905	get_block = get_info.block_index;
2906	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2907	put_block = put_info.block_index;
2908	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2909
2910	while (RXD_IS_UP2DT(rxdp)) {
2911		/*
2912		 * If your are next to put index then it's
2913		 * FIFO full condition
2914		 */
2915		if ((get_block == put_block) &&
2916		    (get_info.offset + 1) == put_info.offset) {
2917			DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
2918				  ring_data->dev->name);
2919			break;
2920		}
2921		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2922		if (skb == NULL) {
2923			DBG_PRINT(ERR_DBG, "%s: NULL skb in Rx Intr\n",
2924				  ring_data->dev->name);
2925			return 0;
2926		}
2927		if (ring_data->rxd_mode == RXD_MODE_1) {
2928			rxdp1 = (struct RxD1 *)rxdp;
2929			dma_unmap_single(&ring_data->pdev->dev,
2930					 (dma_addr_t)rxdp1->Buffer0_ptr,
2931					 ring_data->mtu +
2932					 HEADER_ETHERNET_II_802_3_SIZE +
2933					 HEADER_802_2_SIZE +
2934					 HEADER_SNAP_SIZE,
2935					 DMA_FROM_DEVICE);
2936		} else if (ring_data->rxd_mode == RXD_MODE_3B) {
2937			rxdp3 = (struct RxD3 *)rxdp;
2938			dma_sync_single_for_cpu(&ring_data->pdev->dev,
2939						(dma_addr_t)rxdp3->Buffer0_ptr,
2940						BUF0_LEN, DMA_FROM_DEVICE);
2941			dma_unmap_single(&ring_data->pdev->dev,
2942					 (dma_addr_t)rxdp3->Buffer2_ptr,
2943					 ring_data->mtu + 4, DMA_FROM_DEVICE);
2944		}
2945		prefetch(skb->data);
2946		rx_osm_handler(ring_data, rxdp);
2947		get_info.offset++;
2948		ring_data->rx_curr_get_info.offset = get_info.offset;
2949		rxdp = ring_data->rx_blocks[get_block].
2950			rxds[get_info.offset].virt_addr;
2951		if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
2952			get_info.offset = 0;
2953			ring_data->rx_curr_get_info.offset = get_info.offset;
2954			get_block++;
2955			if (get_block == ring_data->block_count)
2956				get_block = 0;
2957			ring_data->rx_curr_get_info.block_index = get_block;
2958			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2959		}
2960
2961		if (ring_data->nic->config.napi) {
2962			budget--;
2963			napi_pkts++;
2964			if (!budget)
2965				break;
2966		}
2967		pkt_cnt++;
2968		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2969			break;
2970	}
2971	if (ring_data->lro) {
2972		/* Clear all LRO sessions before exiting */
2973		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
2974			struct lro *lro = &ring_data->lro0_n[i];
2975			if (lro->in_use) {
2976				update_L3L4_header(ring_data->nic, lro);
2977				queue_rx_frame(lro->parent, lro->vlan_tag);
2978				clear_lro_session(lro);
2979			}
2980		}
2981	}
2982	return napi_pkts;
2983}
2984
2985/**
2986 *  tx_intr_handler - Transmit interrupt handler
2987 *  @fifo_data : fifo data pointer
2988 *  Description:
2989 *  If an interrupt was raised to indicate DMA complete of the
2990 *  Tx packet, this function is called. It identifies the last TxD
2991 *  whose buffer was freed and frees all skbs whose data have already
2992 *  DMA'ed into the NICs internal memory.
2993 *  Return Value:
2994 *  NONE
2995 */
2996
2997static void tx_intr_handler(struct fifo_info *fifo_data)
2998{
2999	struct s2io_nic *nic = fifo_data->nic;
3000	struct tx_curr_get_info get_info, put_info;
3001	struct sk_buff *skb = NULL;
3002	struct TxD *txdlp;
3003	int pkt_cnt = 0;
3004	unsigned long flags = 0;
3005	u8 err_mask;
3006	struct stat_block *stats = nic->mac_control.stats_info;
3007	struct swStat *swstats = &stats->sw_stat;
3008
3009	if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
3010		return;
3011
3012	get_info = fifo_data->tx_curr_get_info;
3013	memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
3014	txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
3015	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
3016	       (get_info.offset != put_info.offset) &&
3017	       (txdlp->Host_Control)) {
3018		/* Check for TxD errors */
3019		if (txdlp->Control_1 & TXD_T_CODE) {
3020			unsigned long long err;
3021			err = txdlp->Control_1 & TXD_T_CODE;
3022			if (err & 0x1) {
3023				swstats->parity_err_cnt++;
3024			}
3025
3026			/* update t_code statistics */
3027			err_mask = err >> 48;
3028			switch (err_mask) {
3029			case 2:
3030				swstats->tx_buf_abort_cnt++;
3031				break;
3032
3033			case 3:
3034				swstats->tx_desc_abort_cnt++;
3035				break;
3036
3037			case 7:
3038				swstats->tx_parity_err_cnt++;
3039				break;
3040
3041			case 10:
3042				swstats->tx_link_loss_cnt++;
3043				break;
3044
3045			case 15:
3046				swstats->tx_list_proc_err_cnt++;
3047				break;
3048			}
3049		}
3050
3051		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
3052		if (skb == NULL) {
3053			spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3054			DBG_PRINT(ERR_DBG, "%s: NULL skb in Tx Free Intr\n",
3055				  __func__);
3056			return;
3057		}
3058		pkt_cnt++;
3059
3060		/* Updating the statistics block */
3061		swstats->mem_freed += skb->truesize;
3062		dev_consume_skb_irq(skb);
3063
3064		get_info.offset++;
3065		if (get_info.offset == get_info.fifo_len + 1)
3066			get_info.offset = 0;
3067		txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
3068		fifo_data->tx_curr_get_info.offset = get_info.offset;
3069	}
3070
3071	s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);
3072
3073	spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3074}
3075
3076/**
3077 *  s2io_mdio_write - Function to write in to MDIO registers
3078 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3079 *  @addr     : address value
3080 *  @value    : data value
3081 *  @dev      : pointer to net_device structure
3082 *  Description:
3083 *  This function is used to write values to the MDIO registers
3084 *  NONE
3085 */
3086static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value,
3087			    struct net_device *dev)
3088{
3089	u64 val64;
3090	struct s2io_nic *sp = netdev_priv(dev);
3091	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3092
3093	/* address transaction */
3094	val64 = MDIO_MMD_INDX_ADDR(addr) |
3095		MDIO_MMD_DEV_ADDR(mmd_type) |
3096		MDIO_MMS_PRT_ADDR(0x0);
3097	writeq(val64, &bar0->mdio_control);
3098	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3099	writeq(val64, &bar0->mdio_control);
3100	udelay(100);
3101
3102	/* Data transaction */
3103	val64 = MDIO_MMD_INDX_ADDR(addr) |
3104		MDIO_MMD_DEV_ADDR(mmd_type) |
3105		MDIO_MMS_PRT_ADDR(0x0) |
3106		MDIO_MDIO_DATA(value) |
3107		MDIO_OP(MDIO_OP_WRITE_TRANS);
3108	writeq(val64, &bar0->mdio_control);
3109	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3110	writeq(val64, &bar0->mdio_control);
3111	udelay(100);
3112
3113	val64 = MDIO_MMD_INDX_ADDR(addr) |
3114		MDIO_MMD_DEV_ADDR(mmd_type) |
3115		MDIO_MMS_PRT_ADDR(0x0) |
3116		MDIO_OP(MDIO_OP_READ_TRANS);
3117	writeq(val64, &bar0->mdio_control);
3118	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3119	writeq(val64, &bar0->mdio_control);
3120	udelay(100);
3121}
3122
3123/**
3124 *  s2io_mdio_read - Function to write in to MDIO registers
3125 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3126 *  @addr     : address value
3127 *  @dev      : pointer to net_device structure
3128 *  Description:
3129 *  This function is used to read values to the MDIO registers
3130 *  NONE
3131 */
3132static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3133{
3134	u64 val64 = 0x0;
3135	u64 rval64 = 0x0;
3136	struct s2io_nic *sp = netdev_priv(dev);
3137	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3138
3139	/* address transaction */
3140	val64 = val64 | (MDIO_MMD_INDX_ADDR(addr)
3141			 | MDIO_MMD_DEV_ADDR(mmd_type)
3142			 | MDIO_MMS_PRT_ADDR(0x0));
3143	writeq(val64, &bar0->mdio_control);
3144	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3145	writeq(val64, &bar0->mdio_control);
3146	udelay(100);
3147
3148	/* Data transaction */
3149	val64 = MDIO_MMD_INDX_ADDR(addr) |
3150		MDIO_MMD_DEV_ADDR(mmd_type) |
3151		MDIO_MMS_PRT_ADDR(0x0) |
3152		MDIO_OP(MDIO_OP_READ_TRANS);
3153	writeq(val64, &bar0->mdio_control);
3154	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3155	writeq(val64, &bar0->mdio_control);
3156	udelay(100);
3157
3158	/* Read the value from regs */
3159	rval64 = readq(&bar0->mdio_control);
3160	rval64 = rval64 & 0xFFFF0000;
3161	rval64 = rval64 >> 16;
3162	return rval64;
3163}
3164
3165/**
3166 *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
3167 *  @counter      : counter value to be updated
3168 *  @regs_stat    : registers status
3169 *  @index        : index
3170 *  @flag         : flag to indicate the status
3171 *  @type         : counter type
3172 *  Description:
3173 *  This function is to check the status of the xpak counters value
3174 *  NONE
3175 */
3176
3177static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index,
3178				  u16 flag, u16 type)
3179{
3180	u64 mask = 0x3;
3181	u64 val64;
3182	int i;
3183	for (i = 0; i < index; i++)
3184		mask = mask << 0x2;
3185
3186	if (flag > 0) {
3187		*counter = *counter + 1;
3188		val64 = *regs_stat & mask;
3189		val64 = val64 >> (index * 0x2);
3190		val64 = val64 + 1;
3191		if (val64 == 3) {
3192			switch (type) {
3193			case 1:
3194				DBG_PRINT(ERR_DBG,
3195					  "Take Xframe NIC out of service.\n");
3196				DBG_PRINT(ERR_DBG,
3197"Excessive temperatures may result in premature transceiver failure.\n");
3198				break;
3199			case 2:
3200				DBG_PRINT(ERR_DBG,
3201					  "Take Xframe NIC out of service.\n");
3202				DBG_PRINT(ERR_DBG,
3203"Excessive bias currents may indicate imminent laser diode failure.\n");
3204				break;
3205			case 3:
3206				DBG_PRINT(ERR_DBG,
3207					  "Take Xframe NIC out of service.\n");
3208				DBG_PRINT(ERR_DBG,
3209"Excessive laser output power may saturate far-end receiver.\n");
3210				break;
3211			default:
3212				DBG_PRINT(ERR_DBG,
3213					  "Incorrect XPAK Alarm type\n");
3214			}
3215			val64 = 0x0;
3216		}
3217		val64 = val64 << (index * 0x2);
3218		*regs_stat = (*regs_stat & (~mask)) | (val64);
3219
3220	} else {
3221		*regs_stat = *regs_stat & (~mask);
3222	}
3223}
3224
3225/**
3226 *  s2io_updt_xpak_counter - Function to update the xpak counters
3227 *  @dev         : pointer to net_device struct
3228 *  Description:
3229 *  This function is to upate the status of the xpak counters value
3230 *  NONE
3231 */
3232static void s2io_updt_xpak_counter(struct net_device *dev)
3233{
3234	u16 flag  = 0x0;
3235	u16 type  = 0x0;
3236	u16 val16 = 0x0;
3237	u64 val64 = 0x0;
3238	u64 addr  = 0x0;
3239
3240	struct s2io_nic *sp = netdev_priv(dev);
3241	struct stat_block *stats = sp->mac_control.stats_info;
3242	struct xpakStat *xstats = &stats->xpak_stat;
3243
3244	/* Check the communication with the MDIO slave */
3245	addr = MDIO_CTRL1;
3246	val64 = 0x0;
3247	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3248	if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
3249		DBG_PRINT(ERR_DBG,
3250			  "ERR: MDIO slave access failed - Returned %llx\n",
3251			  (unsigned long long)val64);
3252		return;
3253	}
3254
3255	/* Check for the expected value of control reg 1 */
3256	if (val64 != MDIO_CTRL1_SPEED10G) {
3257		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - "
3258			  "Returned: %llx- Expected: 0x%x\n",
3259			  (unsigned long long)val64, MDIO_CTRL1_SPEED10G);
3260		return;
3261	}
3262
3263	/* Loading the DOM register to MDIO register */
3264	addr = 0xA100;
3265	s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
3266	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3267
3268	/* Reading the Alarm flags */
3269	addr = 0xA070;
3270	val64 = 0x0;
3271	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3272
3273	flag = CHECKBIT(val64, 0x7);
3274	type = 1;
3275	s2io_chk_xpak_counter(&xstats->alarm_transceiver_temp_high,
3276			      &xstats->xpak_regs_stat,
3277			      0x0, flag, type);
3278
3279	if (CHECKBIT(val64, 0x6))
3280		xstats->alarm_transceiver_temp_low++;
3281
3282	flag = CHECKBIT(val64, 0x3);
3283	type = 2;
3284	s2io_chk_xpak_counter(&xstats->alarm_laser_bias_current_high,
3285			      &xstats->xpak_regs_stat,
3286			      0x2, flag, type);
3287
3288	if (CHECKBIT(val64, 0x2))
3289		xstats->alarm_laser_bias_current_low++;
3290
3291	flag = CHECKBIT(val64, 0x1);
3292	type = 3;
3293	s2io_chk_xpak_counter(&xstats->alarm_laser_output_power_high,
3294			      &xstats->xpak_regs_stat,
3295			      0x4, flag, type);
3296
3297	if (CHECKBIT(val64, 0x0))
3298		xstats->alarm_laser_output_power_low++;
3299
3300	/* Reading the Warning flags */
3301	addr = 0xA074;
3302	val64 = 0x0;
3303	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3304
3305	if (CHECKBIT(val64, 0x7))
3306		xstats->warn_transceiver_temp_high++;
3307
3308	if (CHECKBIT(val64, 0x6))
3309		xstats->warn_transceiver_temp_low++;
3310
3311	if (CHECKBIT(val64, 0x3))
3312		xstats->warn_laser_bias_current_high++;
3313
3314	if (CHECKBIT(val64, 0x2))
3315		xstats->warn_laser_bias_current_low++;
3316
3317	if (CHECKBIT(val64, 0x1))
3318		xstats->warn_laser_output_power_high++;
3319
3320	if (CHECKBIT(val64, 0x0))
3321		xstats->warn_laser_output_power_low++;
3322}
3323
3324/**
3325 *  wait_for_cmd_complete - waits for a command to complete.
3326 *  @addr: address
3327 *  @busy_bit: bit to check for busy
3328 *  @bit_state: state to check
3329 *  @may_sleep: parameter indicates if sleeping when waiting for
3330 *  command complete
3331 *  Description: Function that waits for a command to Write into RMAC
3332 *  ADDR DATA registers to be completed and returns either success or
3333 *  error depending on whether the command was complete or not.
3334 *  Return value:
3335 *   SUCCESS on success and FAILURE on failure.
3336 */
3337
3338static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3339				 int bit_state, bool may_sleep)
3340{
3341	int ret = FAILURE, cnt = 0, delay = 1;
3342	u64 val64;
3343
3344	if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3345		return FAILURE;
3346
3347	do {
3348		val64 = readq(addr);
3349		if (bit_state == S2IO_BIT_RESET) {
3350			if (!(val64 & busy_bit)) {
3351				ret = SUCCESS;
3352				break;
3353			}
3354		} else {
3355			if (val64 & busy_bit) {
3356				ret = SUCCESS;
3357				break;
3358			}
3359		}
3360
3361		if (!may_sleep)
3362			mdelay(delay);
3363		else
3364			msleep(delay);
3365
3366		if (++cnt >= 10)
3367			delay = 50;
3368	} while (cnt < 20);
3369	return ret;
3370}
3371/**
3372 * check_pci_device_id - Checks if the device id is supported
3373 * @id : device id
3374 * Description: Function to check if the pci device id is supported by driver.
3375 * Return value: Actual device id if supported else PCI_ANY_ID
3376 */
3377static u16 check_pci_device_id(u16 id)
3378{
3379	switch (id) {
3380	case PCI_DEVICE_ID_HERC_WIN:
3381	case PCI_DEVICE_ID_HERC_UNI:
3382		return XFRAME_II_DEVICE;
3383	case PCI_DEVICE_ID_S2IO_UNI:
3384	case PCI_DEVICE_ID_S2IO_WIN:
3385		return XFRAME_I_DEVICE;
3386	default:
3387		return PCI_ANY_ID;
3388	}
3389}
3390
3391/**
3392 *  s2io_reset - Resets the card.
3393 *  @sp : private member of the device structure.
3394 *  Description: Function to Reset the card. This function then also
3395 *  restores the previously saved PCI configuration space registers as
3396 *  the card reset also resets the configuration space.
3397 *  Return value:
3398 *  void.
3399 */
3400
3401static void s2io_reset(struct s2io_nic *sp)
3402{
3403	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3404	u64 val64;
3405	u16 subid, pci_cmd;
3406	int i;
3407	u16 val16;
3408	unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3409	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3410	struct stat_block *stats;
3411	struct swStat *swstats;
3412
3413	DBG_PRINT(INIT_DBG, "%s: Resetting XFrame card %s\n",
3414		  __func__, pci_name(sp->pdev));
3415
3416	/* Back up  the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
3417	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3418
3419	val64 = SW_RESET_ALL;
3420	writeq(val64, &bar0->sw_reset);
3421	if (strstr(sp->product_name, "CX4"))
3422		msleep(750);
3423	msleep(250);
3424	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3425
3426		/* Restore the PCI state saved during initialization. */
3427		pci_restore_state(sp->pdev);
3428		pci_save_state(sp->pdev);
3429		pci_read_config_word(sp->pdev, 0x2, &val16);
3430		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3431			break;
3432		msleep(200);
3433	}
3434
3435	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID)
3436		DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__);
3437
3438	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3439
3440	s2io_init_pci(sp);
3441
3442	/* Set swapper to enable I/O register access */
3443	s2io_set_swapper(sp);
3444
3445	/* restore mac_addr entries */
3446	do_s2io_restore_unicast_mc(sp);
3447
3448	/* Restore the MSIX table entries from local variables */
3449	restore_xmsi_data(sp);
3450
3451	/* Clear certain PCI/PCI-X fields after reset */
3452	if (sp->device_type == XFRAME_II_DEVICE) {
3453		/* Clear "detected parity error" bit */
3454		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3455
3456		/* Clearing PCIX Ecc status register */
3457		pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3458
3459		/* Clearing PCI_STATUS error reflected here */
3460		writeq(s2BIT(62), &bar0->txpic_int_reg);
3461	}
3462
3463	/* Reset device statistics maintained by OS */
3464	memset(&sp->stats, 0, sizeof(struct net_device_stats));
3465
3466	stats = sp->mac_control.stats_info;
3467	swstats = &stats->sw_stat;
3468
3469	/* save link up/down time/cnt, reset/memory/watchdog cnt */
3470	up_cnt = swstats->link_up_cnt;
3471	down_cnt = swstats->link_down_cnt;
3472	up_time = swstats->link_up_time;
3473	down_time = swstats->link_down_time;
3474	reset_cnt = swstats->soft_reset_cnt;
3475	mem_alloc_cnt = swstats->mem_allocated;
3476	mem_free_cnt = swstats->mem_freed;
3477	watchdog_cnt = swstats->watchdog_timer_cnt;
3478
3479	memset(stats, 0, sizeof(struct stat_block));
3480
3481	/* restore link up/down time/cnt, reset/memory/watchdog cnt */
3482	swstats->link_up_cnt = up_cnt;
3483	swstats->link_down_cnt = down_cnt;
3484	swstats->link_up_time = up_time;
3485	swstats->link_down_time = down_time;
3486	swstats->soft_reset_cnt = reset_cnt;
3487	swstats->mem_allocated = mem_alloc_cnt;
3488	swstats->mem_freed = mem_free_cnt;
3489	swstats->watchdog_timer_cnt = watchdog_cnt;
3490
3491	/* SXE-002: Configure link and activity LED to turn it off */
3492	subid = sp->pdev->subsystem_device;
3493	if (((subid & 0xFF) >= 0x07) &&
3494	    (sp->device_type == XFRAME_I_DEVICE)) {
3495		val64 = readq(&bar0->gpio_control);
3496		val64 |= 0x0000800000000000ULL;
3497		writeq(val64, &bar0->gpio_control);
3498		val64 = 0x0411040400000000ULL;
3499		writeq(val64, (void __iomem *)bar0 + 0x2700);
3500	}
3501
3502	/*
3503	 * Clear spurious ECC interrupts that would have occurred on
3504	 * XFRAME II cards after reset.
3505	 */
3506	if (sp->device_type == XFRAME_II_DEVICE) {
3507		val64 = readq(&bar0->pcc_err_reg);
3508		writeq(val64, &bar0->pcc_err_reg);
3509	}
3510
3511	sp->device_enabled_once = false;
3512}
3513
3514/**
3515 *  s2io_set_swapper - to set the swapper controle on the card
3516 *  @sp : private member of the device structure,
3517 *  pointer to the s2io_nic structure.
3518 *  Description: Function to set the swapper control on the card
3519 *  correctly depending on the 'endianness' of the system.
3520 *  Return value:
3521 *  SUCCESS on success and FAILURE on failure.
3522 */
3523
3524static int s2io_set_swapper(struct s2io_nic *sp)
3525{
3526	struct net_device *dev = sp->dev;
3527	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3528	u64 val64, valt, valr;
3529
3530	/*
3531	 * Set proper endian settings and verify the same by reading
3532	 * the PIF Feed-back register.
3533	 */
3534
3535	val64 = readq(&bar0->pif_rd_swapper_fb);
3536	if (val64 != 0x0123456789ABCDEFULL) {
3537		int i = 0;
3538		static const u64 value[] = {
3539			0xC30000C3C30000C3ULL,	/* FE=1, SE=1 */
3540			0x8100008181000081ULL,	/* FE=1, SE=0 */
3541			0x4200004242000042ULL,	/* FE=0, SE=1 */
3542			0			/* FE=0, SE=0 */
3543		};
3544
3545		while (i < 4) {
3546			writeq(value[i], &bar0->swapper_ctrl);
3547			val64 = readq(&bar0->pif_rd_swapper_fb);
3548			if (val64 == 0x0123456789ABCDEFULL)
3549				break;
3550			i++;
3551		}
3552		if (i == 4) {
3553			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, "
3554				  "feedback read %llx\n",
3555				  dev->name, (unsigned long long)val64);
3556			return FAILURE;
3557		}
3558		valr = value[i];
3559	} else {
3560		valr = readq(&bar0->swapper_ctrl);
3561	}
3562
3563	valt = 0x0123456789ABCDEFULL;
3564	writeq(valt, &bar0->xmsi_address);
3565	val64 = readq(&bar0->xmsi_address);
3566
3567	if (val64 != valt) {
3568		int i = 0;
3569		static const u64 value[] = {
3570			0x00C3C30000C3C300ULL,	/* FE=1, SE=1 */
3571			0x0081810000818100ULL,	/* FE=1, SE=0 */
3572			0x0042420000424200ULL,	/* FE=0, SE=1 */
3573			0			/* FE=0, SE=0 */
3574		};
3575
3576		while (i < 4) {
3577			writeq((value[i] | valr), &bar0->swapper_ctrl);
3578			writeq(valt, &bar0->xmsi_address);
3579			val64 = readq(&bar0->xmsi_address);
3580			if (val64 == valt)
3581				break;
3582			i++;
3583		}
3584		if (i == 4) {
3585			unsigned long long x = val64;
3586			DBG_PRINT(ERR_DBG,
3587				  "Write failed, Xmsi_addr reads:0x%llx\n", x);
3588			return FAILURE;
3589		}
3590	}
3591	val64 = readq(&bar0->swapper_ctrl);
3592	val64 &= 0xFFFF000000000000ULL;
3593
3594#ifdef __BIG_ENDIAN
3595	/*
3596	 * The device by default set to a big endian format, so a
3597	 * big endian driver need not set anything.
3598	 */
3599	val64 |= (SWAPPER_CTRL_TXP_FE |
3600		  SWAPPER_CTRL_TXP_SE |
3601		  SWAPPER_CTRL_TXD_R_FE |
3602		  SWAPPER_CTRL_TXD_W_FE |
3603		  SWAPPER_CTRL_TXF_R_FE |
3604		  SWAPPER_CTRL_RXD_R_FE |
3605		  SWAPPER_CTRL_RXD_W_FE |
3606		  SWAPPER_CTRL_RXF_W_FE |
3607		  SWAPPER_CTRL_XMSI_FE |
3608		  SWAPPER_CTRL_STATS_FE |
3609		  SWAPPER_CTRL_STATS_SE);
3610	if (sp->config.intr_type == INTA)
3611		val64 |= SWAPPER_CTRL_XMSI_SE;
3612	writeq(val64, &bar0->swapper_ctrl);
3613#else
3614	/*
3615	 * Initially we enable all bits to make it accessible by the
3616	 * driver, then we selectively enable only those bits that
3617	 * we want to set.
3618	 */
3619	val64 |= (SWAPPER_CTRL_TXP_FE |
3620		  SWAPPER_CTRL_TXP_SE |
3621		  SWAPPER_CTRL_TXD_R_FE |
3622		  SWAPPER_CTRL_TXD_R_SE |
3623		  SWAPPER_CTRL_TXD_W_FE |
3624		  SWAPPER_CTRL_TXD_W_SE |
3625		  SWAPPER_CTRL_TXF_R_FE |
3626		  SWAPPER_CTRL_RXD_R_FE |
3627		  SWAPPER_CTRL_RXD_R_SE |
3628		  SWAPPER_CTRL_RXD_W_FE |
3629		  SWAPPER_CTRL_RXD_W_SE |
3630		  SWAPPER_CTRL_RXF_W_FE |
3631		  SWAPPER_CTRL_XMSI_FE |
3632		  SWAPPER_CTRL_STATS_FE |
3633		  SWAPPER_CTRL_STATS_SE);
3634	if (sp->config.intr_type == INTA)
3635		val64 |= SWAPPER_CTRL_XMSI_SE;
3636	writeq(val64, &bar0->swapper_ctrl);
3637#endif
3638	val64 = readq(&bar0->swapper_ctrl);
3639
3640	/*
3641	 * Verifying if endian settings are accurate by reading a
3642	 * feedback register.
3643	 */
3644	val64 = readq(&bar0->pif_rd_swapper_fb);
3645	if (val64 != 0x0123456789ABCDEFULL) {
3646		/* Endian settings are incorrect, calls for another dekko. */
3647		DBG_PRINT(ERR_DBG,
3648			  "%s: Endian settings are wrong, feedback read %llx\n",
3649			  dev->name, (unsigned long long)val64);
3650		return FAILURE;
3651	}
3652
3653	return SUCCESS;
3654}
3655
3656static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3657{
3658	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3659	u64 val64;
3660	int ret = 0, cnt = 0;
3661
3662	do {
3663		val64 = readq(&bar0->xmsi_access);
3664		if (!(val64 & s2BIT(15)))
3665			break;
3666		mdelay(1);
3667		cnt++;
3668	} while (cnt < 5);
3669	if (cnt == 5) {
3670		DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3671		ret = 1;
3672	}
3673
3674	return ret;
3675}
3676
3677static void restore_xmsi_data(struct s2io_nic *nic)
3678{
3679	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3680	u64 val64;
3681	int i, msix_index;
3682
3683	if (nic->device_type == XFRAME_I_DEVICE)
3684		return;
3685
3686	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3687		msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3688		writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3689		writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3690		val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
3691		writeq(val64, &bar0->xmsi_access);
3692		if (wait_for_msix_trans(nic, msix_index))
3693			DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3694				  __func__, msix_index);
3695	}
3696}
3697
3698static void store_xmsi_data(struct s2io_nic *nic)
3699{
3700	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3701	u64 val64, addr, data;
3702	int i, msix_index;
3703
3704	if (nic->device_type == XFRAME_I_DEVICE)
3705		return;
3706
3707	/* Store and display */
3708	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3709		msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3710		val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
3711		writeq(val64, &bar0->xmsi_access);
3712		if (wait_for_msix_trans(nic, msix_index)) {
3713			DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3714				  __func__, msix_index);
3715			continue;
3716		}
3717		addr = readq(&bar0->xmsi_address);
3718		data = readq(&bar0->xmsi_data);
3719		if (addr && data) {
3720			nic->msix_info[i].addr = addr;
3721			nic->msix_info[i].data = data;
3722		}
3723	}
3724}
3725
3726static int s2io_enable_msi_x(struct s2io_nic *nic)
3727{
3728	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3729	u64 rx_mat;
3730	u16 msi_control; /* Temp variable */
3731	int ret, i, j, msix_indx = 1;
3732	int size;
3733	struct stat_block *stats = nic->mac_control.stats_info;
3734	struct swStat *swstats = &stats->sw_stat;
3735
3736	size = nic->num_entries * sizeof(struct msix_entry);
3737	nic->entries = kzalloc(size, GFP_KERNEL);
3738	if (!nic->entries) {
3739		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3740			  __func__);
3741		swstats->mem_alloc_fail_cnt++;
3742		return -ENOMEM;
3743	}
3744	swstats->mem_allocated += size;
3745
3746	size = nic->num_entries * sizeof(struct s2io_msix_entry);
3747	nic->s2io_entries = kzalloc(size, GFP_KERNEL);
3748	if (!nic->s2io_entries) {
3749		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3750			  __func__);
3751		swstats->mem_alloc_fail_cnt++;
3752		kfree(nic->entries);
3753		swstats->mem_freed
3754			+= (nic->num_entries * sizeof(struct msix_entry));
3755		return -ENOMEM;
3756	}
3757	swstats->mem_allocated += size;
3758
3759	nic->entries[0].entry = 0;
3760	nic->s2io_entries[0].entry = 0;
3761	nic->s2io_entries[0].in_use = MSIX_FLG;
3762	nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
3763	nic->s2io_entries[0].arg = &nic->mac_control.fifos;
3764
3765	for (i = 1; i < nic->num_entries; i++) {
3766		nic->entries[i].entry = ((i - 1) * 8) + 1;
3767		nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
3768		nic->s2io_entries[i].arg = NULL;
3769		nic->s2io_entries[i].in_use = 0;
3770	}
3771
3772	rx_mat = readq(&bar0->rx_mat);
3773	for (j = 0; j < nic->config.rx_ring_num; j++) {
3774		rx_mat |= RX_MAT_SET(j, msix_indx);
3775		nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
3776		nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
3777		nic->s2io_entries[j+1].in_use = MSIX_FLG;
3778		msix_indx += 8;
3779	}
3780	writeq(rx_mat, &bar0->rx_mat);
3781	readq(&bar0->rx_mat);
3782
3783	ret = pci_enable_msix_range(nic->pdev, nic->entries,
3784				    nic->num_entries, nic->num_entries);
3785	/* We fail init if error or we get less vectors than min required */
3786	if (ret < 0) {
3787		DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
3788		kfree(nic->entries);
3789		swstats->mem_freed += nic->num_entries *
3790			sizeof(struct msix_entry);
3791		kfree(nic->s2io_entries);
3792		swstats->mem_freed += nic->num_entries *
3793			sizeof(struct s2io_msix_entry);
3794		nic->entries = NULL;
3795		nic->s2io_entries = NULL;
3796		return -ENOMEM;
3797	}
3798
3799	/*
3800	 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3801	 * in the herc NIC. (Temp change, needs to be removed later)
3802	 */
3803	pci_read_config_word(nic->pdev, 0x42, &msi_control);
3804	msi_control |= 0x1; /* Enable MSI */
3805	pci_write_config_word(nic->pdev, 0x42, msi_control);
3806
3807	return 0;
3808}
3809
3810/* Handle software interrupt used during MSI(X) test */
3811static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3812{
3813	struct s2io_nic *sp = dev_id;
3814
3815	sp->msi_detected = 1;
3816	wake_up(&sp->msi_wait);
3817
3818	return IRQ_HANDLED;
3819}
3820
3821/* Test interrupt path by forcing a software IRQ */
3822static int s2io_test_msi(struct s2io_nic *sp)
3823{
3824	struct pci_dev *pdev = sp->pdev;
3825	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3826	int err;
3827	u64 val64, saved64;
3828
3829	err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
3830			  sp->name, sp);
3831	if (err) {
3832		DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
3833			  sp->dev->name, pci_name(pdev), pdev->irq);
3834		return err;
3835	}
3836
3837	init_waitqueue_head(&sp->msi_wait);
3838	sp->msi_detected = 0;
3839
3840	saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3841	val64 |= SCHED_INT_CTRL_ONE_SHOT;
3842	val64 |= SCHED_INT_CTRL_TIMER_EN;
3843	val64 |= SCHED_INT_CTRL_INT2MSI(1);
3844	writeq(val64, &bar0->scheduled_int_ctrl);
3845
3846	wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3847
3848	if (!sp->msi_detected) {
3849		/* MSI(X) test failed, go back to INTx mode */
3850		DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
3851			  "using MSI(X) during test\n",
3852			  sp->dev->name, pci_name(pdev));
3853
3854		err = -EOPNOTSUPP;
3855	}
3856
3857	free_irq(sp->entries[1].vector, sp);
3858
3859	writeq(saved64, &bar0->scheduled_int_ctrl);
3860
3861	return err;
3862}
3863
3864static void remove_msix_isr(struct s2io_nic *sp)
3865{
3866	int i;
3867	u16 msi_control;
3868
3869	for (i = 0; i < sp->num_entries; i++) {
3870		if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) {
3871			int vector = sp->entries[i].vector;
3872			void *arg = sp->s2io_entries[i].arg;
3873			free_irq(vector, arg);
3874		}
3875	}
3876
3877	kfree(sp->entries);
3878	kfree(sp->s2io_entries);
3879	sp->entries = NULL;
3880	sp->s2io_entries = NULL;
3881
3882	pci_read_config_word(sp->pdev, 0x42, &msi_control);
3883	msi_control &= 0xFFFE; /* Disable MSI */
3884	pci_write_config_word(sp->pdev, 0x42, msi_control);
3885
3886	pci_disable_msix(sp->pdev);
3887}
3888
/* Release the legacy INTA interrupt line requested at card bring-up */
static void remove_inta_isr(struct s2io_nic *sp)
{
	free_irq(sp->pdev->irq, sp->dev);
}
3893
3894/* ********************************************************* *
3895 * Functions defined below concern the OS part of the driver *
3896 * ********************************************************* */
3897
3898/**
3899 *  s2io_open - open entry point of the driver
3900 *  @dev : pointer to the device structure.
3901 *  Description:
3902 *  This function is the open entry point of the driver. It mainly calls a
3903 *  function to allocate Rx buffers and inserts them into the buffer
3904 *  descriptors and then enables the Rx part of the NIC.
3905 *  Return value:
3906 *  0 on success and an appropriate (-)ve integer as defined in errno.h
3907 *   file on failure.
3908 */
3909
3910static int s2io_open(struct net_device *dev)
3911{
3912	struct s2io_nic *sp = netdev_priv(dev);
3913	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
3914	int err = 0;
3915
3916	/*
3917	 * Make sure you have link off by default every time
3918	 * Nic is initialized
3919	 */
3920	netif_carrier_off(dev);
3921	sp->last_link_state = 0;
3922
3923	/* Initialize H/W and enable interrupts */
3924	err = s2io_card_up(sp);
3925	if (err) {
3926		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3927			  dev->name);
3928		goto hw_init_failed;
3929	}
3930
3931	if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
3932		DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3933		s2io_card_down(sp);
3934		err = -ENODEV;
3935		goto hw_init_failed;
3936	}
3937	s2io_start_all_tx_queue(sp);
3938	return 0;
3939
3940hw_init_failed:
3941	if (sp->config.intr_type == MSI_X) {
3942		if (sp->entries) {
3943			kfree(sp->entries);
3944			swstats->mem_freed += sp->num_entries *
3945				sizeof(struct msix_entry);
3946		}
3947		if (sp->s2io_entries) {
3948			kfree(sp->s2io_entries);
3949			swstats->mem_freed += sp->num_entries *
3950				sizeof(struct s2io_msix_entry);
3951		}
3952	}
3953	return err;
3954}
3955
3956/**
3957 *  s2io_close -close entry point of the driver
3958 *  @dev : device pointer.
3959 *  Description:
3960 *  This is the stop entry point of the driver. It needs to undo exactly
3961 *  whatever was done by the open entry point,thus it's usually referred to
3962 *  as the close function.Among other things this function mainly stops the
3963 *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3964 *  Return value:
3965 *  0 on success and an appropriate (-)ve integer as defined in errno.h
3966 *  file on failure.
3967 */
3968
3969static int s2io_close(struct net_device *dev)
3970{
3971	struct s2io_nic *sp = netdev_priv(dev);
3972	struct config_param *config = &sp->config;
3973	u64 tmp64;
3974	int offset;
3975
3976	/* Return if the device is already closed               *
3977	 *  Can happen when s2io_card_up failed in change_mtu    *
3978	 */
3979	if (!is_s2io_card_up(sp))
3980		return 0;
3981
3982	s2io_stop_all_tx_queue(sp);
3983	/* delete all populated mac entries */
3984	for (offset = 1; offset < config->max_mc_addr; offset++) {
3985		tmp64 = do_s2io_read_unicast_mc(sp, offset);
3986		if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
3987			do_s2io_delete_unicast_mc(sp, tmp64);
3988	}
3989
3990	s2io_card_down(sp);
3991
3992	return 0;
3993}
3994
3995/**
 *  s2io_xmit - Tx entry point of the driver
3997 *  @skb : the socket buffer containing the Tx data.
3998 *  @dev : device pointer.
3999 *  Description :
4000 *  This function is the Tx entry point of the driver. S2IO NIC supports
4001 *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
 *  NOTE: when the device can't queue the packet, the trans_start variable
 *  will not be updated.
4004 *  Return value:
4005 *  0 on success & 1 on failure.
4006 */
4007
static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct s2io_nic *sp = netdev_priv(dev);
	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
	register u64 val64;
	struct TxD *txdp;
	struct TxFIFO_element __iomem *tx_fifo;
	unsigned long flags = 0;
	u16 vlan_tag = 0;
	struct fifo_info *fifo = NULL;
	int offload_type;
	int enable_per_list_interrupt = 0;
	struct config_param *config = &sp->config;
	struct mac_info *mac_control = &sp->mac_control;
	struct stat_block *stats = mac_control->stats_info;
	struct swStat *swstats = &stats->sw_stat;

	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);

	/* Drop empty buffers outright; nothing to transmit. */
	if (unlikely(skb->len <= 0)) {
		DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Silently drop packets while the card is being reset. */
	if (!is_s2io_card_up(sp)) {
		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
			  dev->name);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	queue = 0;
	if (skb_vlan_tag_present(skb))
		vlan_tag = skb_vlan_tag_get(skb);
	/*
	 * Pick the Tx FIFO.  With default steering, IPv4 TCP/UDP flows are
	 * hashed on source+destination port into the TCP or UDP FIFO
	 * group; everything else uses FIFO 0.
	 */
	if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *ip;
			struct tcphdr *th;
			ip = ip_hdr(skb);

			/* Port-based steering only works on the first
			 * fragment, where the L4 header is present.
			 */
			if (!ip_is_fragment(ip)) {
				th = (struct tcphdr *)(((unsigned char *)ip) +
						       ip->ihl*4);

				if (ip->protocol == IPPROTO_TCP) {
					queue_len = sp->total_tcp_fifos;
					queue = (ntohs(th->source) +
						 ntohs(th->dest)) &
						sp->fifo_selector[queue_len - 1];
					if (queue >= queue_len)
						queue = queue_len - 1;
				} else if (ip->protocol == IPPROTO_UDP) {
					queue_len = sp->total_udp_fifos;
					queue = (ntohs(th->source) +
						 ntohs(th->dest)) &
						sp->fifo_selector[queue_len - 1];
					if (queue >= queue_len)
						queue = queue_len - 1;
					queue += sp->udp_fifo_idx;
					/* Larger UDP frames request a
					 * per-list completion interrupt.
					 */
					if (skb->len > 1024)
						enable_per_list_interrupt = 1;
				}
			}
		}
	} else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
		/* get fifo number based on skb->priority value */
		queue = config->fifo_mapping
			[skb->priority & (MAX_TX_FIFOS - 1)];
	fifo = &mac_control->fifos[queue];

	spin_lock_irqsave(&fifo->tx_lock, flags);

	/* If the chosen queue was stopped under us, ask the stack to
	 * requeue the packet.
	 */
	if (sp->config.multiq) {
		if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	} else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
		if (netif_queue_stopped(dev)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	}

	put_off = (u16)fifo->tx_curr_put_info.offset;
	get_off = (u16)fifo->tx_curr_get_info.offset;
	txdp = fifo->list_info[put_off].list_virt_addr;

	queue_len = fifo->tx_curr_put_info.fifo_len + 1;
	/* Avoid "put" pointer going beyond "get" pointer */
	if (txdp->Host_Control ||
	    ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
		s2io_stop_tx_queue(sp, fifo->fifo_no);
		dev_kfree_skb_any(skb);
		spin_unlock_irqrestore(&fifo->tx_lock, flags);
		return NETDEV_TX_OK;
	}

	/* Program LSO for GSO frames and checksum offload flags. */
	offload_type = s2io_offload_type(skb);
	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
		txdp->Control_1 |= TXD_TCP_LSO_EN;
		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
	}
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN |
				    TXD_TX_CKO_TCP_EN |
				    TXD_TX_CKO_UDP_EN);
	}
	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
	txdp->Control_1 |= TXD_LIST_OWN_XENA;
	txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
	if (enable_per_list_interrupt)
		if (put_off & (queue_len >> 5))
			txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
	if (vlan_tag) {
		txdp->Control_2 |= TXD_VLAN_ENABLE;
		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
	}

	/* DMA-map the linear (head) portion of the skb. */
	frg_len = skb_headlen(skb);
	txdp->Buffer_Pointer = dma_map_single(&sp->pdev->dev, skb->data,
					      frg_len, DMA_TO_DEVICE);
	if (dma_mapping_error(&sp->pdev->dev, txdp->Buffer_Pointer))
		goto pci_map_failed;

	/* Host_Control keeps the skb pointer for the completion path. */
	txdp->Host_Control = (unsigned long)skb;
	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);

	frg_cnt = skb_shinfo(skb)->nr_frags;
	/* For fragmented SKB. */
	for (i = 0; i < frg_cnt; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		/* A '0' length fragment will be ignored */
		if (!skb_frag_size(frag))
			continue;
		txdp++;
		/* NOTE(review): unlike the head mapping above, this
		 * skb_frag_dma_map() result is not checked with
		 * dma_mapping_error() — confirm whether frag mapping
		 * failures need handling here.
		 */
		txdp->Buffer_Pointer = (u64)skb_frag_dma_map(&sp->pdev->dev,
							     frag, 0,
							     skb_frag_size(frag),
							     DMA_TO_DEVICE);
		txdp->Control_1 = TXD_BUFFER0_SIZE(skb_frag_size(frag));
	}
	txdp->Control_1 |= TXD_GATHER_CODE_LAST;

	/* Hand the descriptor list to the hardware Tx FIFO. */
	tx_fifo = mac_control->tx_FIFO_start[queue];
	val64 = fifo->list_info[put_off].list_phy_addr;
	writeq(val64, &tx_fifo->TxDL_Pointer);

	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
		 TX_FIFO_LAST_LIST);
	if (offload_type)
		val64 |= TX_FIFO_SPECIAL_FUNC;

	writeq(val64, &tx_fifo->List_Control);

	/* Advance the put pointer, wrapping at the end of the ring. */
	put_off++;
	if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
		put_off = 0;
	fifo->tx_curr_put_info.offset = put_off;

	/* Avoid "put" pointer going beyond "get" pointer */
	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		swstats->fifo_full_cnt++;
		DBG_PRINT(TX_DBG,
			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
			  put_off, get_off);
		s2io_stop_tx_queue(sp, fifo->fifo_no);
	}
	swstats->mem_allocated += skb->truesize;
	spin_unlock_irqrestore(&fifo->tx_lock, flags);

	/* Under MSI-X, reap completions immediately from this context. */
	if (sp->config.intr_type == MSI_X)
		tx_intr_handler(fifo);

	return NETDEV_TX_OK;

pci_map_failed:
	/* DMA mapping failure: drop the packet and stop the queue. */
	swstats->pci_map_fail_cnt++;
	s2io_stop_tx_queue(sp, fifo->fifo_no);
	swstats->mem_freed += skb->truesize;
	dev_kfree_skb_any(skb);
	spin_unlock_irqrestore(&fifo->tx_lock, flags);
	return NETDEV_TX_OK;
}
4194
4195static void
4196s2io_alarm_handle(struct timer_list *t)
4197{
4198	struct s2io_nic *sp = from_timer(sp, t, alarm_timer);
4199	struct net_device *dev = sp->dev;
4200
4201	s2io_handle_errors(dev);
4202	mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4203}
4204
4205static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4206{
4207	struct ring_info *ring = (struct ring_info *)dev_id;
4208	struct s2io_nic *sp = ring->nic;
4209	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4210
4211	if (unlikely(!is_s2io_card_up(sp)))
4212		return IRQ_HANDLED;
4213
4214	if (sp->config.napi) {
4215		u8 __iomem *addr = NULL;
4216		u8 val8 = 0;
4217
4218		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
4219		addr += (7 - ring->ring_no);
4220		val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
4221		writeb(val8, addr);
4222		val8 = readb(addr);
4223		napi_schedule(&ring->napi);
4224	} else {
4225		rx_intr_handler(ring, 0);
4226		s2io_chk_rx_buffers(sp, ring);
4227	}
4228
4229	return IRQ_HANDLED;
4230}
4231
4232static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4233{
4234	int i;
4235	struct fifo_info *fifos = (struct fifo_info *)dev_id;
4236	struct s2io_nic *sp = fifos->nic;
4237	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4238	struct config_param *config  = &sp->config;
4239	u64 reason;
4240
4241	if (unlikely(!is_s2io_card_up(sp)))
4242		return IRQ_NONE;
4243
4244	reason = readq(&bar0->general_int_status);
4245	if (unlikely(reason == S2IO_MINUS_ONE))
4246		/* Nothing much can be done. Get out */
4247		return IRQ_HANDLED;
4248
4249	if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
4250		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4251
4252		if (reason & GEN_INTR_TXPIC)
4253			s2io_txpic_intr_handle(sp);
4254
4255		if (reason & GEN_INTR_TXTRAFFIC)
4256			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4257
4258		for (i = 0; i < config->tx_fifo_num; i++)
4259			tx_intr_handler(&fifos[i]);
4260
4261		writeq(sp->general_int_mask, &bar0->general_int_mask);
4262		readl(&bar0->general_int_status);
4263		return IRQ_HANDLED;
4264	}
4265	/* The interrupt was not raised by us */
4266	return IRQ_NONE;
4267}
4268
/* Service GPIO link up/down interrupts reported via the TXPIC block. */
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;

	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
		    (val64 & GPIO_INT_REG_LINK_UP)) {
			/*
			 * This is unstable state so clear both up/down
			 * interrupt and adapter to re-evaluate the link state.
			 */
			val64 |= GPIO_INT_REG_LINK_DOWN;
			val64 |= GPIO_INT_REG_LINK_UP;
			writeq(val64, &bar0->gpio_int_reg);
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~(GPIO_INT_MASK_LINK_UP |
				   GPIO_INT_MASK_LINK_DOWN);
			writeq(val64, &bar0->gpio_int_mask);
		} else if (val64 & GPIO_INT_REG_LINK_UP) {
			val64 = readq(&bar0->adapter_status);
			/* Enable Adapter */
			val64 = readq(&bar0->adapter_control);
			val64 |= ADAPTER_CNTL_EN;
			writeq(val64, &bar0->adapter_control);
			val64 |= ADAPTER_LED_ON;
			writeq(val64, &bar0->adapter_control);
			if (!sp->device_enabled_once)
				sp->device_enabled_once = 1;

			s2io_link(sp, LINK_UP);
			/*
			 * unmask link down interrupt and mask link-up
			 * intr
			 */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
			val64 |= GPIO_INT_MASK_LINK_UP;
			writeq(val64, &bar0->gpio_int_mask);

		} else if (val64 & GPIO_INT_REG_LINK_DOWN) {
			val64 = readq(&bar0->adapter_status);
			s2io_link(sp, LINK_DOWN);
			/* Link is down so unmask link up interrupt */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_UP;
			val64 |= GPIO_INT_MASK_LINK_DOWN;
			writeq(val64, &bar0->gpio_int_mask);

			/* turn off LED */
			val64 = readq(&bar0->adapter_control);
			val64 = val64 & (~ADAPTER_LED_ON);
			writeq(val64, &bar0->adapter_control);
		}
	}
	/* Result unused — presumably a read-back to post the preceding
	 * mask writes; TODO confirm against hardware errata.
	 */
	val64 = readq(&bar0->gpio_int_mask);
}
4328
4329/**
 *  do_s2io_chk_alarm_bit - Check for alarm and increment the counter
4331 *  @value: alarm bits
4332 *  @addr: address value
4333 *  @cnt: counter variable
4334 *  Description: Check for alarm and increment the counter
4335 *  Return Value:
4336 *  1 - if alarm bit set
4337 *  0 - if alarm bit is not set
4338 */
4339static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr,
4340				 unsigned long long *cnt)
4341{
4342	u64 val64;
4343	val64 = readq(addr);
4344	if (val64 & value) {
4345		writeq(val64, addr);
4346		(*cnt)++;
4347		return 1;
4348	}
4349	return 0;
4350
4351}
4352
4353/**
4354 *  s2io_handle_errors - Xframe error indication handler
4355 *  @dev_id: opaque handle to dev
4356 *  Description: Handle alarms such as loss of link, single or
4357 *  double ECC errors, critical and serious errors.
4358 *  Return Value:
4359 *  NONE
4360 */
static void s2io_handle_errors(void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 temp64 = 0, val64 = 0;
	int i = 0;

	struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
	struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;

	/* Nothing to do if the card is down or has fallen off the bus */
	if (!is_s2io_card_up(sp))
		return;

	if (pci_channel_offline(sp->pdev))
		return;

	/* ring_full_cnt is rebuilt from the bump counters below on
	 * every pass, so clear it first.
	 */
	memset(&sw_stat->ring_full_cnt, 0,
	       sizeof(sw_stat->ring_full_cnt));

	/* Handling the XPAK counters update */
	if (stats->xpak_timer_count < 72000) {
		/* waiting for an hour */
		stats->xpak_timer_count++;
	} else {
		s2io_updt_xpak_counter(dev);
		/* reset the count to zero */
		stats->xpak_timer_count = 0;
	}

	/* Handling link status change error Intr */
	if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
		val64 = readq(&bar0->mac_rmac_err_reg);
		/* write-to-clear, then defer link handling to a workqueue */
		writeq(val64, &bar0->mac_rmac_err_reg);
		if (val64 & RMAC_LINK_STATE_CHANGE_INT)
			schedule_work(&sp->set_link_task);
	}

	/* In case of a serious error, the device will be Reset. */
	if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
				  &sw_stat->serious_err_cnt))
		goto reset;

	/* Check for data parity error */
	if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
				  &sw_stat->parity_err_cnt))
		goto reset;

	/* Check for ring full counter */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Each 64-bit bump register packs four 16-bit per-ring
		 * counters; extract and accumulate them.
		 */
		val64 = readq(&bar0->ring_bump_counter1);
		for (i = 0; i < 4; i++) {
			temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
			temp64 >>= 64 - ((i+1)*16);
			sw_stat->ring_full_cnt[i] += temp64;
		}

		val64 = readq(&bar0->ring_bump_counter2);
		for (i = 0; i < 4; i++) {
			temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
			temp64 >>= 64 - ((i+1)*16);
			sw_stat->ring_full_cnt[i+4] += temp64;
		}
	}

	/* For each block below: fatal alarm bits trigger a reset;
	 * recoverable bits are just acknowledged and counted.
	 */
	val64 = readq(&bar0->txdma_int_status);
	/*check for pfc_err*/
	if (val64 & TXDMA_PFC_INT) {
		if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
					  PFC_MISC_0_ERR | PFC_MISC_1_ERR |
					  PFC_PCIX_ERR,
					  &bar0->pfc_err_reg,
					  &sw_stat->pfc_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR,
				      &bar0->pfc_err_reg,
				      &sw_stat->pfc_err_cnt);
	}

	/*check for tda_err*/
	if (val64 & TXDMA_TDA_INT) {
		if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR |
					  TDA_SM0_ERR_ALARM |
					  TDA_SM1_ERR_ALARM,
					  &bar0->tda_err_reg,
					  &sw_stat->tda_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
				      &bar0->tda_err_reg,
				      &sw_stat->tda_err_cnt);
	}
	/*check for pcc_err*/
	if (val64 & TXDMA_PCC_INT) {
		if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
					  PCC_N_SERR | PCC_6_COF_OV_ERR |
					  PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
					  PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR |
					  PCC_TXB_ECC_DB_ERR,
					  &bar0->pcc_err_reg,
					  &sw_stat->pcc_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
				      &bar0->pcc_err_reg,
				      &sw_stat->pcc_err_cnt);
	}

	/*check for tti_err*/
	if (val64 & TXDMA_TTI_INT) {
		if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM,
					  &bar0->tti_err_reg,
					  &sw_stat->tti_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
				      &bar0->tti_err_reg,
				      &sw_stat->tti_err_cnt);
	}

	/*check for lso_err*/
	if (val64 & TXDMA_LSO_INT) {
		if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT |
					  LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
					  &bar0->lso_err_reg,
					  &sw_stat->lso_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
				      &bar0->lso_err_reg,
				      &sw_stat->lso_err_cnt);
	}

	/*check for tpa_err*/
	if (val64 & TXDMA_TPA_INT) {
		if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM,
					  &bar0->tpa_err_reg,
					  &sw_stat->tpa_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP,
				      &bar0->tpa_err_reg,
				      &sw_stat->tpa_err_cnt);
	}

	/*check for sm_err*/
	if (val64 & TXDMA_SM_INT) {
		if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM,
					  &bar0->sm_err_reg,
					  &sw_stat->sm_err_cnt))
			goto reset;
	}

	val64 = readq(&bar0->mac_int_status);
	if (val64 & MAC_INT_STATUS_TMAC_INT) {
		if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
					  &bar0->mac_tmac_err_reg,
					  &sw_stat->mac_tmac_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
				      TMAC_DESC_ECC_SG_ERR |
				      TMAC_DESC_ECC_DB_ERR,
				      &bar0->mac_tmac_err_reg,
				      &sw_stat->mac_tmac_err_cnt);
	}

	val64 = readq(&bar0->xgxs_int_status);
	if (val64 & XGXS_INT_STATUS_TXGXS) {
		if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
					  &bar0->xgxs_txgxs_err_reg,
					  &sw_stat->xgxs_txgxs_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
				      &bar0->xgxs_txgxs_err_reg,
				      &sw_stat->xgxs_txgxs_err_cnt);
	}

	val64 = readq(&bar0->rxdma_int_status);
	if (val64 & RXDMA_INT_RC_INT_M) {
		if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR |
					  RC_FTC_ECC_DB_ERR |
					  RC_PRCn_SM_ERR_ALARM |
					  RC_FTC_SM_ERR_ALARM,
					  &bar0->rc_err_reg,
					  &sw_stat->rc_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR |
				      RC_FTC_ECC_SG_ERR |
				      RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
				      &sw_stat->rc_err_cnt);
		if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn |
					  PRC_PCI_AB_WR_Rn |
					  PRC_PCI_AB_F_WR_Rn,
					  &bar0->prc_pcix_err_reg,
					  &sw_stat->prc_pcix_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn |
				      PRC_PCI_DP_WR_Rn |
				      PRC_PCI_DP_F_WR_Rn,
				      &bar0->prc_pcix_err_reg,
				      &sw_stat->prc_pcix_err_cnt);
	}

	if (val64 & RXDMA_INT_RPA_INT_M) {
		if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
					  &bar0->rpa_err_reg,
					  &sw_stat->rpa_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
				      &bar0->rpa_err_reg,
				      &sw_stat->rpa_err_cnt);
	}

	if (val64 & RXDMA_INT_RDA_INT_M) {
		if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR |
					  RDA_FRM_ECC_DB_N_AERR |
					  RDA_SM1_ERR_ALARM |
					  RDA_SM0_ERR_ALARM |
					  RDA_RXD_ECC_DB_SERR,
					  &bar0->rda_err_reg,
					  &sw_stat->rda_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR |
				      RDA_FRM_ECC_SG_ERR |
				      RDA_MISC_ERR |
				      RDA_PCIX_ERR,
				      &bar0->rda_err_reg,
				      &sw_stat->rda_err_cnt);
	}

	if (val64 & RXDMA_INT_RTI_INT_M) {
		if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM,
					  &bar0->rti_err_reg,
					  &sw_stat->rti_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
				      &bar0->rti_err_reg,
				      &sw_stat->rti_err_cnt);
	}

	val64 = readq(&bar0->mac_int_status);
	if (val64 & MAC_INT_STATUS_RMAC_INT) {
		if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
					  &bar0->mac_rmac_err_reg,
					  &sw_stat->mac_rmac_err_cnt))
			goto reset;
		do_s2io_chk_alarm_bit(RMAC_UNUSED_INT |
				      RMAC_SINGLE_ECC_ERR |
				      RMAC_DOUBLE_ECC_ERR,
				      &bar0->mac_rmac_err_reg,
				      &sw_stat->mac_rmac_err_cnt);
	}

	val64 = readq(&bar0->xgxs_int_status);
	if (val64 & XGXS_INT_STATUS_RXGXS) {
		if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
					  &bar0->xgxs_rxgxs_err_reg,
					  &sw_stat->xgxs_rxgxs_err_cnt))
			goto reset;
	}

	val64 = readq(&bar0->mc_int_status);
	if (val64 & MC_INT_STATUS_MC_INT) {
		if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR,
					  &bar0->mc_err_reg,
					  &sw_stat->mc_err_cnt))
			goto reset;

		/* Handling Ecc errors */
		if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
			writeq(val64, &bar0->mc_err_reg);
			if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
				sw_stat->double_ecc_errs++;
				if (sp->device_type != XFRAME_II_DEVICE) {
					/*
					 * Reset XframeI only if critical error
					 */
					if (val64 &
					    (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
					     MC_ERR_REG_MIRI_ECC_DB_ERR_1))
						goto reset;
				}
			} else
				sw_stat->single_ecc_errs++;
		}
	}
	return;

reset:
	/* Fatal error: stop Tx and schedule a soft reset of the card */
	s2io_stop_all_tx_queue(sp);
	schedule_work(&sp->rst_timer_task);
	sw_stat->soft_reset_cnt++;
}
4649
4650/**
4651 *  s2io_isr - ISR handler of the device .
4652 *  @irq: the irq of the device.
4653 *  @dev_id: a void pointer to the dev structure of the NIC.
4654 *  Description:  This function is the ISR handler of the device. It
4655 *  identifies the reason for the interrupt and calls the relevant
 *  service routines. As a contingency measure, this ISR allocates the
4657 *  recv buffers, if their numbers are below the panic value which is
4658 *  presently set to 25% of the original number of rcv buffers allocated.
4659 *  Return value:
4660 *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
4661 *   IRQ_NONE: will be returned if interrupt is not from our device
4662 */
static irqreturn_t s2io_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int i;
	u64 reason = 0;
	struct mac_info *mac_control;
	struct config_param *config;

	/* Pretend we handled any irq's from a disconnected card */
	if (pci_channel_offline(sp->pdev))
		return IRQ_NONE;

	if (!is_s2io_card_up(sp))
		return IRQ_NONE;

	config = &sp->config;
	mac_control = &sp->mac_control;

	/*
	 * Identify the cause for interrupt and call the appropriate
	 * interrupt handler. Causes for the interrupt could be;
	 * 1. Rx of packet.
	 * 2. Tx complete.
	 * 3. Link down.
	 */
	reason = readq(&bar0->general_int_status);

	/* All-ones read means the device is in a bad state */
	if (unlikely(reason == S2IO_MINUS_ONE))
		return IRQ_HANDLED;	/* Nothing much can be done. Get out */

	if (reason &
	    (GEN_INTR_RXTRAFFIC | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) {
		/* Mask all interrupts while this one is serviced */
		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

		if (config->napi) {
			if (reason & GEN_INTR_RXTRAFFIC) {
				/* Defer Rx work to NAPI; mask and ack Rx,
				 * then read back to flush the writes.
				 */
				napi_schedule(&sp->napi);
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
				readl(&bar0->rx_traffic_int);
			}
		} else {
			/*
			 * rx_traffic_int reg is an R1 register, writing all 1's
			 * will ensure that the actual interrupt causing bit
			 * get's cleared and hence a read can be avoided.
			 */
			if (reason & GEN_INTR_RXTRAFFIC)
				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);

			for (i = 0; i < config->rx_ring_num; i++) {
				struct ring_info *ring = &mac_control->rings[i];

				rx_intr_handler(ring, 0);
			}
		}

		/*
		 * tx_traffic_int reg is an R1 register, writing all 1's
		 * will ensure that the actual interrupt causing bit get's
		 * cleared and hence a read can be avoided.
		 */
		if (reason & GEN_INTR_TXTRAFFIC)
			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

		for (i = 0; i < config->tx_fifo_num; i++)
			tx_intr_handler(&mac_control->fifos[i]);

		if (reason & GEN_INTR_TXPIC)
			s2io_txpic_intr_handle(sp);

		/*
		 * Reallocate the buffers from the interrupt handler itself.
		 */
		if (!config->napi) {
			for (i = 0; i < config->rx_ring_num; i++) {
				struct ring_info *ring = &mac_control->rings[i];

				s2io_chk_rx_buffers(sp, ring);
			}
		}
		/* Restore the interrupt mask and flush the write */
		writeq(sp->general_int_mask, &bar0->general_int_mask);
		readl(&bar0->general_int_status);

		return IRQ_HANDLED;

	} else if (!reason) {
		/* The interrupt was not raised by us */
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}
4758
4759/*
4760 * s2io_updt_stats -
4761 */
4762static void s2io_updt_stats(struct s2io_nic *sp)
4763{
4764	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4765	u64 val64;
4766	int cnt = 0;
4767
4768	if (is_s2io_card_up(sp)) {
4769		/* Apprx 30us on a 133 MHz bus */
4770		val64 = SET_UPDT_CLICKS(10) |
4771			STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4772		writeq(val64, &bar0->stat_cfg);
4773		do {
4774			udelay(100);
4775			val64 = readq(&bar0->stat_cfg);
4776			if (!(val64 & s2BIT(0)))
4777				break;
4778			cnt++;
4779			if (cnt == 5)
4780				break; /* Updt failed */
4781		} while (1);
4782	}
4783}
4784
4785/**
4786 *  s2io_get_stats - Updates the device statistics structure.
4787 *  @dev : pointer to the device structure.
4788 *  Description:
4789 *  This function updates the device statistics structure in the s2io_nic
4790 *  structure and returns a pointer to the same.
4791 *  Return value:
4792 *  pointer to the updated net_device_stats structure.
4793 */
static struct net_device_stats *s2io_get_stats(struct net_device *dev)
{
	struct s2io_nic *sp = netdev_priv(dev);
	struct mac_info *mac_control = &sp->mac_control;
	struct stat_block *stats = mac_control->stats_info;
	u64 delta;

	/* Configure Stats for immediate updt */
	s2io_updt_stats(sp);

	/* A device reset will cause the on-adapter statistics to be zero'ed.
	 * This can be done while running by changing the MTU.  To prevent the
	 * system from having the stats zero'ed, the driver keeps a copy of the
	 * last update to the system (which is also zero'ed on reset).  This
	 * enables the driver to accurately know the delta between the last
	 * update and the current update.
	 */
	/* Each 64-bit counter is split across a 32-bit overflow word
	 * and a 32-bit low word in the adapter's stat block; recombine
	 * before computing the delta.
	 */
	delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets;
	sp->stats.rx_packets += delta;
	dev->stats.rx_packets += delta;

	delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets;
	sp->stats.tx_packets += delta;
	dev->stats.tx_packets += delta;

	delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
		le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes;
	sp->stats.rx_bytes += delta;
	dev->stats.rx_bytes += delta;

	delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
		le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes;
	sp->stats.tx_bytes += delta;
	dev->stats.tx_bytes += delta;

	/* NOTE(review): rmac_drop_frms feeds both rx_errors here and
	 * rx_dropped below — confirm this double use is intentional.
	 */
	delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors;
	sp->stats.rx_errors += delta;
	dev->stats.rx_errors += delta;

	delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors;
	sp->stats.tx_errors += delta;
	dev->stats.tx_errors += delta;

	delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped;
	sp->stats.rx_dropped += delta;
	dev->stats.rx_dropped += delta;

	delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped;
	sp->stats.tx_dropped += delta;
	dev->stats.tx_dropped += delta;

	/* The adapter MAC interprets pause frames as multicast packets, but
	 * does not pass them up.  This erroneously increases the multicast
	 * packet count and needs to be deducted when the multicast frame count
	 * is queried.
	 */
	delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_vld_mcst_frms);
	delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms);
	delta -= sp->stats.multicast;
	sp->stats.multicast += delta;
	dev->stats.multicast += delta;

	/* Length errors = undersized frames + oversized frames */
	delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_usized_frms)) +
		le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors;
	sp->stats.rx_length_errors += delta;
	dev->stats.rx_length_errors += delta;

	delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors;
	sp->stats.rx_crc_errors += delta;
	dev->stats.rx_crc_errors += delta;

	return &dev->stats;
}
4872
4873/**
4874 *  s2io_set_multicast - entry point for multicast address enable/disable.
4875 *  @dev : pointer to the device structure
4876 *  @may_sleep: parameter indicates if sleeping when waiting for command
4877 *  complete
4878 *  Description:
4879 *  This function is a driver entry point which gets called by the kernel
4880 *  whenever multicast addresses must be enabled/disabled. This also gets
 *  called to set/reset promiscuous mode. Depending on the device flag, we
4882 *  determine, if multicast address must be enabled or if promiscuous mode
4883 *  is to be disabled etc.
4884 *  Return value:
4885 *  void.
4886 */
4887static void s2io_set_multicast(struct net_device *dev, bool may_sleep)
4888{
4889	int i, j, prev_cnt;
4890	struct netdev_hw_addr *ha;
4891	struct s2io_nic *sp = netdev_priv(dev);
4892	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4893	u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4894		0xfeffffffffffULL;
4895	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
4896	void __iomem *add;
4897	struct config_param *config = &sp->config;
4898
4899	if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4900		/*  Enable all Multicast addresses */
4901		writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4902		       &bar0->rmac_addr_data0_mem);
4903		writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4904		       &bar0->rmac_addr_data1_mem);
4905		val64 = RMAC_ADDR_CMD_MEM_WE |
4906			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4907			RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
4908		writeq(val64, &bar0->rmac_addr_cmd_mem);
4909		/* Wait till command completes */
4910		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4911				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4912				      S2IO_BIT_RESET, may_sleep);
4913
4914		sp->m_cast_flg = 1;
4915		sp->all_multi_pos = config->max_mc_addr - 1;
4916	} else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4917		/*  Disable all Multicast addresses */
4918		writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4919		       &bar0->rmac_addr_data0_mem);
4920		writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4921		       &bar0->rmac_addr_data1_mem);
4922		val64 = RMAC_ADDR_CMD_MEM_WE |
4923			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4924			RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4925		writeq(val64, &bar0->rmac_addr_cmd_mem);
4926		/* Wait till command completes */
4927		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4928				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4929				      S2IO_BIT_RESET, may_sleep);
4930
4931		sp->m_cast_flg = 0;
4932		sp->all_multi_pos = 0;
4933	}
4934
4935	if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4936		/*  Put the NIC into promiscuous mode */
4937		add = &bar0->mac_cfg;
4938		val64 = readq(&bar0->mac_cfg);
4939		val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4940
4941		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4942		writel((u32)val64, add);
4943		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4944		writel((u32) (val64 >> 32), (add + 4));
4945
4946		if (vlan_tag_strip != 1) {
4947			val64 = readq(&bar0->rx_pa_cfg);
4948			val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4949			writeq(val64, &bar0->rx_pa_cfg);
4950			sp->vlan_strip_flag = 0;
4951		}
4952
4953		val64 = readq(&bar0->mac_cfg);
4954		sp->promisc_flg = 1;
4955		DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
4956			  dev->name);
4957	} else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4958		/*  Remove the NIC from promiscuous mode */
4959		add = &bar0->mac_cfg;
4960		val64 = readq(&bar0->mac_cfg);
4961		val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4962
4963		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4964		writel((u32)val64, add);
4965		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4966		writel((u32) (val64 >> 32), (add + 4));
4967
4968		if (vlan_tag_strip != 0) {
4969			val64 = readq(&bar0->rx_pa_cfg);
4970			val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
4971			writeq(val64, &bar0->rx_pa_cfg);
4972			sp->vlan_strip_flag = 1;
4973		}
4974
4975		val64 = readq(&bar0->mac_cfg);
4976		sp->promisc_flg = 0;
4977		DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", dev->name);
4978	}
4979
4980	/*  Update individual M_CAST address list */
4981	if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
4982		if (netdev_mc_count(dev) >
4983		    (config->max_mc_addr - config->max_mac_addr)) {
4984			DBG_PRINT(ERR_DBG,
4985				  "%s: No more Rx filters can be added - "
4986				  "please enable ALL_MULTI instead\n",
4987				  dev->name);
4988			return;
4989		}
4990
4991		prev_cnt = sp->mc_addr_count;
4992		sp->mc_addr_count = netdev_mc_count(dev);
4993
4994		/* Clear out the previous list of Mc in the H/W. */
4995		for (i = 0; i < prev_cnt; i++) {
4996			writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4997			       &bar0->rmac_addr_data0_mem);
4998			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4999			       &bar0->rmac_addr_data1_mem);
5000			val64 = RMAC_ADDR_CMD_MEM_WE |
5001				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5002				RMAC_ADDR_CMD_MEM_OFFSET
5003				(config->mc_start_offset + i);
5004			writeq(val64, &bar0->rmac_addr_cmd_mem);
5005
5006			/* Wait for command completes */
5007			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5008						  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5009						  S2IO_BIT_RESET, may_sleep)) {
5010				DBG_PRINT(ERR_DBG,
5011					  "%s: Adding Multicasts failed\n",
5012					  dev->name);
5013				return;
5014			}
5015		}
5016
5017		/* Create the new Rx filter list and update the same in H/W. */
5018		i = 0;
5019		netdev_for_each_mc_addr(ha, dev) {
5020			mac_addr = 0;
5021			for (j = 0; j < ETH_ALEN; j++) {
5022				mac_addr |= ha->addr[j];
5023				mac_addr <<= 8;
5024			}
5025			mac_addr >>= 8;
5026			writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
5027			       &bar0->rmac_addr_data0_mem);
5028			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5029			       &bar0->rmac_addr_data1_mem);
5030			val64 = RMAC_ADDR_CMD_MEM_WE |
5031				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5032				RMAC_ADDR_CMD_MEM_OFFSET
5033				(i + config->mc_start_offset);
5034			writeq(val64, &bar0->rmac_addr_cmd_mem);
5035
5036			/* Wait for command completes */
5037			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5038						  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5039						  S2IO_BIT_RESET, may_sleep)) {
5040				DBG_PRINT(ERR_DBG,
5041					  "%s: Adding Multicasts failed\n",
5042					  dev->name);
5043				return;
5044			}
5045			i++;
5046		}
5047	}
5048}
5049
/* NDO wrapper for s2io_set_multicast.  ndo_set_rx_mode is called with the
 * netdev address lock held, so we must not sleep - hence may_sleep = false.
 */
static void s2io_ndo_set_multicast(struct net_device *dev)
{
	s2io_set_multicast(dev, false);
}
5055
5056/* read from CAM unicast & multicast addresses and store it in
5057 * def_mac_addr structure
5058 */
5059static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5060{
5061	int offset;
5062	u64 mac_addr = 0x0;
5063	struct config_param *config = &sp->config;
5064
5065	/* store unicast & multicast mac addresses */
5066	for (offset = 0; offset < config->max_mc_addr; offset++) {
5067		mac_addr = do_s2io_read_unicast_mc(sp, offset);
5068		/* if read fails disable the entry */
5069		if (mac_addr == FAILURE)
5070			mac_addr = S2IO_DISABLE_MAC_ENTRY;
5071		do_s2io_copy_mac_addr(sp, offset, mac_addr);
5072	}
5073}
5074
5075/* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5076static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5077{
5078	int offset;
5079	struct config_param *config = &sp->config;
5080	/* restore unicast mac address */
5081	for (offset = 0; offset < config->max_mac_addr; offset++)
5082		do_s2io_prog_unicast(sp->dev,
5083				     sp->def_mac_addr[offset].mac_addr);
5084
5085	/* restore multicast mac address */
5086	for (offset = config->mc_start_offset;
5087	     offset < config->max_mc_addr; offset++)
5088		do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5089}
5090
5091/* add a multicast MAC address to CAM */
5092static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5093{
5094	int i;
5095	u64 mac_addr;
5096	struct config_param *config = &sp->config;
5097
5098	mac_addr = ether_addr_to_u64(addr);
 
 
 
5099	if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5100		return SUCCESS;
5101
5102	/* check if the multicast mac already preset in CAM */
5103	for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5104		u64 tmp64;
5105		tmp64 = do_s2io_read_unicast_mc(sp, i);
5106		if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5107			break;
5108
5109		if (tmp64 == mac_addr)
5110			return SUCCESS;
5111	}
5112	if (i == config->max_mc_addr) {
5113		DBG_PRINT(ERR_DBG,
5114			  "CAM full no space left for multicast MAC\n");
5115		return FAILURE;
5116	}
5117	/* Update the internal structure with this new mac address */
5118	do_s2io_copy_mac_addr(sp, i, mac_addr);
5119
5120	return do_s2io_add_mac(sp, mac_addr, i);
5121}
5122
5123/* add MAC address to CAM */
5124static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
5125{
5126	u64 val64;
5127	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5128
5129	writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
5130	       &bar0->rmac_addr_data0_mem);
5131
5132	val64 =	RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5133		RMAC_ADDR_CMD_MEM_OFFSET(off);
5134	writeq(val64, &bar0->rmac_addr_cmd_mem);
5135
5136	/* Wait till command completes */
5137	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5138				  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5139				  S2IO_BIT_RESET, true)) {
5140		DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
5141		return FAILURE;
5142	}
5143	return SUCCESS;
5144}
5145/* deletes a specified unicast/multicast mac entry from CAM */
5146static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5147{
5148	int offset;
5149	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5150	struct config_param *config = &sp->config;
5151
5152	for (offset = 1;
5153	     offset < config->max_mc_addr; offset++) {
5154		tmp64 = do_s2io_read_unicast_mc(sp, offset);
5155		if (tmp64 == addr) {
5156			/* disable the entry by writing  0xffffffffffffULL */
5157			if (do_s2io_add_mac(sp, dis_addr, offset) ==  FAILURE)
5158				return FAILURE;
5159			/* store the new mac list from CAM */
5160			do_s2io_store_unicast_mc(sp);
5161			return SUCCESS;
5162		}
5163	}
5164	DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5165		  (unsigned long long)addr);
5166	return FAILURE;
5167}
5168
/* Read one mac entry from the CAM at @offset and return the 48-bit address
 * in the low bits of the result.  NOTE(review): FAILURE is also returned
 * through the same u64 on a timeout, so callers must be able to tell the
 * sentinel apart from a real address - confirm FAILURE cannot collide.
 */
static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
{
	u64 tmp64, val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Issue a CAM read command for the requested offset. */
	val64 =	RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		RMAC_ADDR_CMD_MEM_OFFSET(offset);
	writeq(val64, &bar0->rmac_addr_cmd_mem);

	/* Wait till command completes */
	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
				  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
				  S2IO_BIT_RESET, true)) {
		DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
		return FAILURE;
	}
	tmp64 = readq(&bar0->rmac_addr_data0_mem);

	/* The MAC occupies the top 48 bits of data0; shift it down. */
	return tmp64 >> 16;
}
5191
5192/*
5193 * s2io_set_mac_addr - driver entry point
5194 */
5195
5196static int s2io_set_mac_addr(struct net_device *dev, void *p)
5197{
5198	struct sockaddr *addr = p;
5199
5200	if (!is_valid_ether_addr(addr->sa_data))
5201		return -EADDRNOTAVAIL;
5202
5203	eth_hw_addr_set(dev, addr->sa_data);
5204
5205	/* store the MAC address in CAM */
5206	return do_s2io_prog_unicast(dev, dev->dev_addr);
5207}
5208/**
5209 *  do_s2io_prog_unicast - Programs the Xframe mac address
5210 *  @dev : pointer to the device structure.
5211 *  @addr: a uchar pointer to the new mac address which is to be set.
5212 *  Description : This procedure will program the Xframe to receive
5213 *  frames with new Mac Address
5214 *  Return value: SUCCESS on success and an appropriate (-)ve integer
5215 *  as defined in errno.h file on failure.
5216 */
5217
5218static int do_s2io_prog_unicast(struct net_device *dev, const u8 *addr)
5219{
5220	struct s2io_nic *sp = netdev_priv(dev);
5221	register u64 mac_addr, perm_addr;
5222	int i;
5223	u64 tmp64;
5224	struct config_param *config = &sp->config;
5225
5226	/*
5227	 * Set the new MAC address as the new unicast filter and reflect this
5228	 * change on the device address registered with the OS. It will be
5229	 * at offset 0.
5230	 */
5231	mac_addr = ether_addr_to_u64(addr);
5232	perm_addr = ether_addr_to_u64(sp->def_mac_addr[0].mac_addr);
 
 
 
 
5233
5234	/* check if the dev_addr is different than perm_addr */
5235	if (mac_addr == perm_addr)
5236		return SUCCESS;
5237
5238	/* check if the mac already preset in CAM */
5239	for (i = 1; i < config->max_mac_addr; i++) {
5240		tmp64 = do_s2io_read_unicast_mc(sp, i);
5241		if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5242			break;
5243
5244		if (tmp64 == mac_addr) {
5245			DBG_PRINT(INFO_DBG,
5246				  "MAC addr:0x%llx already present in CAM\n",
5247				  (unsigned long long)mac_addr);
5248			return SUCCESS;
5249		}
5250	}
5251	if (i == config->max_mac_addr) {
5252		DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5253		return FAILURE;
5254	}
5255	/* Update the internal structure with this new mac address */
5256	do_s2io_copy_mac_addr(sp, i, mac_addr);
5257
5258	return do_s2io_add_mac(sp, mac_addr, i);
5259}
5260
5261/**
5262 * s2io_ethtool_set_link_ksettings - Sets different link parameters.
5263 * @dev : pointer to netdev
5264 * @cmd: pointer to the structure with parameters given by ethtool to set
5265 * link information.
5266 * Description:
5267 * The function sets different link parameters provided by the user onto
5268 * the NIC.
5269 * Return value:
5270 * 0 on success.
5271 */
5272
5273static int
5274s2io_ethtool_set_link_ksettings(struct net_device *dev,
5275				const struct ethtool_link_ksettings *cmd)
5276{
5277	struct s2io_nic *sp = netdev_priv(dev);
5278	if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
5279	    (cmd->base.speed != SPEED_10000) ||
5280	    (cmd->base.duplex != DUPLEX_FULL))
5281		return -EINVAL;
5282	else {
5283		s2io_close(sp->dev);
5284		s2io_open(sp->dev);
5285	}
5286
5287	return 0;
5288}
5289
5290/**
5291 * s2io_ethtool_get_link_ksettings - Return link specific information.
5292 * @dev: pointer to netdev
5293 * @cmd : pointer to the structure with parameters given by ethtool
5294 * to return link information.
5295 * Description:
5296 * Returns link specific information like speed, duplex etc.. to ethtool.
5297 * Return value :
5298 * return 0 on success.
5299 */
5300
5301static int
5302s2io_ethtool_get_link_ksettings(struct net_device *dev,
5303				struct ethtool_link_ksettings *cmd)
5304{
5305	struct s2io_nic *sp = netdev_priv(dev);
5306
5307	ethtool_link_ksettings_zero_link_mode(cmd, supported);
5308	ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
5309	ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
5310
5311	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
5312	ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full);
5313	ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
5314
5315	cmd->base.port = PORT_FIBRE;
5316
5317	if (netif_carrier_ok(sp->dev)) {
5318		cmd->base.speed = SPEED_10000;
5319		cmd->base.duplex = DUPLEX_FULL;
5320	} else {
5321		cmd->base.speed = SPEED_UNKNOWN;
5322		cmd->base.duplex = DUPLEX_UNKNOWN;
5323	}
5324
5325	cmd->base.autoneg = AUTONEG_DISABLE;
5326	return 0;
5327}
5328
5329/**
5330 * s2io_ethtool_gdrvinfo - Returns driver specific information.
5331 * @dev: pointer to netdev
5332 * @info : pointer to the structure with parameters given by ethtool to
5333 * return driver information.
5334 * Description:
5335 * Returns driver specefic information like name, version etc.. to ethtool.
5336 * Return value:
5337 *  void
5338 */
5339
5340static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5341				  struct ethtool_drvinfo *info)
5342{
5343	struct s2io_nic *sp = netdev_priv(dev);
5344
5345	strscpy(info->driver, s2io_driver_name, sizeof(info->driver));
5346	strscpy(info->version, s2io_driver_version, sizeof(info->version));
5347	strscpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5348}
5349
5350/**
5351 *  s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
5352 *  @dev: pointer to netdev
5353 *  @regs : pointer to the structure with parameters given by ethtool for
5354 *          dumping the registers.
5355 *  @space: The input argument into which all the registers are dumped.
5356 *  Description:
5357 *  Dumps the entire register space of xFrame NIC into the user given
5358 *  buffer area.
5359 * Return value :
5360 * void .
5361 */
5362
5363static void s2io_ethtool_gregs(struct net_device *dev,
5364			       struct ethtool_regs *regs, void *space)
5365{
5366	int i;
5367	u64 reg;
5368	u8 *reg_space = (u8 *)space;
5369	struct s2io_nic *sp = netdev_priv(dev);
5370
5371	regs->len = XENA_REG_SPACE;
5372	regs->version = sp->pdev->subsystem_device;
5373
5374	for (i = 0; i < regs->len; i += 8) {
5375		reg = readq(sp->bar0 + i);
5376		memcpy((reg_space + i), &reg, 8);
5377	}
5378}
5379
5380/*
5381 *  s2io_set_led - control NIC led
5382 */
5383static void s2io_set_led(struct s2io_nic *sp, bool on)
5384{
5385	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5386	u16 subid = sp->pdev->subsystem_device;
5387	u64 val64;
5388
5389	if ((sp->device_type == XFRAME_II_DEVICE) ||
5390	    ((subid & 0xFF) >= 0x07)) {
5391		val64 = readq(&bar0->gpio_control);
5392		if (on)
5393			val64 |= GPIO_CTRL_GPIO_0;
5394		else
5395			val64 &= ~GPIO_CTRL_GPIO_0;
5396
5397		writeq(val64, &bar0->gpio_control);
5398	} else {
5399		val64 = readq(&bar0->adapter_control);
5400		if (on)
5401			val64 |= ADAPTER_LED_ON;
5402		else
5403			val64 &= ~ADAPTER_LED_ON;
5404
5405		writeq(val64, &bar0->adapter_control);
5406	}
5407
5408}
5409
5410/**
5411 * s2io_ethtool_set_led - To physically identify the nic on the system.
5412 * @dev : network device
5413 * @state: led setting
5414 *
5415 * Description: Used to physically identify the NIC on the system.
5416 * The Link LED will blink for a time specified by the user for
5417 * identification.
5418 * NOTE: The Link has to be Up to be able to blink the LED. Hence
5419 * identification is possible only if it's link is up.
5420 */
5421
5422static int s2io_ethtool_set_led(struct net_device *dev,
5423				enum ethtool_phys_id_state state)
5424{
5425	struct s2io_nic *sp = netdev_priv(dev);
5426	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5427	u16 subid = sp->pdev->subsystem_device;
5428
5429	if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) {
5430		u64 val64 = readq(&bar0->adapter_control);
5431		if (!(val64 & ADAPTER_CNTL_EN)) {
5432			pr_err("Adapter Link down, cannot blink LED\n");
5433			return -EAGAIN;
5434		}
5435	}
5436
5437	switch (state) {
5438	case ETHTOOL_ID_ACTIVE:
5439		sp->adapt_ctrl_org = readq(&bar0->gpio_control);
5440		return 1;	/* cycle on/off once per second */
5441
5442	case ETHTOOL_ID_ON:
5443		s2io_set_led(sp, true);
5444		break;
5445
5446	case ETHTOOL_ID_OFF:
5447		s2io_set_led(sp, false);
5448		break;
5449
5450	case ETHTOOL_ID_INACTIVE:
5451		if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid))
5452			writeq(sp->adapt_ctrl_org, &bar0->gpio_control);
5453	}
5454
5455	return 0;
5456}
5457
5458static void
5459s2io_ethtool_gringparam(struct net_device *dev,
5460			struct ethtool_ringparam *ering,
5461			struct kernel_ethtool_ringparam *kernel_ering,
5462			struct netlink_ext_ack *extack)
5463{
5464	struct s2io_nic *sp = netdev_priv(dev);
5465	int i, tx_desc_count = 0, rx_desc_count = 0;
5466
5467	if (sp->rxd_mode == RXD_MODE_1) {
5468		ering->rx_max_pending = MAX_RX_DESC_1;
5469		ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5470	} else {
5471		ering->rx_max_pending = MAX_RX_DESC_2;
5472		ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5473	}
5474
5475	ering->tx_max_pending = MAX_TX_DESC;
5476
5477	for (i = 0; i < sp->config.rx_ring_num; i++)
5478		rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5479	ering->rx_pending = rx_desc_count;
5480	ering->rx_jumbo_pending = rx_desc_count;
5481
5482	for (i = 0; i < sp->config.tx_fifo_num; i++)
5483		tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5484	ering->tx_pending = tx_desc_count;
5485	DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
5486}
5487
5488/**
5489 * s2io_ethtool_getpause_data -Pause frame generation and reception.
5490 * @dev: pointer to netdev
5491 * @ep : pointer to the structure with pause parameters given by ethtool.
5492 * Description:
5493 * Returns the Pause frame generation and reception capability of the NIC.
5494 * Return value:
5495 *  void
5496 */
5497static void s2io_ethtool_getpause_data(struct net_device *dev,
5498				       struct ethtool_pauseparam *ep)
5499{
5500	u64 val64;
5501	struct s2io_nic *sp = netdev_priv(dev);
5502	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5503
5504	val64 = readq(&bar0->rmac_pause_cfg);
5505	if (val64 & RMAC_PAUSE_GEN_ENABLE)
5506		ep->tx_pause = true;
5507	if (val64 & RMAC_PAUSE_RX_ENABLE)
5508		ep->rx_pause = true;
5509	ep->autoneg = false;
5510}
5511
5512/**
5513 * s2io_ethtool_setpause_data -  set/reset pause frame generation.
5514 * @dev: pointer to netdev
5515 * @ep : pointer to the structure with pause parameters given by ethtool.
5516 * Description:
5517 * It can be used to set or reset Pause frame generation or reception
5518 * support of the NIC.
5519 * Return value:
5520 * int, returns 0 on Success
5521 */
5522
5523static int s2io_ethtool_setpause_data(struct net_device *dev,
5524				      struct ethtool_pauseparam *ep)
5525{
5526	u64 val64;
5527	struct s2io_nic *sp = netdev_priv(dev);
5528	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5529
5530	val64 = readq(&bar0->rmac_pause_cfg);
5531	if (ep->tx_pause)
5532		val64 |= RMAC_PAUSE_GEN_ENABLE;
5533	else
5534		val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5535	if (ep->rx_pause)
5536		val64 |= RMAC_PAUSE_RX_ENABLE;
5537	else
5538		val64 &= ~RMAC_PAUSE_RX_ENABLE;
5539	writeq(val64, &bar0->rmac_pause_cfg);
5540	return 0;
5541}
5542
5543#define S2IO_DEV_ID		5
5544/**
5545 * read_eeprom - reads 4 bytes of data from user given offset.
5546 * @sp : private member of the device structure, which is a pointer to the
5547 *      s2io_nic structure.
5548 * @off : offset at which the data must be written
5549 * @data : Its an output parameter where the data read at the given
5550 *	offset is stored.
5551 * Description:
5552 * Will read 4 bytes of data from the user given offset and return the
5553 * read data.
5554 * NOTE: Will allow to read only part of the EEPROM visible through the
5555 *   I2C bus.
5556 * Return value:
5557 *  -1 on failure and 0 on success.
5558 */
5559static int read_eeprom(struct s2io_nic *sp, int off, u64 *data)
5560{
5561	int ret = -1;
5562	u32 exit_cnt = 0;
5563	u64 val64;
5564	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5565
5566	if (sp->device_type == XFRAME_I_DEVICE) {
5567		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
5568			I2C_CONTROL_ADDR(off) |
5569			I2C_CONTROL_BYTE_CNT(0x3) |
5570			I2C_CONTROL_READ |
5571			I2C_CONTROL_CNTL_START;
5572		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5573
5574		while (exit_cnt < 5) {
5575			val64 = readq(&bar0->i2c_control);
5576			if (I2C_CONTROL_CNTL_END(val64)) {
5577				*data = I2C_CONTROL_GET_DATA(val64);
5578				ret = 0;
5579				break;
5580			}
5581			msleep(50);
5582			exit_cnt++;
5583		}
5584	}
5585
5586	if (sp->device_type == XFRAME_II_DEVICE) {
5587		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5588			SPI_CONTROL_BYTECNT(0x3) |
5589			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
5590		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5591		val64 |= SPI_CONTROL_REQ;
5592		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5593		while (exit_cnt < 5) {
5594			val64 = readq(&bar0->spi_control);
5595			if (val64 & SPI_CONTROL_NACK) {
5596				ret = 1;
5597				break;
5598			} else if (val64 & SPI_CONTROL_DONE) {
5599				*data = readq(&bar0->spi_data);
5600				*data &= 0xffffff;
5601				ret = 0;
5602				break;
5603			}
5604			msleep(50);
5605			exit_cnt++;
5606		}
5607	}
5608	return ret;
5609}
5610
5611/**
5612 *  write_eeprom - actually writes the relevant part of the data value.
5613 *  @sp : private member of the device structure, which is a pointer to the
5614 *       s2io_nic structure.
5615 *  @off : offset at which the data must be written
5616 *  @data : The data that is to be written
5617 *  @cnt : Number of bytes of the data that are actually to be written into
5618 *  the Eeprom. (max of 3)
5619 * Description:
5620 *  Actually writes the relevant part of the data value into the Eeprom
5621 *  through the I2C bus.
5622 * Return value:
5623 *  0 on success, -1 on failure.
5624 */
5625
5626static int write_eeprom(struct s2io_nic *sp, int off, u64 data, int cnt)
5627{
5628	int exit_cnt = 0, ret = -1;
5629	u64 val64;
5630	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5631
5632	if (sp->device_type == XFRAME_I_DEVICE) {
5633		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
5634			I2C_CONTROL_ADDR(off) |
5635			I2C_CONTROL_BYTE_CNT(cnt) |
5636			I2C_CONTROL_SET_DATA((u32)data) |
5637			I2C_CONTROL_CNTL_START;
5638		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5639
5640		while (exit_cnt < 5) {
5641			val64 = readq(&bar0->i2c_control);
5642			if (I2C_CONTROL_CNTL_END(val64)) {
5643				if (!(val64 & I2C_CONTROL_NACK))
5644					ret = 0;
5645				break;
5646			}
5647			msleep(50);
5648			exit_cnt++;
5649		}
5650	}
5651
5652	if (sp->device_type == XFRAME_II_DEVICE) {
5653		int write_cnt = (cnt == 8) ? 0 : cnt;
5654		writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);
5655
5656		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5657			SPI_CONTROL_BYTECNT(write_cnt) |
5658			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
5659		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5660		val64 |= SPI_CONTROL_REQ;
5661		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5662		while (exit_cnt < 5) {
5663			val64 = readq(&bar0->spi_control);
5664			if (val64 & SPI_CONTROL_NACK) {
5665				ret = 1;
5666				break;
5667			} else if (val64 & SPI_CONTROL_DONE) {
5668				ret = 0;
5669				break;
5670			}
5671			msleep(50);
5672			exit_cnt++;
5673		}
5674	}
5675	return ret;
5676}
/* Read the adapter's VPD area through PCI config space and extract the
 * product name and serial number into the nic structure.  On any failure
 * the defaults ("Xframe ..." / "NOT AVAILABLE") are left in place.
 */
static void s2io_vpd_read(struct s2io_nic *nic)
{
	u8 *vpd_data;
	u8 data;
	int i = 0, cnt, len, fail = 0;
	/* Offset of the VPD capability in PCI config space; differs
	 * between Xframe I and Xframe II boards.
	 */
	int vpd_addr = 0x80;
	struct swStat *swstats = &nic->mac_control.stats_info->sw_stat;

	if (nic->device_type == XFRAME_II_DEVICE) {
		strcpy(nic->product_name, "Xframe II 10GbE network adapter");
		vpd_addr = 0x80;
	} else {
		strcpy(nic->product_name, "Xframe I 10GbE network adapter");
		vpd_addr = 0x50;
	}
	strcpy(nic->serial_num, "NOT AVAILABLE");

	vpd_data = kmalloc(256, GFP_KERNEL);
	if (!vpd_data) {
		swstats->mem_alloc_fail_cnt++;
		return;
	}
	swstats->mem_allocated += 256;

	/* Read the 256-byte VPD area one dword at a time: write the VPD
	 * address, then poll the flag byte until the device signals that
	 * the data register is valid (0x80).
	 */
	for (i = 0; i < 256; i += 4) {
		pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
		pci_read_config_byte(nic->pdev,  (vpd_addr + 2), &data);
		pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
		for (cnt = 0; cnt < 5; cnt++) {
			msleep(2);
			pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
			if (data == 0x80)
				break;
		}
		if (cnt >= 5) {
			DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
			fail = 1;
			break;
		}
		pci_read_config_dword(nic->pdev,  (vpd_addr + 4),
				      (u32 *)&vpd_data[i]);
	}

	if (!fail) {
		/* read serial number of adapter */
		for (cnt = 0; cnt < 252; cnt++) {
			/* 'S''N' marks the serial-number keyword; the next
			 * byte is its length, then the string itself.
			 */
			if ((vpd_data[cnt] == 'S') &&
			    (vpd_data[cnt+1] == 'N')) {
				len = vpd_data[cnt+2];
				if (len < min(VPD_STRING_LEN, 256-cnt-2)) {
					memcpy(nic->serial_num,
					       &vpd_data[cnt + 3],
					       len);
					memset(nic->serial_num+len,
					       0,
					       VPD_STRING_LEN-len);
					break;
				}
			}
		}
	}

	/* vpd_data[1] holds the product-name string length; vpd_data[3]
	 * onward holds the string itself.
	 */
	if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
		len = vpd_data[1];
		memcpy(nic->product_name, &vpd_data[3], len);
		nic->product_name[len] = 0;
	}
	kfree(vpd_data);
	swstats->mem_freed += 256;
}
5747
5748/**
5749 *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
5750 *  @dev: pointer to netdev
5751 *  @eeprom : pointer to the user level structure provided by ethtool,
5752 *  containing all relevant information.
5753 *  @data_buf : user defined value to be written into Eeprom.
5754 *  Description: Reads the values stored in the Eeprom at given offset
5755 *  for a given length. Stores these values int the input argument data
5756 *  buffer 'data_buf' and returns these to the caller (ethtool.)
5757 *  Return value:
5758 *  int  0 on success
5759 */
5760
5761static int s2io_ethtool_geeprom(struct net_device *dev,
5762				struct ethtool_eeprom *eeprom, u8 * data_buf)
5763{
5764	u32 i, valid;
5765	u64 data;
5766	struct s2io_nic *sp = netdev_priv(dev);
5767
5768	eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5769
5770	if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5771		eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5772
5773	for (i = 0; i < eeprom->len; i += 4) {
5774		if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5775			DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5776			return -EFAULT;
5777		}
5778		valid = INV(data);
5779		memcpy((data_buf + i), &valid, 4);
5780	}
5781	return 0;
5782}
5783
5784/**
5785 *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5786 *  @dev: pointer to netdev
5787 *  @eeprom : pointer to the user level structure provided by ethtool,
5788 *  containing all relevant information.
5789 *  @data_buf : user defined value to be written into Eeprom.
5790 *  Description:
5791 *  Tries to write the user provided value in the Eeprom, at the offset
5792 *  given by the user.
5793 *  Return value:
5794 *  0 on success, -EFAULT on failure.
5795 */
5796
5797static int s2io_ethtool_seeprom(struct net_device *dev,
5798				struct ethtool_eeprom *eeprom,
5799				u8 *data_buf)
5800{
5801	int len = eeprom->len, cnt = 0;
5802	u64 valid = 0, data;
5803	struct s2io_nic *sp = netdev_priv(dev);
5804
5805	if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5806		DBG_PRINT(ERR_DBG,
5807			  "ETHTOOL_WRITE_EEPROM Err: "
5808			  "Magic value is wrong, it is 0x%x should be 0x%x\n",
5809			  (sp->pdev->vendor | (sp->pdev->device << 16)),
5810			  eeprom->magic);
5811		return -EFAULT;
5812	}
5813
5814	while (len) {
5815		data = (u32)data_buf[cnt] & 0x000000FF;
5816		if (data)
5817			valid = (u32)(data << 24);
5818		else
5819			valid = data;
5820
5821		if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5822			DBG_PRINT(ERR_DBG,
5823				  "ETHTOOL_WRITE_EEPROM Err: "
5824				  "Cannot write into the specified offset\n");
5825			return -EFAULT;
5826		}
5827		cnt++;
5828		len--;
5829	}
5830
5831	return 0;
5832}
5833
5834/**
5835 * s2io_register_test - reads and writes into all clock domains.
5836 * @sp : private member of the device structure, which is a pointer to the
5837 * s2io_nic structure.
5838 * @data : variable that returns the result of each of the test conducted b
5839 * by the driver.
5840 * Description:
5841 * Read and write into all clock domains. The NIC has 3 clock domains,
5842 * see that registers in all the three regions are accessible.
5843 * Return value:
5844 * 0 on success.
5845 */
5846
5847static int s2io_register_test(struct s2io_nic *sp, uint64_t *data)
5848{
5849	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5850	u64 val64 = 0, exp_val;
5851	int fail = 0;
5852
5853	val64 = readq(&bar0->pif_rd_swapper_fb);
5854	if (val64 != 0x123456789abcdefULL) {
5855		fail = 1;
5856		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 1);
5857	}
5858
5859	val64 = readq(&bar0->rmac_pause_cfg);
5860	if (val64 != 0xc000ffff00000000ULL) {
5861		fail = 1;
5862		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 2);
5863	}
5864
5865	val64 = readq(&bar0->rx_queue_cfg);
5866	if (sp->device_type == XFRAME_II_DEVICE)
5867		exp_val = 0x0404040404040404ULL;
5868	else
5869		exp_val = 0x0808080808080808ULL;
5870	if (val64 != exp_val) {
5871		fail = 1;
5872		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 3);
5873	}
5874
5875	val64 = readq(&bar0->xgxs_efifo_cfg);
5876	if (val64 != 0x000000001923141EULL) {
5877		fail = 1;
5878		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 4);
5879	}
5880
5881	val64 = 0x5A5A5A5A5A5A5A5AULL;
5882	writeq(val64, &bar0->xmsi_data);
5883	val64 = readq(&bar0->xmsi_data);
5884	if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5885		fail = 1;
5886		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 1);
5887	}
5888
5889	val64 = 0xA5A5A5A5A5A5A5A5ULL;
5890	writeq(val64, &bar0->xmsi_data);
5891	val64 = readq(&bar0->xmsi_data);
5892	if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5893		fail = 1;
5894		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 2);
5895	}
5896
5897	*data = fail;
5898	return fail;
5899}
5900
5901/**
5902 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5903 * @sp : private member of the device structure, which is a pointer to the
5904 * s2io_nic structure.
5905 * @data:variable that returns the result of each of the test conducted by
5906 * the driver.
5907 * Description:
5908 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5909 * register.
5910 * Return value:
5911 * 0 on success.
5912 */
5913
static int s2io_eeprom_test(struct s2io_nic *sp, uint64_t *data)
{
	int fail = 0;
	u64 ret_data, org_4F0, org_7F0;
	u8 saved_4F0 = 0, saved_7F0 = 0;
	struct net_device *dev = sp->dev;

	/* write_eeprom()/read_eeprom() return zero on success; the
	 * negative tests below therefore fail when a write that is
	 * supposed to be rejected succeeds.
	 */

	/* Test Write Error at offset 0 */
	/* Note that SPI interface allows write access to all areas
	 * of EEPROM. Hence doing all negative testing only for Xframe I.
	 */
	if (sp->device_type == XFRAME_I_DEVICE)
		if (!write_eeprom(sp, 0, 0, 3))
			fail = 1;

	/* Save current values at offsets 0x4F0 and 0x7F0 so they can
	 * be restored after the destructive write tests.
	 */
	if (!read_eeprom(sp, 0x4F0, &org_4F0))
		saved_4F0 = 1;
	if (!read_eeprom(sp, 0x7F0, &org_7F0))
		saved_7F0 = 1;

	/* Test Write at offset 4f0 */
	if (write_eeprom(sp, 0x4F0, 0x012345, 3))
		fail = 1;
	if (read_eeprom(sp, 0x4F0, &ret_data))
		fail = 1;

	if (ret_data != 0x012345) {
		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
			  "Data written %llx Data read %llx\n",
			  dev->name, (unsigned long long)0x12345,
			  (unsigned long long)ret_data);
		fail = 1;
	}

	/* Reset the EEPROM data go FFFF */
	write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);

	/* Test Write Request Error at offset 0x7c */
	if (sp->device_type == XFRAME_I_DEVICE)
		if (!write_eeprom(sp, 0x07C, 0, 3))
			fail = 1;

	/* Test Write Request at offset 0x7f0 */
	if (write_eeprom(sp, 0x7F0, 0x012345, 3))
		fail = 1;
	if (read_eeprom(sp, 0x7F0, &ret_data))
		fail = 1;

	if (ret_data != 0x012345) {
		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
			  "Data written %llx Data read %llx\n",
			  dev->name, (unsigned long long)0x12345,
			  (unsigned long long)ret_data);
		fail = 1;
	}

	/* Reset the EEPROM data go FFFF */
	write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);

	if (sp->device_type == XFRAME_I_DEVICE) {
		/* More negative tests: writes to these protected offsets
		 * must be rejected on Xframe I.
		 */
		/* Test Write Error at offset 0x80 */
		if (!write_eeprom(sp, 0x080, 0, 3))
			fail = 1;

		/* Test Write Error at offset 0xfc */
		if (!write_eeprom(sp, 0x0FC, 0, 3))
			fail = 1;

		/* Test Write Error at offset 0x100 */
		if (!write_eeprom(sp, 0x100, 0, 3))
			fail = 1;

		/* Test Write Error at offset 4ec */
		if (!write_eeprom(sp, 0x4EC, 0, 3))
			fail = 1;
	}

	/* Restore values at offsets 0x4F0 and 0x7F0 */
	if (saved_4F0)
		write_eeprom(sp, 0x4F0, org_4F0, 3);
	if (saved_7F0)
		write_eeprom(sp, 0x7F0, org_7F0, 3);

	/* 0 on success, 1 on any failed sub-test. */
	*data = fail;
	return fail;
}
6001
6002/**
6003 * s2io_bist_test - invokes the MemBist test of the card .
6004 * @sp : private member of the device structure, which is a pointer to the
6005 * s2io_nic structure.
6006 * @data:variable that returns the result of each of the test conducted by
6007 * the driver.
6008 * Description:
6009 * This invokes the MemBist test of the card. We give around
6010 * 2 secs time for the Test to complete. If it's still not complete
 * within this period, we consider that the test failed.
6012 * Return value:
6013 * 0 on success and -1 on failure.
6014 */
6015
6016static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
6017{
6018	u8 bist = 0;
6019	int cnt = 0, ret = -1;
6020
6021	pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6022	bist |= PCI_BIST_START;
6023	pci_write_config_word(sp->pdev, PCI_BIST, bist);
6024
6025	while (cnt < 20) {
6026		pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6027		if (!(bist & PCI_BIST_START)) {
6028			*data = (bist & PCI_BIST_CODE_MASK);
6029			ret = 0;
6030			break;
6031		}
6032		msleep(100);
6033		cnt++;
6034	}
6035
6036	return ret;
6037}
6038
6039/**
6040 * s2io_link_test - verifies the link state of the nic
6041 * @sp: private member of the device structure, which is a pointer to the
6042 * s2io_nic structure.
6043 * @data: variable that returns the result of each of the test conducted by
6044 * the driver.
6045 * Description:
6046 * The function verifies the link state of the NIC and updates the input
6047 * argument 'data' appropriately.
6048 * Return value:
6049 * 0 on success.
6050 */
6051
6052static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
6053{
6054	struct XENA_dev_config __iomem *bar0 = sp->bar0;
6055	u64 val64;
6056
6057	val64 = readq(&bar0->adapter_status);
6058	if (!(LINK_IS_UP(val64)))
6059		*data = 1;
6060	else
6061		*data = 0;
6062
6063	return *data;
6064}
6065
6066/**
6067 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6068 * @sp: private member of the device structure, which is a pointer to the
6069 * s2io_nic structure.
6070 * @data: variable that returns the result of each of the test
6071 * conducted by the driver.
6072 * Description:
6073 *  This is one of the offline test that tests the read and write
6074 *  access to the RldRam chip on the NIC.
6075 * Return value:
6076 *  0 on success.
6077 */
6078
static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	int cnt, iteration = 0, test_fail = 0;

	/* Turn off ECC while the RLDRAM is exercised with raw
	 * test patterns.
	 */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/* Put the memory controller into its built-in test mode. */
	val64 = readq(&bar0->mc_rldram_test_ctrl);
	val64 |= MC_RLDRAM_TEST_MODE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	val64 |= MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	/* Two passes: the second pass inverts the upper 48 bits of
	 * each data pattern so every data line is toggled both ways.
	 */
	while (iteration < 2) {
		val64 = 0x55555555aaaa0000ULL;
		if (iteration == 1)
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d0);

		val64 = 0xaaaa5a5555550000ULL;
		if (iteration == 1)
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d1);

		val64 = 0x55aaaaaaaa5a0000ULL;
		if (iteration == 1)
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d2);

		val64 = (u64) (0x0000003ffffe0100ULL);
		writeq(val64, &bar0->mc_rldram_test_add);

		/* Kick off the write phase and poll for completion
		 * (up to 5 x 200ms).
		 */
		val64 = MC_RLDRAM_TEST_MODE |
			MC_RLDRAM_TEST_WRITE |
			MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(200);
		}

		/* Timed out waiting for the write phase; give up. */
		if (cnt == 5)
			break;

		/* Read-back phase: poll for completion (up to 5 x 500ms). */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(500);
		}

		if (cnt == 5)
			break;

		/* Hardware compares what it read against the patterns
		 * written and reports the verdict in TEST_PASS.
		 */
		val64 = readq(&bar0->mc_rldram_test_ctrl);
		if (!(val64 & MC_RLDRAM_TEST_PASS))
			test_fail = 1;

		iteration++;
	}

	*data = test_fail;

	/* Bring the adapter out of test mode */
	SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);

	return test_fail;
}
6161
6162/**
 *  s2io_ethtool_test - conducts 6 tests to determine the health of the card.
6164 *  @dev: pointer to netdev
6165 *  @ethtest : pointer to a ethtool command specific structure that will be
6166 *  returned to the user.
6167 *  @data : variable that returns the result of each of the test
6168 * conducted by the driver.
6169 * Description:
6170 *  This function conducts 6 tests ( 4 offline and 2 online) to determine
6171 *  the health of the card.
6172 * Return value:
6173 *  void
6174 */
6175
static void s2io_ethtool_test(struct net_device *dev,
			      struct ethtool_test *ethtest,
			      uint64_t *data)
{
	struct s2io_nic *sp = netdev_priv(dev);
	/* Remember whether the interface was up so it can be restored
	 * after the offline tests.
	 */
	int orig_state = netif_running(sp->dev);

	if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline Tests. */
		if (orig_state)
			s2io_close(sp->dev);

		if (s2io_register_test(sp, &data[0]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		/* Register and RLDRAM tests leave the card in an
		 * unknown state; reset between them.
		 */
		s2io_reset(sp);

		if (s2io_rldram_test(sp, &data[3]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		s2io_reset(sp);

		if (s2io_eeprom_test(sp, &data[1]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		if (s2io_bist_test(sp, &data[4]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		if (orig_state)
			s2io_open(sp->dev);

		/* data[2] (link test) is an online-only test. */
		data[2] = 0;
	} else {
		/* Online Tests. */
		if (!orig_state) {
			DBG_PRINT(ERR_DBG, "%s: is not up, cannot run test\n",
				  dev->name);
			/* NOTE(review): these -1 markers are all
			 * overwritten below (data[2] by the link test,
			 * the rest unconditionally set to 0), so the
			 * "device down" condition is never visible to
			 * the caller — confirm whether an early return
			 * was intended here.
			 */
			data[0] = -1;
			data[1] = -1;
			data[2] = -1;
			data[3] = -1;
			data[4] = -1;
		}

		if (s2io_link_test(sp, &data[2]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		/* Offline-only test slots are reported as 0 online. */
		data[0] = 0;
		data[1] = 0;
		data[3] = 0;
		data[4] = 0;
	}
}
6229
/* ethtool get_ethtool_stats handler: fills tmp_stats[] in exactly the
 * order of the key tables copied out by s2io_ethtool_get_strings().
 * Many hardware counters are 32-bit with a separate overflow word;
 * those pairs are merged into a single 64-bit value here.
 */
static void s2io_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *estats,
				   u64 *tmp_stats)
{
	int i = 0, k;
	struct s2io_nic *sp = netdev_priv(dev);
	struct stat_block *stats = sp->mac_control.stats_info;
	struct swStat *swstats = &stats->sw_stat;
	struct xpakStat *xstats = &stats->xpak_stat;

	/* Snapshot the hardware statistics block before reading it. */
	s2io_updt_stats(sp);

	/* Transmit MAC (TMAC) hardware counters. */
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_frms_oflow) << 32  |
		le32_to_cpu(stats->tmac_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
		le32_to_cpu(stats->tmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_mcst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_bcst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_bcst_frms);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_pause_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stats->tmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_ucst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_nucst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_any_err_frms);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_vld_ip_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_vld_ip_oflow) << 32 |
		le32_to_cpu(stats->tmac_vld_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stats->tmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_icmp_oflow) << 32 |
		le32_to_cpu(stats->tmac_icmp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_rst_tcp_oflow) << 32 |
		le32_to_cpu(stats->tmac_rst_tcp);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_tcp);
	tmp_stats[i++] = (u64)le32_to_cpu(stats->tmac_udp_oflow) << 32 |
		le32_to_cpu(stats->tmac_udp);
	/* Receive MAC (RMAC) hardware counters. */
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_vld_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
		le32_to_cpu(stats->rmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_fcs_err_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_vld_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_vld_bcst_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_vld_bcst_frms);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_in_rng_len_err_frms);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_out_rng_len_err_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_long_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_pause_ctrl_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_unsup_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stats->rmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_accepted_ucst_frms_oflow) << 32
		| le32_to_cpu(stats->rmac_accepted_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_accepted_nucst_frms_oflow)
		<< 32 | le32_to_cpu(stats->rmac_accepted_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_discarded_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_discarded_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_drop_events_oflow)
		<< 32 | le32_to_cpu(stats->rmac_drop_events);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_usized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_osized_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_osized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_frag_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_frag_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_jabber_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_jabber_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_64_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_65_127_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_128_255_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_256_511_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_512_1023_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_1024_1518_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_ip_oflow) << 32 |
		le32_to_cpu(stats->rmac_ip);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ip_octets);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_hdr_err_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stats->rmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_icmp_oflow) << 32 |
		le32_to_cpu(stats->rmac_icmp);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_tcp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_udp_oflow) << 32 |
		le32_to_cpu(stats->rmac_udp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_err_drp_udp_oflow) << 32 |
		le32_to_cpu(stats->rmac_err_drp_udp);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_err_sym);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q0);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q1);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q2);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q3);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q4);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q5);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q6);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q7);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q0);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q1);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q2);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q3);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q4);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q5);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q6);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q7);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_pause_cnt_oflow) << 32 |
		le32_to_cpu(stats->rmac_pause_cnt);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_data_err_cnt);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_ctrl_err_cnt);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_accepted_ip_oflow) << 32 |
		le32_to_cpu(stats->rmac_accepted_ip);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_err_tcp);
	/* Host-bus (PCI) read/write request counters. */
	tmp_stats[i++] = le32_to_cpu(stats->rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_rd_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_disc_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_wr_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txp_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rxd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rxd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txf_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rxf_wr_cnt);

	/* Enhanced statistics exist only for Hercules */
	if (sp->device_type == XFRAME_II_DEVICE) {
		tmp_stats[i++] =
			le64_to_cpu(stats->rmac_ttl_1519_4095_frms);
		tmp_stats[i++] =
			le64_to_cpu(stats->rmac_ttl_4096_8191_frms);
		tmp_stats[i++] =
			le64_to_cpu(stats->rmac_ttl_8192_max_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_gt_max_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_osized_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_jabber_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_gt_max_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_vlan_frms);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_len_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_fcs_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_pf_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_da_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_red_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_rts_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_ingm_full_discard);
		tmp_stats[i++] = le32_to_cpu(stats->link_fault_cnt);
	}

	/* Driver-maintained software statistics.  The first slot is
	 * always 0 — NOTE(review): appears to be a placeholder kept
	 * for key-table alignment; confirm against
	 * ethtool_driver_stats_keys.
	 */
	tmp_stats[i++] = 0;
	tmp_stats[i++] = swstats->single_ecc_errs;
	tmp_stats[i++] = swstats->double_ecc_errs;
	tmp_stats[i++] = swstats->parity_err_cnt;
	tmp_stats[i++] = swstats->serious_err_cnt;
	tmp_stats[i++] = swstats->soft_reset_cnt;
	tmp_stats[i++] = swstats->fifo_full_cnt;
	for (k = 0; k < MAX_RX_RINGS; k++)
		tmp_stats[i++] = swstats->ring_full_cnt[k];
	/* XPAK transceiver alarm/warning counters. */
	tmp_stats[i++] = xstats->alarm_transceiver_temp_high;
	tmp_stats[i++] = xstats->alarm_transceiver_temp_low;
	tmp_stats[i++] = xstats->alarm_laser_bias_current_high;
	tmp_stats[i++] = xstats->alarm_laser_bias_current_low;
	tmp_stats[i++] = xstats->alarm_laser_output_power_high;
	tmp_stats[i++] = xstats->alarm_laser_output_power_low;
	tmp_stats[i++] = xstats->warn_transceiver_temp_high;
	tmp_stats[i++] = xstats->warn_transceiver_temp_low;
	tmp_stats[i++] = xstats->warn_laser_bias_current_high;
	tmp_stats[i++] = xstats->warn_laser_bias_current_low;
	tmp_stats[i++] = xstats->warn_laser_output_power_high;
	tmp_stats[i++] = xstats->warn_laser_output_power_low;
	tmp_stats[i++] = swstats->clubbed_frms_cnt;
	tmp_stats[i++] = swstats->sending_both;
	tmp_stats[i++] = swstats->outof_sequence_pkts;
	tmp_stats[i++] = swstats->flush_max_pkts;
	/* Average packets per LRO aggregation. */
	if (swstats->num_aggregations) {
		u64 tmp = swstats->sum_avg_pkts_aggregated;
		int count = 0;
		/*
		 * Since 64-bit divide does not work on all platforms,
		 * do repeated subtraction.
		 */
		while (tmp >= swstats->num_aggregations) {
			tmp -= swstats->num_aggregations;
			count++;
		}
		tmp_stats[i++] = count;
	} else
		tmp_stats[i++] = 0;
	tmp_stats[i++] = swstats->mem_alloc_fail_cnt;
	tmp_stats[i++] = swstats->pci_map_fail_cnt;
	tmp_stats[i++] = swstats->watchdog_timer_cnt;
	tmp_stats[i++] = swstats->mem_allocated;
	tmp_stats[i++] = swstats->mem_freed;
	tmp_stats[i++] = swstats->link_up_cnt;
	tmp_stats[i++] = swstats->link_down_cnt;
	tmp_stats[i++] = swstats->link_up_time;
	tmp_stats[i++] = swstats->link_down_time;

	/* Per-cause Tx error counters. */
	tmp_stats[i++] = swstats->tx_buf_abort_cnt;
	tmp_stats[i++] = swstats->tx_desc_abort_cnt;
	tmp_stats[i++] = swstats->tx_parity_err_cnt;
	tmp_stats[i++] = swstats->tx_link_loss_cnt;
	tmp_stats[i++] = swstats->tx_list_proc_err_cnt;

	/* Per-cause Rx and adapter-block error counters. */
	tmp_stats[i++] = swstats->rx_parity_err_cnt;
	tmp_stats[i++] = swstats->rx_abort_cnt;
	tmp_stats[i++] = swstats->rx_parity_abort_cnt;
	tmp_stats[i++] = swstats->rx_rda_fail_cnt;
	tmp_stats[i++] = swstats->rx_unkn_prot_cnt;
	tmp_stats[i++] = swstats->rx_fcs_err_cnt;
	tmp_stats[i++] = swstats->rx_buf_size_err_cnt;
	tmp_stats[i++] = swstats->rx_rxd_corrupt_cnt;
	tmp_stats[i++] = swstats->rx_unkn_err_cnt;
	tmp_stats[i++] = swstats->tda_err_cnt;
	tmp_stats[i++] = swstats->pfc_err_cnt;
	tmp_stats[i++] = swstats->pcc_err_cnt;
	tmp_stats[i++] = swstats->tti_err_cnt;
	tmp_stats[i++] = swstats->tpa_err_cnt;
	tmp_stats[i++] = swstats->sm_err_cnt;
	tmp_stats[i++] = swstats->lso_err_cnt;
	tmp_stats[i++] = swstats->mac_tmac_err_cnt;
	tmp_stats[i++] = swstats->mac_rmac_err_cnt;
	tmp_stats[i++] = swstats->xgxs_txgxs_err_cnt;
	tmp_stats[i++] = swstats->xgxs_rxgxs_err_cnt;
	tmp_stats[i++] = swstats->rc_err_cnt;
	tmp_stats[i++] = swstats->prc_pcix_err_cnt;
	tmp_stats[i++] = swstats->rpa_err_cnt;
	tmp_stats[i++] = swstats->rda_err_cnt;
	tmp_stats[i++] = swstats->rti_err_cnt;
	tmp_stats[i++] = swstats->mc_err_cnt;
}
6506
/* ethtool get_regs_len handler: size in bytes of the register dump. */
static int s2io_ethtool_get_regs_len(struct net_device *dev)
{
	return XENA_REG_SPACE;
}
6511
6512
/* ethtool get_eeprom_len handler: size in bytes of the EEPROM. */
static int s2io_get_eeprom_len(struct net_device *dev)
{
	return XENA_EEPROM_SPACE;
}
6517
6518static int s2io_get_sset_count(struct net_device *dev, int sset)
6519{
6520	struct s2io_nic *sp = netdev_priv(dev);
6521
6522	switch (sset) {
6523	case ETH_SS_TEST:
6524		return S2IO_TEST_LEN;
6525	case ETH_SS_STATS:
6526		switch (sp->device_type) {
6527		case XFRAME_I_DEVICE:
6528			return XFRAME_I_STAT_LEN;
6529		case XFRAME_II_DEVICE:
6530			return XFRAME_II_STAT_LEN;
6531		default:
6532			return 0;
6533		}
6534	default:
6535		return -EOPNOTSUPP;
6536	}
6537}
6538
6539static void s2io_ethtool_get_strings(struct net_device *dev,
6540				     u32 stringset, u8 *data)
6541{
6542	int stat_size = 0;
6543	struct s2io_nic *sp = netdev_priv(dev);
6544
6545	switch (stringset) {
6546	case ETH_SS_TEST:
6547		memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6548		break;
6549	case ETH_SS_STATS:
6550		stat_size = sizeof(ethtool_xena_stats_keys);
6551		memcpy(data, &ethtool_xena_stats_keys, stat_size);
6552		if (sp->device_type == XFRAME_II_DEVICE) {
6553			memcpy(data + stat_size,
6554			       &ethtool_enhanced_stats_keys,
6555			       sizeof(ethtool_enhanced_stats_keys));
6556			stat_size += sizeof(ethtool_enhanced_stats_keys);
6557		}
6558
6559		memcpy(data + stat_size, &ethtool_driver_stats_keys,
6560		       sizeof(ethtool_driver_stats_keys));
6561	}
6562}
6563
6564static int s2io_set_features(struct net_device *dev, netdev_features_t features)
6565{
6566	struct s2io_nic *sp = netdev_priv(dev);
6567	netdev_features_t changed = (features ^ dev->features) & NETIF_F_LRO;
6568
6569	if (changed && netif_running(dev)) {
6570		int rc;
6571
6572		s2io_stop_all_tx_queue(sp);
6573		s2io_card_down(sp);
6574		dev->features = features;
6575		rc = s2io_card_up(sp);
6576		if (rc)
6577			s2io_reset(sp);
6578		else
6579			s2io_start_all_tx_queue(sp);
6580
6581		return rc ? rc : 1;
6582	}
6583
6584	return 0;
6585}
6586
/* ethtool entry points registered for the device; handlers above. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo = s2io_ethtool_gdrvinfo,
	.get_regs_len = s2io_ethtool_get_regs_len,
	.get_regs = s2io_ethtool_gregs,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = s2io_get_eeprom_len,
	.get_eeprom = s2io_ethtool_geeprom,
	.set_eeprom = s2io_ethtool_seeprom,
	.get_ringparam = s2io_ethtool_gringparam,
	.get_pauseparam = s2io_ethtool_getpause_data,
	.set_pauseparam = s2io_ethtool_setpause_data,
	.self_test = s2io_ethtool_test,
	.get_strings = s2io_ethtool_get_strings,
	.set_phys_id = s2io_ethtool_set_led,
	.get_ethtool_stats = s2io_get_ethtool_stats,
	.get_sset_count = s2io_get_sset_count,
	.get_link_ksettings = s2io_ethtool_get_link_ksettings,
	.set_link_ksettings = s2io_ethtool_set_link_ksettings,
};
6606
6607/**
6608 *  s2io_ioctl - Entry point for the Ioctl
6609 *  @dev :  Device pointer.
 *  @rq :  An IOCTL specific structure that can contain a pointer to
6611 *  a proprietary structure used to pass information to the driver.
6612 *  @cmd :  This is used to distinguish between the different commands that
6613 *  can be passed to the IOCTL functions.
6614 *  Description:
 *  Currently no special functionality is supported in IOCTL, hence
 *  this function always returns -EOPNOTSUPP.
6617 */
6618
static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	/* No private ioctls are implemented. */
	return -EOPNOTSUPP;
}
6623
6624/**
6625 *  s2io_change_mtu - entry point to change MTU size for the device.
6626 *   @dev : device pointer.
6627 *   @new_mtu : the new MTU size for the device.
6628 *   Description: A driver entry point to change MTU size for the device.
6629 *   Before changing the MTU the device must be stopped.
6630 *  Return value:
6631 *   0 on success and an appropriate (-)ve integer as defined in errno.h
6632 *   file on failure.
6633 */
6634
static int s2io_change_mtu(struct net_device *dev, int new_mtu)
{
	struct s2io_nic *sp = netdev_priv(dev);
	int ret = 0;

	/* NOTE(review): the new MTU is committed before the card is
	 * restarted and is not rolled back if s2io_card_up() fails
	 * below — confirm this is intentional.
	 */
	WRITE_ONCE(dev->mtu, new_mtu);
	if (netif_running(dev)) {
		/* Card must be fully restarted for the new MTU to
		 * take effect.
		 */
		s2io_stop_all_tx_queue(sp);
		s2io_card_down(sp);
		ret = s2io_card_up(sp);
		if (ret) {
			DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
				  __func__);
			return ret;
		}
		s2io_wake_all_tx_queue(sp);
	} else { /* Device is down */
		/* Just program the RMAC maximum payload length register;
		 * the rest is applied on the next bring-up.
		 */
		struct XENA_dev_config __iomem *bar0 = sp->bar0;
		u64 val64 = new_mtu;

		writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
	}

	return ret;
}
6660
6661/**
6662 * s2io_set_link - Set the LInk status
6663 * @work: work struct containing a pointer to device private structure
6664 * Description: Sets the link status for the adapter
6665 */
6666
static void s2io_set_link(struct work_struct *work)
{
	struct s2io_nic *nic = container_of(work, struct s2io_nic,
					    set_link_task);
	struct net_device *dev = nic->dev;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64;
	u16 subid;

	rtnl_lock();

	/* Nothing to do if the interface went down meanwhile. */
	if (!netif_running(dev))
		goto out_unlock;

	if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
		/* The card is being reset, no point doing anything */
		goto out_unlock;
	}

	subid = nic->pdev->subsystem_device;
	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Allow a small delay for the NICs self initiated
		 * cleanup to complete.
		 */
		msleep(100);
	}

	val64 = readq(&bar0->adapter_status);
	if (LINK_IS_UP(val64)) {
		/* First link-up: enable the adapter once it is
		 * verified quiescent.
		 */
		if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
			if (verify_xena_quiescence(nic)) {
				val64 = readq(&bar0->adapter_control);
				val64 |= ADAPTER_CNTL_EN;
				writeq(val64, &bar0->adapter_control);
				/* Some boards drive the link LED via
				 * GPIO 0 instead of the LED bit.
				 */
				if (CARDS_WITH_FAULTY_LINK_INDICATORS(
					    nic->device_type, subid)) {
					val64 = readq(&bar0->gpio_control);
					val64 |= GPIO_CTRL_GPIO_0;
					writeq(val64, &bar0->gpio_control);
					/* read back to flush the write */
					val64 = readq(&bar0->gpio_control);
				} else {
					val64 |= ADAPTER_LED_ON;
					writeq(val64, &bar0->adapter_control);
				}
				nic->device_enabled_once = true;
			} else {
				DBG_PRINT(ERR_DBG,
					  "%s: Error: device is not Quiescent\n",
					  dev->name);
				s2io_stop_all_tx_queue(nic);
			}
		}
		val64 = readq(&bar0->adapter_control);
		val64 |= ADAPTER_LED_ON;
		writeq(val64, &bar0->adapter_control);
		s2io_link(nic, LINK_UP);
	} else {
		if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
						      subid)) {
			val64 = readq(&bar0->gpio_control);
			val64 &= ~GPIO_CTRL_GPIO_0;
			writeq(val64, &bar0->gpio_control);
			/* read back to flush the write */
			val64 = readq(&bar0->gpio_control);
		}
		/* turn off LED */
		val64 = readq(&bar0->adapter_control);
		val64 = val64 & (~ADAPTER_LED_ON);
		writeq(val64, &bar0->adapter_control);
		s2io_link(nic, LINK_DOWN);
	}
	clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));

out_unlock:
	rtnl_unlock();
}
6743
/*
 * set_rxd_buffer_pointer - (re)attach an skb and its DMA mappings to a
 * receive descriptor.  If *skb is already set, the previously-mapped
 * addresses (*temp0..*temp2) are simply written back into the
 * descriptor; otherwise a new skb is allocated, mapped, and the mapped
 * addresses are stored in *temp0..*temp2 for reuse by the following
 * descriptors whose Host_Control is still NULL.
 * Returns 0 on success, -ENOMEM on allocation or mapping failure.
 */
static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
				  struct buffAdd *ba,
				  struct sk_buff **skb, u64 *temp0, u64 *temp1,
				  u64 *temp2, int size)
{
	struct net_device *dev = sp->dev;
	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

	if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
		struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
		/* allocate skb */
		if (*skb) {
			DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
			/*
			 * As Rx frame are not going to be processed,
			 * using same mapped address for the Rxd
			 * buffer pointer
			 */
			rxdp1->Buffer0_ptr = *temp0;
		} else {
			*skb = netdev_alloc_skb(dev, size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG,
					  "%s: Out of memory to allocate %s\n",
					  dev->name, "1 buf mode SKBs");
				stats->mem_alloc_fail_cnt++;
				return -ENOMEM ;
			}
			stats->mem_allocated += (*skb)->truesize;
			/* storing the mapped addr in a temp variable
			 * such it will be used for next rxd whose
			 * Host Control is NULL
			 */
			rxdp1->Buffer0_ptr = *temp0 =
				dma_map_single(&sp->pdev->dev, (*skb)->data,
					       size - NET_IP_ALIGN,
					       DMA_FROM_DEVICE);
			if (dma_mapping_error(&sp->pdev->dev, rxdp1->Buffer0_ptr))
				goto memalloc_failed;
			rxdp->Host_Control = (unsigned long) (*skb);
		}
	} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
		struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
		/* Two buffer Mode */
		if (*skb) {
			rxdp3->Buffer2_ptr = *temp2;
			rxdp3->Buffer0_ptr = *temp0;
			rxdp3->Buffer1_ptr = *temp1;
		} else {
			*skb = netdev_alloc_skb(dev, size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG,
					  "%s: Out of memory to allocate %s\n",
					  dev->name,
					  "2 buf mode SKBs");
				stats->mem_alloc_fail_cnt++;
				return -ENOMEM;
			}
			stats->mem_allocated += (*skb)->truesize;
			/* Map in reverse descriptor order; each failure
			 * path unmaps whatever was already mapped.
			 */
			rxdp3->Buffer2_ptr = *temp2 =
				dma_map_single(&sp->pdev->dev, (*skb)->data,
					       dev->mtu + 4, DMA_FROM_DEVICE);
			if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer2_ptr))
				goto memalloc_failed;
			rxdp3->Buffer0_ptr = *temp0 =
				dma_map_single(&sp->pdev->dev, ba->ba_0,
					       BUF0_LEN, DMA_FROM_DEVICE);
			if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer0_ptr)) {
				dma_unmap_single(&sp->pdev->dev,
						 (dma_addr_t)rxdp3->Buffer2_ptr,
						 dev->mtu + 4,
						 DMA_FROM_DEVICE);
				goto memalloc_failed;
			}
			rxdp->Host_Control = (unsigned long) (*skb);

			/* Buffer-1 will be dummy buffer not used */
			rxdp3->Buffer1_ptr = *temp1 =
				dma_map_single(&sp->pdev->dev, ba->ba_1,
					       BUF1_LEN, DMA_FROM_DEVICE);
			if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer1_ptr)) {
				dma_unmap_single(&sp->pdev->dev,
						 (dma_addr_t)rxdp3->Buffer0_ptr,
						 BUF0_LEN, DMA_FROM_DEVICE);
				dma_unmap_single(&sp->pdev->dev,
						 (dma_addr_t)rxdp3->Buffer2_ptr,
						 dev->mtu + 4,
						 DMA_FROM_DEVICE);
				goto memalloc_failed;
			}
		}
	}
	return 0;

memalloc_failed:
	/* Only reached after a successful netdev_alloc_skb(), so *skb
	 * is valid here; account for it and free it.
	 */
	stats->pci_map_fail_cnt++;
	stats->mem_freed += (*skb)->truesize;
	dev_kfree_skb(*skb);
	return -ENOMEM;
}
6844
6845static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6846				int size)
6847{
6848	struct net_device *dev = sp->dev;
6849	if (sp->rxd_mode == RXD_MODE_1) {
6850		rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
6851	} else if (sp->rxd_mode == RXD_MODE_3B) {
6852		rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6853		rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6854		rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4);
6855	}
6856}
6857
/*
 * rxd_owner_bit_reset - hand every Rx descriptor back to the hardware.
 * @sp: device private structure.
 *
 * Walks all rings, blocks and descriptors, (re)attaches buffers via
 * set_rxd_buffer_pointer(), programs the buffer sizes and flips the
 * ownership bit so the NIC owns the descriptors again.  Used while the
 * card is being brought down to avoid a ring bump (see the caller).
 *
 * Returns 0 in all cases; an allocation failure from the helper simply
 * terminates the walk early without reporting an error.
 */
static  int rxd_owner_bit_reset(struct s2io_nic *sp)
{
	int i, j, k, blk_cnt = 0, size;
	struct config_param *config = &sp->config;
	struct mac_info *mac_control = &sp->mac_control;
	struct net_device *dev = sp->dev;
	struct RxD_t *rxdp = NULL;
	struct sk_buff *skb = NULL;
	struct buffAdd *ba = NULL;
	/* scratch skb/DMA addresses shared with set_rxd_buffer_pointer()
	 * so one mapping can be reused across descriptors
	 */
	u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;

	/* Calculate the size based on ring mode */
	size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
		HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
	if (sp->rxd_mode == RXD_MODE_1)
		size += NET_IP_ALIGN;
	else if (sp->rxd_mode == RXD_MODE_3B)
		size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;

	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1);

		for (j = 0; j < blk_cnt; j++) {
			for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
				rxdp = ring->rx_blocks[j].rxds[k].virt_addr;
				if (sp->rxd_mode == RXD_MODE_3B)
					ba = &ring->ba[j][k];
				if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
							   &temp0_64,
							   &temp1_64,
							   &temp2_64,
							   size) == -ENOMEM) {
					/* out of memory: stop quietly */
					return 0;
				}

				set_rxd_buffer_size(sp, rxdp, size);
				/* buffer writes must land before OWN flip */
				dma_wmb();
				/* flip the Ownership bit to Hardware */
				rxdp->Control_1 |= RXD_OWN_XENA;
			}
		}
	}
	return 0;

}
6906
/*
 * s2io_add_isr - register the driver's interrupt handler(s).
 * @sp: device private structure.
 *
 * When MSI-X is configured, enables it and registers one handler per
 * ring vector plus an alarm-vector handler; on any failure the driver
 * falls back to legacy INTA with a single shared handler.
 *
 * Returns 0 on success, -1 if the INTA registration fails.
 */
static int s2io_add_isr(struct s2io_nic *sp)
{
	int ret = 0;
	struct net_device *dev = sp->dev;
	int err = 0;

	if (sp->config.intr_type == MSI_X)
		ret = s2io_enable_msi_x(sp);
	if (ret) {
		/* could not enable MSI-X: fall back to legacy interrupts */
		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
		sp->config.intr_type = INTA;
	}

	/*
	 * Store the values of the MSIX table in
	 * the struct s2io_nic structure
	 */
	store_xmsi_data(sp);

	/* After proper initialization of H/W, register ISR */
	if (sp->config.intr_type == MSI_X) {
		int i, msix_rx_cnt = 0;

		for (i = 0; i < sp->num_entries; i++) {
			if (sp->s2io_entries[i].in_use == MSIX_FLG) {
				if (sp->s2io_entries[i].type ==
				    MSIX_RING_TYPE) {
					snprintf(sp->desc[i],
						sizeof(sp->desc[i]),
						"%s:MSI-X-%d-RX",
						dev->name, i);
					err = request_irq(sp->entries[i].vector,
							  s2io_msix_ring_handle,
							  0,
							  sp->desc[i],
							  sp->s2io_entries[i].arg);
				} else if (sp->s2io_entries[i].type ==
					   MSIX_ALARM_TYPE) {
					snprintf(sp->desc[i],
						sizeof(sp->desc[i]),
						"%s:MSI-X-%d-TX",
						dev->name, i);
					err = request_irq(sp->entries[i].vector,
							  s2io_msix_fifo_handle,
							  0,
							  sp->desc[i],
							  sp->s2io_entries[i].arg);

				}
				/* if either data or addr is zero print it. */
				if (!(sp->msix_info[i].addr &&
				      sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG,
						  "%s @Addr:0x%llx Data:0x%llx\n",
						  sp->desc[i],
						  (unsigned long long)
						  sp->msix_info[i].addr,
						  (unsigned long long)
						  ntohl(sp->msix_info[i].data));
				} else
					msix_rx_cnt++;
				if (err) {
					/* undo everything registered so far
					 * and retry in INTA mode
					 */
					remove_msix_isr(sp);

					DBG_PRINT(ERR_DBG,
						  "%s:MSI-X-%d registration "
						  "failed\n", dev->name, i);

					DBG_PRINT(ERR_DBG,
						  "%s: Defaulting to INTA\n",
						  dev->name);
					sp->config.intr_type = INTA;
					break;
				}
				sp->s2io_entries[i].in_use =
					MSIX_REGISTERED_SUCCESS;
			}
		}
		if (!err) {
			/* pre-decrement: presumably excludes the alarm
			 * vector from the RX count -- see next message
			 */
			pr_info("MSI-X-RX %d entries enabled\n", --msix_rx_cnt);
			DBG_PRINT(INFO_DBG,
				  "MSI-X-TX entries enabled through alarm vector\n");
		}
	}
	if (sp->config.intr_type == INTA) {
		err = request_irq(sp->pdev->irq, s2io_isr, IRQF_SHARED,
				  sp->name, dev);
		if (err) {
			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
				  dev->name);
			return -1;
		}
	}
	return 0;
}
7002
7003static void s2io_rem_isr(struct s2io_nic *sp)
7004{
7005	if (sp->config.intr_type == MSI_X)
7006		remove_msix_isr(sp);
7007	else
7008		remove_inta_isr(sp);
7009}
7010
/*
 * do_s2io_card_down - tear the interface down.
 * @sp: device private structure.
 * @do_io: non-zero when register I/O is permitted (NIC is stopped,
 *	   quiesced and reset); zero skips all hardware access --
 *	   presumably for error/recovery paths, confirm with callers.
 *
 * Stops the alarm timer and NAPI, disables traffic, releases the
 * IRQ(s), waits for the adapter to quiesce, resets it and frees all
 * Tx/Rx buffers.
 */
static void do_s2io_card_down(struct s2io_nic *sp, int do_io)
{
	int cnt = 0;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	register u64 val64 = 0;
	struct config_param *config;
	config = &sp->config;

	if (!is_s2io_card_up(sp))
		return;

	del_timer_sync(&sp->alarm_timer);
	/* If s2io_set_link task is executing, wait till it completes. */
	while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state)))
		msleep(50);
	clear_bit(__S2IO_STATE_CARD_UP, &sp->state);

	/* Disable napi */
	if (sp->config.napi) {
		int off = 0;
		if (config->intr_type ==  MSI_X) {
			for (; off < sp->config.rx_ring_num; off++)
				napi_disable(&sp->mac_control.rings[off].napi);
		}
		else
			napi_disable(&sp->napi);
	}

	/* disable Tx and Rx traffic on the NIC */
	if (do_io)
		stop_nic(sp);

	s2io_rem_isr(sp);

	/* stop the tx queue, indicate link down */
	s2io_link(sp, LINK_DOWN);

	/* Check if the device is Quiescent and then Reset the NIC */
	while (do_io) {
		/* As per the HW requirement we need to replenish the
		 * receive buffer to avoid the ring bump. Since there is
		 * no intention of processing the Rx frame at this point we are
		 * just setting the ownership bit of rxd in Each Rx
		 * ring to HW and set the appropriate buffer size
		 * based on the ring mode
		 */
		rxd_owner_bit_reset(sp);

		val64 = readq(&bar0->adapter_status);
		if (verify_xena_quiescence(sp)) {
			if (verify_pcc_quiescent(sp, sp->device_enabled_once))
				break;
		}

		msleep(50);
		cnt++;
		/* give up after 10 polls (~500 ms) and reset anyway */
		if (cnt == 10) {
			DBG_PRINT(ERR_DBG, "Device not Quiescent - "
				  "adapter status reads 0x%llx\n",
				  (unsigned long long)val64);
			break;
		}
	}
	if (do_io)
		s2io_reset(sp);

	/* Free all Tx buffers */
	free_tx_buffers(sp);

	/* Free all Rx buffers */
	free_rx_buffers(sp);

	clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
}
7085
/* Bring the interface down with register I/O enabled (stop + reset). */
static void s2io_card_down(struct s2io_nic *sp)
{
	do_s2io_card_down(sp, 1);
}
7090
/*
 * s2io_card_up - bring the interface up.
 * @sp: device private structure.
 *
 * Initializes the hardware registers, fills the Rx rings, enables
 * NAPI, restores the receive mode, starts the NIC, registers the
 * ISR(s), arms the alarm timer and enables the selected interrupts.
 *
 * Returns 0 on success or a negative errno; on failure the adapter is
 * reset and any Rx buffers already allocated are freed.
 */
static int s2io_card_up(struct s2io_nic *sp)
{
	int i, ret = 0;
	struct config_param *config;
	struct mac_info *mac_control;
	struct net_device *dev = sp->dev;
	u16 interruptible;

	/* Initialize the H/W I/O registers */
	ret = init_nic(sp);
	if (ret != 0) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		if (ret != -EIO)
			s2io_reset(sp);
		return ret;
	}

	/*
	 * Initializing the Rx buffers. For now we are considering only 1
	 * Rx ring and initializing buffers into 30 Rx blocks
	 */
	config = &sp->config;
	mac_control = &sp->mac_control;

	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		ring->mtu = dev->mtu;
		ring->lro = !!(dev->features & NETIF_F_LRO);
		ret = fill_rx_buffers(sp, ring, 1);
		if (ret) {
			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
				  dev->name);
			ret = -ENOMEM;
			goto err_fill_buff;
		}
		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
			  ring->rx_bufs_left);
	}

	/* Initialise napi */
	if (config->napi) {
		if (config->intr_type ==  MSI_X) {
			/* one NAPI context per ring in MSI-X mode */
			for (i = 0; i < sp->config.rx_ring_num; i++)
				napi_enable(&sp->mac_control.rings[i].napi);
		} else {
			napi_enable(&sp->napi);
		}
	}

	/* Maintain the state prior to the open */
	if (sp->promisc_flg)
		sp->promisc_flg = 0;
	if (sp->m_cast_flg) {
		sp->m_cast_flg = 0;
		sp->all_multi_pos = 0;
	}

	/* Setting its receive mode */
	s2io_set_multicast(dev, true);

	if (dev->features & NETIF_F_LRO) {
		/* Initialize max aggregatable pkts per session based on MTU */
		sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
		/* Check if we can use (if specified) user provided value */
		if (lro_max_pkts < sp->lro_max_aggr_per_sess)
			sp->lro_max_aggr_per_sess = lro_max_pkts;
	}

	/* Enable Rx Traffic and interrupts on the NIC */
	if (start_nic(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
		ret = -ENODEV;
		goto err_out;
	}

	/* Add interrupt service routine */
	if (s2io_add_isr(sp) != 0) {
		/* MSI-X may be partially enabled; tear it down */
		if (sp->config.intr_type == MSI_X)
			s2io_rem_isr(sp);
		ret = -ENODEV;
		goto err_out;
	}

	timer_setup(&sp->alarm_timer, s2io_alarm_handle, 0);
	mod_timer(&sp->alarm_timer, jiffies + HZ / 2);

	set_bit(__S2IO_STATE_CARD_UP, &sp->state);

	/*  Enable select interrupts */
	en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
	if (sp->config.intr_type != INTA) {
		interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	} else {
		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
		interruptible |= TX_PIC_INTR;
		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	}

	return 0;

err_out:
	if (config->napi) {
		if (config->intr_type == MSI_X) {
			for (i = 0; i < sp->config.rx_ring_num; i++)
				napi_disable(&sp->mac_control.rings[i].napi);
		} else {
			napi_disable(&sp->napi);
		}
	}
err_fill_buff:
	s2io_reset(sp);
	free_rx_buffers(sp);
	return ret;
}
7208
7209/**
7210 * s2io_restart_nic - Resets the NIC.
7211 * @work : work struct containing a pointer to the device private structure
7212 * Description:
7213 * This function is scheduled to be run by the s2io_tx_watchdog
7214 * function after 0.5 secs to reset the NIC. The idea is to reduce
7215 * the run time of the watch dog routine which is run holding a
7216 * spin lock.
7217 */
7218
7219static void s2io_restart_nic(struct work_struct *work)
7220{
7221	struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
7222	struct net_device *dev = sp->dev;
7223
7224	rtnl_lock();
7225
7226	if (!netif_running(dev))
7227		goto out_unlock;
7228
7229	s2io_card_down(sp);
7230	if (s2io_card_up(sp)) {
7231		DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name);
7232	}
7233	s2io_wake_all_tx_queue(sp);
7234	DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name);
7235out_unlock:
7236	rtnl_unlock();
7237}
7238
7239/**
7240 *  s2io_tx_watchdog - Watchdog for transmit side.
7241 *  @dev : Pointer to net device structure
7242 *  @txqueue: index of the hanging queue
7243 *  Description:
7244 *  This function is triggered if the Tx Queue is stopped
7245 *  for a pre-defined amount of time when the Interface is still up.
7246 *  If the Interface is jammed in such a situation, the hardware is
7247 *  reset (by s2io_close) and restarted again (by s2io_open) to
7248 *  overcome any problem that might have been caused in the hardware.
7249 *  Return value:
7250 *  void
7251 */
7252
7253static void s2io_tx_watchdog(struct net_device *dev, unsigned int txqueue)
7254{
7255	struct s2io_nic *sp = netdev_priv(dev);
7256	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7257
7258	if (netif_carrier_ok(dev)) {
7259		swstats->watchdog_timer_cnt++;
7260		schedule_work(&sp->rst_timer_task);
7261		swstats->soft_reset_cnt++;
7262	}
7263}
7264
7265/**
7266 *   rx_osm_handler - To perform some OS related operations on SKB.
7267 *   @ring_data : the ring from which this RxD was extracted.
7268 *   @rxdp: descriptor
7269 *   Description:
7270 *   This function is called by the Rx interrupt serivce routine to perform
7271 *   some OS related operations on the SKB before passing it to the upper
7272 *   layers. It mainly checks if the checksum is OK, if so adds it to the
7273 *   SKBs cksum variable, increments the Rx packet count and passes the SKB
7274 *   to the upper layer. If the checksum is wrong, it increments the Rx
7275 *   packet error count, frees the SKB and returns error.
7276 *   Return value:
7277 *   SUCCESS on success and -1 on failure.
7278 */
7279static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7280{
7281	struct s2io_nic *sp = ring_data->nic;
7282	struct net_device *dev = ring_data->dev;
7283	struct sk_buff *skb = (struct sk_buff *)
7284		((unsigned long)rxdp->Host_Control);
7285	int ring_no = ring_data->ring_no;
7286	u16 l3_csum, l4_csum;
7287	unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
7288	struct lro *lro;
7289	u8 err_mask;
7290	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7291
7292	skb->dev = dev;
7293
7294	if (err) {
7295		/* Check for parity error */
7296		if (err & 0x1)
7297			swstats->parity_err_cnt++;
7298
7299		err_mask = err >> 48;
7300		switch (err_mask) {
7301		case 1:
7302			swstats->rx_parity_err_cnt++;
7303			break;
7304
7305		case 2:
7306			swstats->rx_abort_cnt++;
7307			break;
7308
7309		case 3:
7310			swstats->rx_parity_abort_cnt++;
7311			break;
7312
7313		case 4:
7314			swstats->rx_rda_fail_cnt++;
7315			break;
7316
7317		case 5:
7318			swstats->rx_unkn_prot_cnt++;
7319			break;
7320
7321		case 6:
7322			swstats->rx_fcs_err_cnt++;
7323			break;
7324
7325		case 7:
7326			swstats->rx_buf_size_err_cnt++;
7327			break;
7328
7329		case 8:
7330			swstats->rx_rxd_corrupt_cnt++;
7331			break;
7332
7333		case 15:
7334			swstats->rx_unkn_err_cnt++;
7335			break;
7336		}
7337		/*
7338		 * Drop the packet if bad transfer code. Exception being
7339		 * 0x5, which could be due to unsupported IPv6 extension header.
7340		 * In this case, we let stack handle the packet.
7341		 * Note that in this case, since checksum will be incorrect,
7342		 * stack will validate the same.
7343		 */
7344		if (err_mask != 0x5) {
7345			DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
7346				  dev->name, err_mask);
7347			dev->stats.rx_crc_errors++;
7348			swstats->mem_freed
7349				+= skb->truesize;
7350			dev_kfree_skb(skb);
7351			ring_data->rx_bufs_left -= 1;
7352			rxdp->Host_Control = 0;
7353			return 0;
7354		}
7355	}
7356
7357	rxdp->Host_Control = 0;
7358	if (sp->rxd_mode == RXD_MODE_1) {
7359		int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
7360
7361		skb_put(skb, len);
7362	} else if (sp->rxd_mode == RXD_MODE_3B) {
7363		int get_block = ring_data->rx_curr_get_info.block_index;
7364		int get_off = ring_data->rx_curr_get_info.offset;
7365		int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
7366		int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
7367
7368		struct buffAdd *ba = &ring_data->ba[get_block][get_off];
7369		skb_put_data(skb, ba->ba_0, buf0_len);
7370		skb_put(skb, buf2_len);
7371	}
7372
7373	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
7374	    ((!ring_data->lro) ||
7375	     (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG))) &&
7376	    (dev->features & NETIF_F_RXCSUM)) {
7377		l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
7378		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
7379		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
7380			/*
7381			 * NIC verifies if the Checksum of the received
7382			 * frame is Ok or not and accordingly returns
7383			 * a flag in the RxD.
7384			 */
7385			skb->ip_summed = CHECKSUM_UNNECESSARY;
7386			if (ring_data->lro) {
7387				u32 tcp_len = 0;
7388				u8 *tcp;
7389				int ret = 0;
7390
7391				ret = s2io_club_tcp_session(ring_data,
7392							    skb->data, &tcp,
7393							    &tcp_len, &lro,
7394							    rxdp, sp);
7395				switch (ret) {
7396				case 3: /* Begin anew */
7397					lro->parent = skb;
7398					goto aggregate;
7399				case 1: /* Aggregate */
7400					lro_append_pkt(sp, lro, skb, tcp_len);
7401					goto aggregate;
7402				case 4: /* Flush session */
7403					lro_append_pkt(sp, lro, skb, tcp_len);
7404					queue_rx_frame(lro->parent,
7405						       lro->vlan_tag);
7406					clear_lro_session(lro);
7407					swstats->flush_max_pkts++;
7408					goto aggregate;
7409				case 2: /* Flush both */
7410					lro->parent->data_len = lro->frags_len;
7411					swstats->sending_both++;
7412					queue_rx_frame(lro->parent,
7413						       lro->vlan_tag);
7414					clear_lro_session(lro);
7415					goto send_up;
7416				case 0: /* sessions exceeded */
7417				case -1: /* non-TCP or not L2 aggregatable */
7418				case 5: /*
7419					 * First pkt in session not
7420					 * L3/L4 aggregatable
7421					 */
7422					break;
7423				default:
7424					DBG_PRINT(ERR_DBG,
7425						  "%s: Samadhana!!\n",
7426						  __func__);
7427					BUG();
7428				}
7429			}
7430		} else {
7431			/*
7432			 * Packet with erroneous checksum, let the
7433			 * upper layers deal with it.
7434			 */
7435			skb_checksum_none_assert(skb);
7436		}
7437	} else
7438		skb_checksum_none_assert(skb);
7439
7440	swstats->mem_freed += skb->truesize;
7441send_up:
7442	skb_record_rx_queue(skb, ring_no);
7443	queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
7444aggregate:
7445	sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
7446	return SUCCESS;
7447}
7448
7449/**
7450 *  s2io_link - stops/starts the Tx queue.
7451 *  @sp : private member of the device structure, which is a pointer to the
7452 *  s2io_nic structure.
7453 *  @link : inidicates whether link is UP/DOWN.
7454 *  Description:
7455 *  This function stops/starts the Tx queue depending on whether the link
7456 *  status of the NIC is down or up. This is called by the Alarm
7457 *  interrupt handler whenever a link change interrupt comes up.
7458 *  Return value:
7459 *  void.
7460 */
7461
7462static void s2io_link(struct s2io_nic *sp, int link)
7463{
7464	struct net_device *dev = sp->dev;
7465	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7466
7467	if (link != sp->last_link_state) {
7468		init_tti(sp, link, false);
7469		if (link == LINK_DOWN) {
7470			DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7471			s2io_stop_all_tx_queue(sp);
7472			netif_carrier_off(dev);
7473			if (swstats->link_up_cnt)
7474				swstats->link_up_time =
7475					jiffies - sp->start_time;
7476			swstats->link_down_cnt++;
7477		} else {
7478			DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7479			if (swstats->link_down_cnt)
7480				swstats->link_down_time =
7481					jiffies - sp->start_time;
7482			swstats->link_up_cnt++;
7483			netif_carrier_on(dev);
7484			s2io_wake_all_tx_queue(sp);
7485		}
7486	}
7487	sp->last_link_state = link;
7488	sp->start_time = jiffies;
7489}
7490
7491/**
7492 *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7493 *  @sp : private member of the device structure, which is a pointer to the
7494 *  s2io_nic structure.
7495 *  Description:
7496 *  This function initializes a few of the PCI and PCI-X configuration registers
7497 *  with recommended values.
7498 *  Return value:
7499 *  void
7500 */
7501
7502static void s2io_init_pci(struct s2io_nic *sp)
7503{
7504	u16 pci_cmd = 0, pcix_cmd = 0;
7505
7506	/* Enable Data Parity Error Recovery in PCI-X command register. */
7507	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7508			     &(pcix_cmd));
7509	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7510			      (pcix_cmd | 1));
7511	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7512			     &(pcix_cmd));
7513
7514	/* Set the PErr Response bit in PCI command register. */
7515	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7516	pci_write_config_word(sp->pdev, PCI_COMMAND,
7517			      (pci_cmd | PCI_COMMAND_PARITY));
7518	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7519}
7520
7521static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7522			    u8 *dev_multiq)
7523{
7524	int i;
7525
7526	if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
7527		DBG_PRINT(ERR_DBG, "Requested number of tx fifos "
7528			  "(%d) not supported\n", tx_fifo_num);
7529
7530		if (tx_fifo_num < 1)
7531			tx_fifo_num = 1;
7532		else
7533			tx_fifo_num = MAX_TX_FIFOS;
7534
7535		DBG_PRINT(ERR_DBG, "Default to %d tx fifos\n", tx_fifo_num);
7536	}
7537
7538	if (multiq)
7539		*dev_multiq = multiq;
7540
7541	if (tx_steering_type && (1 == tx_fifo_num)) {
7542		if (tx_steering_type != TX_DEFAULT_STEERING)
7543			DBG_PRINT(ERR_DBG,
7544				  "Tx steering is not supported with "
7545				  "one fifo. Disabling Tx steering.\n");
7546		tx_steering_type = NO_STEERING;
7547	}
7548
7549	if ((tx_steering_type < NO_STEERING) ||
7550	    (tx_steering_type > TX_DEFAULT_STEERING)) {
7551		DBG_PRINT(ERR_DBG,
7552			  "Requested transmit steering not supported\n");
7553		DBG_PRINT(ERR_DBG, "Disabling transmit steering\n");
7554		tx_steering_type = NO_STEERING;
7555	}
7556
7557	if (rx_ring_num > MAX_RX_RINGS) {
7558		DBG_PRINT(ERR_DBG,
7559			  "Requested number of rx rings not supported\n");
7560		DBG_PRINT(ERR_DBG, "Default to %d rx rings\n",
7561			  MAX_RX_RINGS);
7562		rx_ring_num = MAX_RX_RINGS;
7563	}
7564
7565	if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7566		DBG_PRINT(ERR_DBG, "Wrong intr_type requested. "
7567			  "Defaulting to INTA\n");
7568		*dev_intr_type = INTA;
7569	}
7570
7571	if ((*dev_intr_type == MSI_X) &&
7572	    ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7573	     (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7574		DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. "
7575			  "Defaulting to INTA\n");
7576		*dev_intr_type = INTA;
7577	}
7578
7579	if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7580		DBG_PRINT(ERR_DBG, "Requested ring mode not supported\n");
7581		DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
7582		rx_ring_mode = 1;
7583	}
7584
7585	for (i = 0; i < MAX_RX_RINGS; i++)
7586		if (rx_ring_sz[i] > MAX_RX_BLOCKS_PER_RING) {
7587			DBG_PRINT(ERR_DBG, "Requested rx ring size not "
7588				  "supported\nDefaulting to %d\n",
7589				  MAX_RX_BLOCKS_PER_RING);
7590			rx_ring_sz[i] = MAX_RX_BLOCKS_PER_RING;
7591		}
7592
7593	return SUCCESS;
7594}
7595
7596/**
7597 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS or Traffic class respectively.
7598 * @nic: device private variable
7599 * @ds_codepoint: data
7600 * @ring: ring index
7601 * Description: The function configures the receive steering to
7602 * desired receive ring.
7603 * Return Value:  SUCCESS on success and
7604 * '-1' on failure (endian settings incorrect).
7605 */
7606static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7607{
7608	struct XENA_dev_config __iomem *bar0 = nic->bar0;
7609	register u64 val64 = 0;
7610
7611	if (ds_codepoint > 63)
7612		return FAILURE;
7613
7614	val64 = RTS_DS_MEM_DATA(ring);
7615	writeq(val64, &bar0->rts_ds_mem_data);
7616
7617	val64 = RTS_DS_MEM_CTRL_WE |
7618		RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7619		RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7620
7621	writeq(val64, &bar0->rts_ds_mem_ctrl);
7622
7623	return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7624				     RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7625				     S2IO_BIT_RESET, true);
7626}
7627
7628static const struct net_device_ops s2io_netdev_ops = {
7629	.ndo_open	        = s2io_open,
7630	.ndo_stop	        = s2io_close,
7631	.ndo_get_stats	        = s2io_get_stats,
7632	.ndo_start_xmit    	= s2io_xmit,
7633	.ndo_validate_addr	= eth_validate_addr,
7634	.ndo_set_rx_mode	= s2io_ndo_set_multicast,
7635	.ndo_eth_ioctl		= s2io_ioctl,
7636	.ndo_set_mac_address    = s2io_set_mac_addr,
7637	.ndo_change_mtu	   	= s2io_change_mtu,
7638	.ndo_set_features	= s2io_set_features,
7639	.ndo_tx_timeout	   	= s2io_tx_watchdog,
7640#ifdef CONFIG_NET_POLL_CONTROLLER
7641	.ndo_poll_controller    = s2io_netpoll,
7642#endif
7643};
7644
7645/**
7646 *  s2io_init_nic - Initialization of the adapter .
7647 *  @pdev : structure containing the PCI related information of the device.
7648 *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7649 *  Description:
7650 *  The function initializes an adapter identified by the pci_dec structure.
7651 *  All OS related initialization including memory and device structure and
7652 *  initlaization of the device private variable is done. Also the swapper
7653 *  control register is initialized to enable read and write into the I/O
7654 *  registers of the device.
7655 *  Return value:
7656 *  returns 0 on success and negative on failure.
7657 */
7658
7659static int
7660s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7661{
7662	struct s2io_nic *sp;
7663	struct net_device *dev;
7664	int i, j, ret;
7665	u32 mac_up, mac_down;
7666	u64 val64 = 0, tmp64 = 0;
7667	struct XENA_dev_config __iomem *bar0 = NULL;
7668	u16 subid;
7669	struct config_param *config;
7670	struct mac_info *mac_control;
7671	int mode;
7672	u8 dev_intr_type = intr_type;
7673	u8 dev_multiq = 0;
7674
7675	ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7676	if (ret)
7677		return ret;
7678
7679	ret = pci_enable_device(pdev);
7680	if (ret) {
7681		DBG_PRINT(ERR_DBG,
7682			  "%s: pci_enable_device failed\n", __func__);
7683		return ret;
7684	}
7685
7686	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
7687		DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__);
7688	} else {
7689		pci_disable_device(pdev);
7690		return -ENOMEM;
7691	}
7692	ret = pci_request_regions(pdev, s2io_driver_name);
7693	if (ret) {
7694		DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x\n",
7695			  __func__, ret);
7696		pci_disable_device(pdev);
7697		return -ENODEV;
7698	}
7699	if (dev_multiq)
7700		dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
7701	else
7702		dev = alloc_etherdev(sizeof(struct s2io_nic));
7703	if (dev == NULL) {
7704		pci_disable_device(pdev);
7705		pci_release_regions(pdev);
7706		return -ENODEV;
7707	}
7708
7709	pci_set_master(pdev);
7710	pci_set_drvdata(pdev, dev);
7711	SET_NETDEV_DEV(dev, &pdev->dev);
7712
7713	/*  Private member variable initialized to s2io NIC structure */
7714	sp = netdev_priv(dev);
7715	sp->dev = dev;
7716	sp->pdev = pdev;
7717	sp->device_enabled_once = false;
7718	if (rx_ring_mode == 1)
7719		sp->rxd_mode = RXD_MODE_1;
7720	if (rx_ring_mode == 2)
7721		sp->rxd_mode = RXD_MODE_3B;
7722
7723	sp->config.intr_type = dev_intr_type;
7724
7725	if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7726	    (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7727		sp->device_type = XFRAME_II_DEVICE;
7728	else
7729		sp->device_type = XFRAME_I_DEVICE;
7730
7731
7732	/* Initialize some PCI/PCI-X fields of the NIC. */
7733	s2io_init_pci(sp);
7734
7735	/*
7736	 * Setting the device configuration parameters.
7737	 * Most of these parameters can be specified by the user during
7738	 * module insertion as they are module loadable parameters. If
7739	 * these parameters are not specified during load time, they
7740	 * are initialized with default values.
7741	 */
7742	config = &sp->config;
7743	mac_control = &sp->mac_control;
7744
7745	config->napi = napi;
7746	config->tx_steering_type = tx_steering_type;
7747
7748	/* Tx side parameters. */
7749	if (config->tx_steering_type == TX_PRIORITY_STEERING)
7750		config->tx_fifo_num = MAX_TX_FIFOS;
7751	else
7752		config->tx_fifo_num = tx_fifo_num;
7753
7754	/* Initialize the fifos used for tx steering */
7755	if (config->tx_fifo_num < 5) {
7756		if (config->tx_fifo_num  == 1)
7757			sp->total_tcp_fifos = 1;
7758		else
7759			sp->total_tcp_fifos = config->tx_fifo_num - 1;
7760		sp->udp_fifo_idx = config->tx_fifo_num - 1;
7761		sp->total_udp_fifos = 1;
7762		sp->other_fifo_idx = sp->total_tcp_fifos - 1;
7763	} else {
7764		sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
7765				       FIFO_OTHER_MAX_NUM);
7766		sp->udp_fifo_idx = sp->total_tcp_fifos;
7767		sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
7768		sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
7769	}
7770
7771	config->multiq = dev_multiq;
7772	for (i = 0; i < config->tx_fifo_num; i++) {
7773		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7774
7775		tx_cfg->fifo_len = tx_fifo_len[i];
7776		tx_cfg->fifo_priority = i;
7777	}
7778
7779	/* mapping the QoS priority to the configured fifos */
7780	for (i = 0; i < MAX_TX_FIFOS; i++)
7781		config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
7782
7783	/* map the hashing selector table to the configured fifos */
7784	for (i = 0; i < config->tx_fifo_num; i++)
7785		sp->fifo_selector[i] = fifo_selector[i];
7786
7787
7788	config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7789	for (i = 0; i < config->tx_fifo_num; i++) {
7790		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
7791
7792		tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7793		if (tx_cfg->fifo_len < 65) {
7794			config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7795			break;
7796		}
7797	}
7798	/* + 2 because one Txd for skb->data and one Txd for UFO */
7799	config->max_txds = MAX_SKB_FRAGS + 2;
7800
7801	/* Rx side parameters. */
7802	config->rx_ring_num = rx_ring_num;
7803	for (i = 0; i < config->rx_ring_num; i++) {
7804		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7805		struct ring_info *ring = &mac_control->rings[i];
7806
7807		rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
7808		rx_cfg->ring_priority = i;
7809		ring->rx_bufs_left = 0;
7810		ring->rxd_mode = sp->rxd_mode;
7811		ring->rxd_count = rxd_count[sp->rxd_mode];
7812		ring->pdev = sp->pdev;
7813		ring->dev = sp->dev;
7814	}
7815
7816	for (i = 0; i < rx_ring_num; i++) {
7817		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
7818
7819		rx_cfg->ring_org = RING_ORG_BUFF1;
7820		rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7821	}
7822
7823	/*  Setting Mac Control parameters */
7824	mac_control->rmac_pause_time = rmac_pause_time;
7825	mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7826	mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7827
7828
7829	/*  initialize the shared memory used by the NIC and the host */
7830	if (init_shared_mem(sp)) {
7831		DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name);
7832		ret = -ENOMEM;
7833		goto mem_alloc_failed;
7834	}
7835
7836	sp->bar0 = pci_ioremap_bar(pdev, 0);
7837	if (!sp->bar0) {
7838		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7839			  dev->name);
7840		ret = -ENOMEM;
7841		goto bar0_remap_failed;
7842	}
7843
7844	sp->bar1 = pci_ioremap_bar(pdev, 2);
7845	if (!sp->bar1) {
7846		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7847			  dev->name);
7848		ret = -ENOMEM;
7849		goto bar1_remap_failed;
7850	}
7851
7852	/* Initializing the BAR1 address as the start of the FIFO pointer. */
7853	for (j = 0; j < MAX_TX_FIFOS; j++) {
7854		mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000);
7855	}
7856
7857	/*  Driver entry points */
7858	dev->netdev_ops = &s2io_netdev_ops;
7859	dev->ethtool_ops = &netdev_ethtool_ops;
7860	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
7861		NETIF_F_TSO | NETIF_F_TSO6 |
7862		NETIF_F_RXCSUM | NETIF_F_LRO;
7863	dev->features |= dev->hw_features |
7864		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
7865		NETIF_F_HIGHDMA;
7866	dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7867	INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7868	INIT_WORK(&sp->set_link_task, s2io_set_link);
7869
7870	pci_save_state(sp->pdev);
7871
7872	/* Setting swapper control on the NIC, for proper reset operation */
7873	if (s2io_set_swapper(sp)) {
7874		DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
7875			  dev->name);
7876		ret = -EAGAIN;
7877		goto set_swap_failed;
7878	}
7879
7880	/* Verify if the Herc works on the slot its placed into */
7881	if (sp->device_type & XFRAME_II_DEVICE) {
7882		mode = s2io_verify_pci_mode(sp);
7883		if (mode < 0) {
7884			DBG_PRINT(ERR_DBG, "%s: Unsupported PCI bus mode\n",
7885				  __func__);
7886			ret = -EBADSLT;
7887			goto set_swap_failed;
7888		}
7889	}
7890
7891	if (sp->config.intr_type == MSI_X) {
7892		sp->num_entries = config->rx_ring_num + 1;
7893		ret = s2io_enable_msi_x(sp);
7894
7895		if (!ret) {
7896			ret = s2io_test_msi(sp);
7897			/* rollback MSI-X, will re-enable during add_isr() */
7898			remove_msix_isr(sp);
7899		}
7900		if (ret) {
7901
7902			DBG_PRINT(ERR_DBG,
7903				  "MSI-X requested but failed to enable\n");
7904			sp->config.intr_type = INTA;
7905		}
7906	}
7907
7908	if (config->intr_type ==  MSI_X) {
7909		for (i = 0; i < config->rx_ring_num ; i++) {
7910			struct ring_info *ring = &mac_control->rings[i];
7911
7912			netif_napi_add(dev, &ring->napi, s2io_poll_msix);
7913		}
7914	} else {
7915		netif_napi_add(dev, &sp->napi, s2io_poll_inta);
7916	}
7917
7918	/* Not needed for Herc */
7919	if (sp->device_type & XFRAME_I_DEVICE) {
7920		/*
7921		 * Fix for all "FFs" MAC address problems observed on
7922		 * Alpha platforms
7923		 */
7924		fix_mac_address(sp);
7925		s2io_reset(sp);
7926	}
7927
7928	/*
7929	 * MAC address initialization.
7930	 * For now only one mac address will be read and used.
7931	 */
7932	bar0 = sp->bar0;
7933	val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
7934		RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
7935	writeq(val64, &bar0->rmac_addr_cmd_mem);
7936	wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
7937			      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
7938			      S2IO_BIT_RESET, true);
7939	tmp64 = readq(&bar0->rmac_addr_data0_mem);
7940	mac_down = (u32)tmp64;
7941	mac_up = (u32) (tmp64 >> 32);
7942
7943	sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7944	sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7945	sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
7946	sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
7947	sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
7948	sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
7949
7950	/*  Set the factory defined MAC address initially   */
7951	dev->addr_len = ETH_ALEN;
7952	eth_hw_addr_set(dev, sp->def_mac_addr[0].mac_addr);
7953
7954	/* initialize number of multicast & unicast MAC entries variables */
7955	if (sp->device_type == XFRAME_I_DEVICE) {
7956		config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
7957		config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
7958		config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
7959	} else if (sp->device_type == XFRAME_II_DEVICE) {
7960		config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
7961		config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
7962		config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
7963	}
7964
7965	/* MTU range: 46 - 9600 */
7966	dev->min_mtu = MIN_MTU;
7967	dev->max_mtu = S2IO_JUMBO_SIZE;
7968
7969	/* store mac addresses from CAM to s2io_nic structure */
7970	do_s2io_store_unicast_mc(sp);
7971
7972	/* Configure MSIX vector for number of rings configured plus one */
7973	if ((sp->device_type == XFRAME_II_DEVICE) &&
7974	    (config->intr_type == MSI_X))
7975		sp->num_entries = config->rx_ring_num + 1;
7976
7977	/* Store the values of the MSIX table in the s2io_nic structure */
7978	store_xmsi_data(sp);
7979	/* reset Nic and bring it to known state */
7980	s2io_reset(sp);
7981
7982	/*
7983	 * Initialize link state flags
7984	 * and the card state parameter
7985	 */
7986	sp->state = 0;
7987
7988	/* Initialize spinlocks */
7989	for (i = 0; i < sp->config.tx_fifo_num; i++) {
7990		struct fifo_info *fifo = &mac_control->fifos[i];
7991
7992		spin_lock_init(&fifo->tx_lock);
7993	}
7994
7995	/*
7996	 * SXE-002: Configure link and activity LED to init state
7997	 * on driver load.
7998	 */
7999	subid = sp->pdev->subsystem_device;
8000	if ((subid & 0xFF) >= 0x07) {
8001		val64 = readq(&bar0->gpio_control);
8002		val64 |= 0x0000800000000000ULL;
8003		writeq(val64, &bar0->gpio_control);
8004		val64 = 0x0411040400000000ULL;
8005		writeq(val64, (void __iomem *)bar0 + 0x2700);
8006		val64 = readq(&bar0->gpio_control);
8007	}
8008
8009	sp->rx_csum = 1;	/* Rx chksum verify enabled by default */
8010
8011	if (register_netdev(dev)) {
8012		DBG_PRINT(ERR_DBG, "Device registration failed\n");
8013		ret = -ENODEV;
8014		goto register_failed;
8015	}
8016	s2io_vpd_read(sp);
8017	DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n");
8018	DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name,
8019		  sp->product_name, pdev->revision);
8020	DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
8021		  s2io_driver_version);
8022	DBG_PRINT(ERR_DBG, "%s: MAC Address: %pM\n", dev->name, dev->dev_addr);
8023	DBG_PRINT(ERR_DBG, "Serial number: %s\n", sp->serial_num);
8024	if (sp->device_type & XFRAME_II_DEVICE) {
8025		mode = s2io_print_pci_mode(sp);
8026		if (mode < 0) {
8027			ret = -EBADSLT;
8028			unregister_netdev(dev);
8029			goto set_swap_failed;
8030		}
8031	}
8032	switch (sp->rxd_mode) {
8033	case RXD_MODE_1:
8034		DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
8035			  dev->name);
8036		break;
8037	case RXD_MODE_3B:
8038		DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
8039			  dev->name);
8040		break;
8041	}
8042
8043	switch (sp->config.napi) {
8044	case 0:
8045		DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
8046		break;
8047	case 1:
8048		DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
8049		break;
8050	}
8051
8052	DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
8053		  sp->config.tx_fifo_num);
8054
8055	DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
8056		  sp->config.rx_ring_num);
8057
8058	switch (sp->config.intr_type) {
8059	case INTA:
8060		DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
8061		break;
8062	case MSI_X:
8063		DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
8064		break;
8065	}
8066	if (sp->config.multiq) {
8067		for (i = 0; i < sp->config.tx_fifo_num; i++) {
8068			struct fifo_info *fifo = &mac_control->fifos[i];
8069
8070			fifo->multiq = config->multiq;
8071		}
8072		DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
8073			  dev->name);
8074	} else
8075		DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
8076			  dev->name);
8077
8078	switch (sp->config.tx_steering_type) {
8079	case NO_STEERING:
8080		DBG_PRINT(ERR_DBG, "%s: No steering enabled for transmit\n",
8081			  dev->name);
8082		break;
8083	case TX_PRIORITY_STEERING:
8084		DBG_PRINT(ERR_DBG,
8085			  "%s: Priority steering enabled for transmit\n",
8086			  dev->name);
8087		break;
8088	case TX_DEFAULT_STEERING:
8089		DBG_PRINT(ERR_DBG,
8090			  "%s: Default steering enabled for transmit\n",
8091			  dev->name);
8092	}
8093
8094	DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
8095		  dev->name);
8096	/* Initialize device name */
8097	snprintf(sp->name, sizeof(sp->name), "%s Neterion %s", dev->name,
8098		 sp->product_name);
8099
8100	if (vlan_tag_strip)
8101		sp->vlan_strip_flag = 1;
8102	else
8103		sp->vlan_strip_flag = 0;
8104
8105	/*
8106	 * Make Link state as off at this point, when the Link change
8107	 * interrupt comes the state will be automatically changed to
8108	 * the right state.
8109	 */
8110	netif_carrier_off(dev);
8111
8112	return 0;
8113
8114register_failed:
8115set_swap_failed:
8116	iounmap(sp->bar1);
8117bar1_remap_failed:
8118	iounmap(sp->bar0);
8119bar0_remap_failed:
8120mem_alloc_failed:
8121	free_shared_mem(sp);
8122	pci_disable_device(pdev);
8123	pci_release_regions(pdev);
8124	free_netdev(dev);
8125
8126	return ret;
8127}
8128
8129/**
8130 * s2io_rem_nic - Free the PCI device
8131 * @pdev: structure containing the PCI related information of the device.
8132 * Description: This function is called by the Pci subsystem to release a
8133 * PCI device and free up all resource held up by the device. This could
8134 * be in response to a Hot plug event or when the driver is to be removed
8135 * from memory.
8136 */
8137
static void s2io_rem_nic(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct s2io_nic *sp;

	if (dev == NULL) {
		DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
		return;
	}

	sp = netdev_priv(dev);

	/* Stop deferred work items before the netdev disappears. */
	cancel_work_sync(&sp->rst_timer_task);
	cancel_work_sync(&sp->set_link_task);

	unregister_netdev(dev);

	/* Release DMA shared memory, BAR mappings and PCI resources. */
	free_shared_mem(sp);
	iounmap(sp->bar0);
	iounmap(sp->bar1);
	pci_release_regions(pdev);
	free_netdev(dev);
	pci_disable_device(pdev);
}
8162
/* Register the PCI driver; expands to the module init/exit boilerplate. */
module_pci_driver(s2io_driver);
8164
8165static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8166				struct tcphdr **tcp, struct RxD_t *rxdp,
8167				struct s2io_nic *sp)
8168{
8169	int ip_off;
8170	u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8171
8172	if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8173		DBG_PRINT(INIT_DBG,
8174			  "%s: Non-TCP frames not supported for LRO\n",
8175			  __func__);
8176		return -1;
8177	}
8178
8179	/* Checking for DIX type or DIX type with VLAN */
8180	if ((l2_type == 0) || (l2_type == 4)) {
8181		ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8182		/*
8183		 * If vlan stripping is disabled and the frame is VLAN tagged,
8184		 * shift the offset by the VLAN header size bytes.
8185		 */
8186		if ((!sp->vlan_strip_flag) &&
8187		    (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
8188			ip_off += HEADER_VLAN_SIZE;
8189	} else {
8190		/* LLC, SNAP etc are considered non-mergeable */
8191		return -1;
8192	}
8193
8194	*ip = (struct iphdr *)(buffer + ip_off);
8195	ip_len = (u8)((*ip)->ihl);
8196	ip_len <<= 2;
8197	*tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8198
8199	return 0;
8200}
8201
8202static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8203				  struct tcphdr *tcp)
8204{
8205	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8206	if ((lro->iph->saddr != ip->saddr) ||
8207	    (lro->iph->daddr != ip->daddr) ||
8208	    (lro->tcph->source != tcp->source) ||
8209	    (lro->tcph->dest != tcp->dest))
8210		return -1;
8211	return 0;
8212}
8213
8214static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8215{
8216	return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
8217}
8218
8219static void initiate_new_session(struct lro *lro, u8 *l2h,
8220				 struct iphdr *ip, struct tcphdr *tcp,
8221				 u32 tcp_pyld_len, u16 vlan_tag)
8222{
8223	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8224	lro->l2h = l2h;
8225	lro->iph = ip;
8226	lro->tcph = tcp;
8227	lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
8228	lro->tcp_ack = tcp->ack_seq;
8229	lro->sg_num = 1;
8230	lro->total_len = ntohs(ip->tot_len);
8231	lro->frags_len = 0;
8232	lro->vlan_tag = vlan_tag;
8233	/*
8234	 * Check if we saw TCP timestamp.
8235	 * Other consistency checks have already been done.
8236	 */
8237	if (tcp->doff == 8) {
8238		__be32 *ptr;
8239		ptr = (__be32 *)(tcp+1);
8240		lro->saw_ts = 1;
8241		lro->cur_tsval = ntohl(*(ptr+1));
8242		lro->cur_tsecr = *(ptr+2);
8243	}
8244	lro->in_use = 1;
8245}
8246
8247static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
8248{
8249	struct iphdr *ip = lro->iph;
8250	struct tcphdr *tcp = lro->tcph;
8251	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8252
8253	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8254
8255	/* Update L3 header */
8256	csum_replace2(&ip->check, ip->tot_len, htons(lro->total_len));
8257	ip->tot_len = htons(lro->total_len);
8258
8259	/* Update L4 header */
8260	tcp->ack_seq = lro->tcp_ack;
8261	tcp->window = lro->window;
8262
8263	/* Update tsecr field if this session has timestamps enabled */
8264	if (lro->saw_ts) {
8265		__be32 *ptr = (__be32 *)(tcp + 1);
8266		*(ptr+2) = lro->cur_tsecr;
8267	}
8268
8269	/* Update counters required for calculation of
8270	 * average no. of packets aggregated.
8271	 */
8272	swstats->sum_avg_pkts_aggregated += lro->sg_num;
8273	swstats->num_aggregations++;
8274}
8275
8276static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
8277			     struct tcphdr *tcp, u32 l4_pyld)
8278{
8279	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8280	lro->total_len += l4_pyld;
8281	lro->frags_len += l4_pyld;
8282	lro->tcp_next_seq += l4_pyld;
8283	lro->sg_num++;
8284
8285	/* Update ack seq no. and window ad(from this pkt) in LRO object */
8286	lro->tcp_ack = tcp->ack_seq;
8287	lro->window = tcp->window;
8288
8289	if (lro->saw_ts) {
8290		__be32 *ptr;
8291		/* Update tsecr and tsval from this packet */
8292		ptr = (__be32 *)(tcp+1);
8293		lro->cur_tsval = ntohl(*(ptr+1));
8294		lro->cur_tsecr = *(ptr + 2);
8295	}
8296}
8297
8298static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
8299				    struct tcphdr *tcp, u32 tcp_pyld_len)
8300{
8301	u8 *ptr;
8302
8303	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8304
8305	if (!tcp_pyld_len) {
8306		/* Runt frame or a pure ack */
8307		return -1;
8308	}
8309
8310	if (ip->ihl != 5) /* IP has options */
8311		return -1;
8312
8313	/* If we see CE codepoint in IP header, packet is not mergeable */
8314	if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
8315		return -1;
8316
8317	/* If we see ECE or CWR flags in TCP header, packet is not mergeable */
8318	if (tcp->urg || tcp->psh || tcp->rst ||
8319	    tcp->syn || tcp->fin ||
8320	    tcp->ece || tcp->cwr || !tcp->ack) {
8321		/*
8322		 * Currently recognize only the ack control word and
8323		 * any other control field being set would result in
8324		 * flushing the LRO session
8325		 */
8326		return -1;
8327	}
8328
8329	/*
8330	 * Allow only one TCP timestamp option. Don't aggregate if
8331	 * any other options are detected.
8332	 */
8333	if (tcp->doff != 5 && tcp->doff != 8)
8334		return -1;
8335
8336	if (tcp->doff == 8) {
8337		ptr = (u8 *)(tcp + 1);
8338		while (*ptr == TCPOPT_NOP)
8339			ptr++;
8340		if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
8341			return -1;
8342
8343		/* Ensure timestamp value increases monotonically */
8344		if (l_lro)
8345			if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
8346				return -1;
8347
8348		/* timestamp echo reply should be non-zero */
8349		if (*((__be32 *)(ptr+6)) == 0)
8350			return -1;
8351	}
8352
8353	return 0;
8354}
8355
/*
 * Classify a received TCP segment against this ring's LRO sessions and
 * perform the corresponding aggregation step.  Fills *lro, *tcp and
 * *tcp_len for the caller.
 *
 * Return codes consumed by the Rx path:
 *   <0 - not LRO capable at L2 (propagated from check_L2_lro_capable)
 *    0 - all sessions in use; send the packet up as-is
 *    1 - segment aggregated into an existing session
 *    2 - flush the matched session (out of sequence / not mergeable)
 *    3 - a new session was initiated for this segment
 *    4 - aggregated and session hit lro_max_aggr_per_sess; flush it
 *    5 - not L3/L4 aggregatable; send the packet up as-is
 */
static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
				 u8 **tcp, u32 *tcp_len, struct lro **lro,
				 struct RxD_t *rxdp, struct s2io_nic *sp)
{
	struct iphdr *ip;
	struct tcphdr *tcph;
	int ret = 0, i;
	u16 vlan_tag = 0;
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

	/* Locate the IP/TCP headers; bail out if the L2 framing is wrong. */
	ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
				   rxdp, sp);
	if (ret)
		return ret;

	DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr);

	vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
	tcph = (struct tcphdr *)*tcp;
	*tcp_len = get_l4_pyld_length(ip, tcph);
	/* First pass: look for an in-use session with a matching 4-tuple. */
	for (i = 0; i < MAX_LRO_SESSIONS; i++) {
		struct lro *l_lro = &ring_data->lro0_n[i];
		if (l_lro->in_use) {
			if (check_for_socket_match(l_lro, ip, tcph))
				continue;
			/* Sock pair matched */
			*lro = l_lro;

			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
				DBG_PRINT(INFO_DBG, "%s: Out of sequence. "
					  "expected 0x%x, actual 0x%x\n",
					  __func__,
					  (*lro)->tcp_next_seq,
					  ntohl(tcph->seq));

				swstats->outof_sequence_pkts++;
				ret = 2;
				break;
			}

			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,
						      *tcp_len))
				ret = 1; /* Aggregate */
			else
				ret = 2; /* Flush both */
			break;
		}
	}

	if (ret == 0) {
		/* Before searching for available LRO objects,
		 * check if the pkt is L3/L4 aggregatable. If not
		 * don't create new LRO session. Just send this
		 * packet up.
		 */
		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len))
			return 5;

		/* Second pass: claim a free session slot. */
		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
			struct lro *l_lro = &ring_data->lro0_n[i];
			if (!(l_lro->in_use)) {
				*lro = l_lro;
				ret = 3; /* Begin anew */
				break;
			}
		}
	}

	if (ret == 0) { /* sessions exceeded */
		DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
			  __func__);
		*lro = NULL;
		return ret;
	}

	/* Perform the action decided above. */
	switch (ret) {
	case 3:
		initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
				     vlan_tag);
		break;
	case 2:
		/* Finalize headers so the flushed skb is consistent. */
		update_L3L4_header(sp, *lro);
		break;
	case 1:
		aggregate_new_rx(*lro, ip, tcph, *tcp_len);
		if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
			update_L3L4_header(sp, *lro);
			ret = 4; /* Flush the LRO */
		}
		break;
	default:
		DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n", __func__);
		break;
	}

	return ret;
}
8453
8454static void clear_lro_session(struct lro *lro)
8455{
8456	static u16 lro_struct_size = sizeof(struct lro);
8457
8458	memset(lro, 0, lro_struct_size);
8459}
8460
8461static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
8462{
8463	struct net_device *dev = skb->dev;
8464	struct s2io_nic *sp = netdev_priv(dev);
8465
8466	skb->protocol = eth_type_trans(skb, dev);
8467	if (vlan_tag && sp->vlan_strip_flag)
8468		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
8469	if (sp->config.napi)
8470		netif_receive_skb(skb);
8471	else
8472		netif_rx(skb);
8473}
8474
static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
			   struct sk_buff *skb, u32 tcp_len)
{
	struct sk_buff *first = lro->parent;
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

	/* Grow the head skb's accounting by this segment's payload. */
	first->len += tcp_len;
	first->data_len = lro->frags_len;
	/* Strip headers so only the TCP payload remains in this skb. */
	skb_pull(skb, (skb->len - tcp_len));
	/* Chain the payload-only skb onto the head skb's frag_list. */
	if (skb_shinfo(first)->frag_list)
		lro->last_frag->next = skb;
	else
		skb_shinfo(first)->frag_list = skb;
	first->truesize += skb->truesize;
	lro->last_frag = skb;
	swstats->clubbed_frms_cnt++;
}
8492
8493/**
8494 * s2io_io_error_detected - called when PCI error is detected
8495 * @pdev: Pointer to PCI device
8496 * @state: The current pci connection state
8497 *
8498 * This function is called after a PCI bus error affecting
8499 * this device has been detected.
8500 */
8501static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8502					       pci_channel_state_t state)
8503{
8504	struct net_device *netdev = pci_get_drvdata(pdev);
8505	struct s2io_nic *sp = netdev_priv(netdev);
8506
8507	netif_device_detach(netdev);
8508
8509	if (state == pci_channel_io_perm_failure)
8510		return PCI_ERS_RESULT_DISCONNECT;
8511
8512	if (netif_running(netdev)) {
8513		/* Bring down the card, while avoiding PCI I/O */
8514		do_s2io_card_down(sp, 0);
8515	}
8516	pci_disable_device(pdev);
8517
8518	return PCI_ERS_RESULT_NEED_RESET;
8519}
8520
8521/**
8522 * s2io_io_slot_reset - called after the pci bus has been reset.
8523 * @pdev: Pointer to PCI device
8524 *
8525 * Restart the card from scratch, as if from a cold-boot.
8526 * At this point, the card has experienced a hard reset,
8527 * followed by fixups by BIOS, and has its config space
8528 * set up identically to what it was at cold boot.
8529 */
8530static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8531{
8532	struct net_device *netdev = pci_get_drvdata(pdev);
8533	struct s2io_nic *sp = netdev_priv(netdev);
8534
8535	if (pci_enable_device(pdev)) {
8536		pr_err("Cannot re-enable PCI device after reset.\n");
8537		return PCI_ERS_RESULT_DISCONNECT;
8538	}
8539
8540	pci_set_master(pdev);
8541	s2io_reset(sp);
8542
8543	return PCI_ERS_RESULT_RECOVERED;
8544}
8545
8546/**
8547 * s2io_io_resume - called when traffic can start flowing again.
8548 * @pdev: Pointer to PCI device
8549 *
8550 * This callback is called when the error recovery driver tells
8551 * us that its OK to resume normal operation.
8552 */
8553static void s2io_io_resume(struct pci_dev *pdev)
8554{
8555	struct net_device *netdev = pci_get_drvdata(pdev);
8556	struct s2io_nic *sp = netdev_priv(netdev);
8557
8558	if (netif_running(netdev)) {
8559		if (s2io_card_up(sp)) {
8560			pr_err("Can't bring device back up after reset.\n");
8561			return;
8562		}
8563
8564		if (do_s2io_prog_unicast(netdev, netdev->dev_addr) == FAILURE) {
8565			s2io_card_down(sp);
8566			pr_err("Can't restore mac addr after reset.\n");
8567			return;
8568		}
8569	}
8570
8571	netif_device_attach(netdev);
8572	netif_tx_wake_all_queues(netdev);
8573}
v6.2
   1/************************************************************************
   2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
   3 * Copyright(c) 2002-2010 Exar Corp.
   4 *
   5 * This software may be used and distributed according to the terms of
   6 * the GNU General Public License (GPL), incorporated herein by reference.
   7 * Drivers based on or derived from this code fall under the GPL and must
   8 * retain the authorship, copyright and license notice.  This file is not
   9 * a complete program and may only be used when the entire operating
  10 * system is licensed under the GPL.
  11 * See the file COPYING in this distribution for more information.
  12 *
  13 * Credits:
  14 * Jeff Garzik		: For pointing out the improper error condition
  15 *			  check in the s2io_xmit routine and also some
  16 *			  issues in the Tx watch dog function. Also for
  17 *			  patiently answering all those innumerable
  18 *			  questions regarding the 2.6 porting issues.
  19 * Stephen Hemminger	: Providing proper 2.6 porting mechanism for some
  20 *			  macros available only in 2.6 Kernel.
  21 * Francois Romieu	: For pointing out all code part that were
  22 *			  deprecated and also styling related comments.
  23 * Grant Grundler	: For helping me get rid of some Architecture
  24 *			  dependent code.
  25 * Christopher Hellwig	: Some more 2.6 specific issues in the driver.
  26 *
  27 * The module loadable parameters that are supported by the driver and a brief
  28 * explanation of all the variables.
  29 *
  30 * rx_ring_num : This can be used to program the number of receive rings used
  31 * in the driver.
  32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
  33 *     This is also an array of size 8.
  34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
  35 *		values are 1, 2.
  36 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
  37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
  38 * Tx descriptors that can be associated with each corresponding FIFO.
  39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
  40 *     2(MSI_X). Default value is '2(MSI_X)'
  41 * lro_max_pkts: This parameter defines maximum number of packets can be
  42 *     aggregated as a single large packet
  43 * napi: This parameter used to enable/disable NAPI (polling Rx)
  44 *     Possible values '1' for enable and '0' for disable. Default is '1'
  45 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
  46 *                 Possible values '1' for enable , '0' for disable.
  47 *                 Default is '2' - which means disable in promisc mode
  48 *                 and enable in non-promiscuous mode.
  49 * multiq: This parameter used to enable/disable MULTIQUEUE support.
  50 *      Possible values '1' for enable and '0' for disable. Default is '0'
  51 ************************************************************************/
  52
  53#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  54
  55#include <linux/module.h>
  56#include <linux/types.h>
  57#include <linux/errno.h>
  58#include <linux/ioport.h>
  59#include <linux/pci.h>
  60#include <linux/dma-mapping.h>
  61#include <linux/kernel.h>
  62#include <linux/netdevice.h>
  63#include <linux/etherdevice.h>
  64#include <linux/mdio.h>
  65#include <linux/skbuff.h>
  66#include <linux/init.h>
  67#include <linux/delay.h>
  68#include <linux/stddef.h>
  69#include <linux/ioctl.h>
  70#include <linux/timex.h>
  71#include <linux/ethtool.h>
  72#include <linux/workqueue.h>
  73#include <linux/if_vlan.h>
  74#include <linux/ip.h>
  75#include <linux/tcp.h>
  76#include <linux/uaccess.h>
  77#include <linux/io.h>
  78#include <linux/io-64-nonatomic-lo-hi.h>
  79#include <linux/slab.h>
  80#include <linux/prefetch.h>
  81#include <net/tcp.h>
  82#include <net/checksum.h>
  83
  84#include <asm/div64.h>
  85#include <asm/irq.h>
  86
  87/* local include */
  88#include "s2io.h"
  89#include "s2io-regs.h"
  90
  91#define DRV_VERSION "2.0.26.28"
  92
  93/* S2io Driver name & version. */
  94static const char s2io_driver_name[] = "Neterion";
  95static const char s2io_driver_version[] = DRV_VERSION;
  96
  97static const int rxd_size[2] = {32, 48};
  98static const int rxd_count[2] = {127, 85};
  99
 100static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
 101{
 102	int ret;
 103
 104	ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
 105	       (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
 106
 107	return ret;
 108}
 109
 110/*
 111 * Cards with following subsystem_id have a link state indication
 112 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
 113 * macro below identifies these cards given the subsystem_id.
 114 */
 115#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)		\
 116	(dev_type == XFRAME_I_DEVICE) ?					\
 117	((((subid >= 0x600B) && (subid <= 0x600D)) ||			\
 118	  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
 119
 120#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
 121				      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
 122
/* Non-zero when the adapter's card-up state bit is set. */
static inline int is_s2io_card_up(const struct s2io_nic *sp)
{
	return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
}
 127
 128/* Ethtool related variables and Macros. */
 129static const char s2io_gstrings[][ETH_GSTRING_LEN] = {
 130	"Register test\t(offline)",
 131	"Eeprom test\t(offline)",
 132	"Link test\t(online)",
 133	"RLDRAM test\t(offline)",
 134	"BIST Test\t(offline)"
 135};
 136
 137static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
 138	{"tmac_frms"},
 139	{"tmac_data_octets"},
 140	{"tmac_drop_frms"},
 141	{"tmac_mcst_frms"},
 142	{"tmac_bcst_frms"},
 143	{"tmac_pause_ctrl_frms"},
 144	{"tmac_ttl_octets"},
 145	{"tmac_ucst_frms"},
 146	{"tmac_nucst_frms"},
 147	{"tmac_any_err_frms"},
 148	{"tmac_ttl_less_fb_octets"},
 149	{"tmac_vld_ip_octets"},
 150	{"tmac_vld_ip"},
 151	{"tmac_drop_ip"},
 152	{"tmac_icmp"},
 153	{"tmac_rst_tcp"},
 154	{"tmac_tcp"},
 155	{"tmac_udp"},
 156	{"rmac_vld_frms"},
 157	{"rmac_data_octets"},
 158	{"rmac_fcs_err_frms"},
 159	{"rmac_drop_frms"},
 160	{"rmac_vld_mcst_frms"},
 161	{"rmac_vld_bcst_frms"},
 162	{"rmac_in_rng_len_err_frms"},
 163	{"rmac_out_rng_len_err_frms"},
 164	{"rmac_long_frms"},
 165	{"rmac_pause_ctrl_frms"},
 166	{"rmac_unsup_ctrl_frms"},
 167	{"rmac_ttl_octets"},
 168	{"rmac_accepted_ucst_frms"},
 169	{"rmac_accepted_nucst_frms"},
 170	{"rmac_discarded_frms"},
 171	{"rmac_drop_events"},
 172	{"rmac_ttl_less_fb_octets"},
 173	{"rmac_ttl_frms"},
 174	{"rmac_usized_frms"},
 175	{"rmac_osized_frms"},
 176	{"rmac_frag_frms"},
 177	{"rmac_jabber_frms"},
 178	{"rmac_ttl_64_frms"},
 179	{"rmac_ttl_65_127_frms"},
 180	{"rmac_ttl_128_255_frms"},
 181	{"rmac_ttl_256_511_frms"},
 182	{"rmac_ttl_512_1023_frms"},
 183	{"rmac_ttl_1024_1518_frms"},
 184	{"rmac_ip"},
 185	{"rmac_ip_octets"},
 186	{"rmac_hdr_err_ip"},
 187	{"rmac_drop_ip"},
 188	{"rmac_icmp"},
 189	{"rmac_tcp"},
 190	{"rmac_udp"},
 191	{"rmac_err_drp_udp"},
 192	{"rmac_xgmii_err_sym"},
 193	{"rmac_frms_q0"},
 194	{"rmac_frms_q1"},
 195	{"rmac_frms_q2"},
 196	{"rmac_frms_q3"},
 197	{"rmac_frms_q4"},
 198	{"rmac_frms_q5"},
 199	{"rmac_frms_q6"},
 200	{"rmac_frms_q7"},
 201	{"rmac_full_q0"},
 202	{"rmac_full_q1"},
 203	{"rmac_full_q2"},
 204	{"rmac_full_q3"},
 205	{"rmac_full_q4"},
 206	{"rmac_full_q5"},
 207	{"rmac_full_q6"},
 208	{"rmac_full_q7"},
 209	{"rmac_pause_cnt"},
 210	{"rmac_xgmii_data_err_cnt"},
 211	{"rmac_xgmii_ctrl_err_cnt"},
 212	{"rmac_accepted_ip"},
 213	{"rmac_err_tcp"},
 214	{"rd_req_cnt"},
 215	{"new_rd_req_cnt"},
 216	{"new_rd_req_rtry_cnt"},
 217	{"rd_rtry_cnt"},
 218	{"wr_rtry_rd_ack_cnt"},
 219	{"wr_req_cnt"},
 220	{"new_wr_req_cnt"},
 221	{"new_wr_req_rtry_cnt"},
 222	{"wr_rtry_cnt"},
 223	{"wr_disc_cnt"},
 224	{"rd_rtry_wr_ack_cnt"},
 225	{"txp_wr_cnt"},
 226	{"txd_rd_cnt"},
 227	{"txd_wr_cnt"},
 228	{"rxd_rd_cnt"},
 229	{"rxd_wr_cnt"},
 230	{"txf_rd_cnt"},
 231	{"rxf_wr_cnt"}
 232};
 233
 234static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
 235	{"rmac_ttl_1519_4095_frms"},
 236	{"rmac_ttl_4096_8191_frms"},
 237	{"rmac_ttl_8192_max_frms"},
 238	{"rmac_ttl_gt_max_frms"},
 239	{"rmac_osized_alt_frms"},
 240	{"rmac_jabber_alt_frms"},
 241	{"rmac_gt_max_alt_frms"},
 242	{"rmac_vlan_frms"},
 243	{"rmac_len_discard"},
 244	{"rmac_fcs_discard"},
 245	{"rmac_pf_discard"},
 246	{"rmac_da_discard"},
 247	{"rmac_red_discard"},
 248	{"rmac_rts_discard"},
 249	{"rmac_ingm_full_discard"},
 250	{"link_fault_cnt"}
 251};
 252
/* Ethtool key strings for statistics maintained in software by the driver
 * (the leading "\n DRIVER STATISTICS" entry is a section header that
 * ethtool prints verbatim).
 */
static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
	{"\n DRIVER STATISTICS"},
	{"single_bit_ecc_errs"},
	{"double_bit_ecc_errs"},
	{"parity_err_cnt"},
	{"serious_err_cnt"},
	{"soft_reset_cnt"},
	{"fifo_full_cnt"},
	{"ring_0_full_cnt"},
	{"ring_1_full_cnt"},
	{"ring_2_full_cnt"},
	{"ring_3_full_cnt"},
	{"ring_4_full_cnt"},
	{"ring_5_full_cnt"},
	{"ring_6_full_cnt"},
	{"ring_7_full_cnt"},
	{"alarm_transceiver_temp_high"},
	{"alarm_transceiver_temp_low"},
	{"alarm_laser_bias_current_high"},
	{"alarm_laser_bias_current_low"},
	{"alarm_laser_output_power_high"},
	{"alarm_laser_output_power_low"},
	{"warn_transceiver_temp_high"},
	{"warn_transceiver_temp_low"},
	{"warn_laser_bias_current_high"},
	{"warn_laser_bias_current_low"},
	{"warn_laser_output_power_high"},
	{"warn_laser_output_power_low"},
	{"lro_aggregated_pkts"},
	{"lro_flush_both_count"},
	{"lro_out_of_sequence_pkts"},
	{"lro_flush_due_to_max_pkts"},
	{"lro_avg_aggr_pkts"},
	{"mem_alloc_fail_cnt"},
	{"pci_map_fail_cnt"},
	{"watchdog_timer_cnt"},
	{"mem_allocated"},
	{"mem_freed"},
	{"link_up_cnt"},
	{"link_down_cnt"},
	{"link_up_time"},
	{"link_down_time"},
	{"tx_tcode_buf_abort_cnt"},
	{"tx_tcode_desc_abort_cnt"},
	{"tx_tcode_parity_err_cnt"},
	{"tx_tcode_link_loss_cnt"},
	{"tx_tcode_list_proc_err_cnt"},
	{"rx_tcode_parity_err_cnt"},
	{"rx_tcode_abort_cnt"},
	{"rx_tcode_parity_abort_cnt"},
	{"rx_tcode_rda_fail_cnt"},
	{"rx_tcode_unkn_prot_cnt"},
	{"rx_tcode_fcs_err_cnt"},
	{"rx_tcode_buf_size_err_cnt"},
	{"rx_tcode_rxd_corrupt_cnt"},
	{"rx_tcode_unkn_err_cnt"},
	{"tda_err_cnt"},
	{"pfc_err_cnt"},
	{"pcc_err_cnt"},
	{"tti_err_cnt"},
	{"tpa_err_cnt"},
	{"sm_err_cnt"},
	{"lso_err_cnt"},
	{"mac_tmac_err_cnt"},
	{"mac_rmac_err_cnt"},
	{"xgxs_txgxs_err_cnt"},
	{"xgxs_rxgxs_err_cnt"},
	{"rc_err_cnt"},
	{"prc_pcix_err_cnt"},
	{"rpa_err_cnt"},
	{"rda_err_cnt"},
	{"rti_err_cnt"},
	{"mc_err_cnt"}
};
 327
 328#define S2IO_XENA_STAT_LEN	ARRAY_SIZE(ethtool_xena_stats_keys)
 329#define S2IO_ENHANCED_STAT_LEN	ARRAY_SIZE(ethtool_enhanced_stats_keys)
 330#define S2IO_DRIVER_STAT_LEN	ARRAY_SIZE(ethtool_driver_stats_keys)
 331
 332#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
 333#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)
 334
 335#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
 336#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)
 337
 338#define S2IO_TEST_LEN	ARRAY_SIZE(s2io_gstrings)
 339#define S2IO_STRINGS_LEN	(S2IO_TEST_LEN * ETH_GSTRING_LEN)
 340
 341/* copy mac addr to def_mac_addr array */
 342static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
 343{
 344	sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
 345	sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
 346	sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
 347	sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
 348	sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
 349	sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
 350}
 351
 352/*
 353 * Constants to be programmed into the Xena's registers, to configure
 354 * the XAUI.
 355 */
 356
#define	END_SIGN	0x0
/* XAUI/DTX configuration sequence for Xframe II (Herc): alternating
 * "set address" / "write data" register pairs, written verbatim to
 * bar0->dtx_control by init_nic() and terminated by END_SIGN.
 */
static const u64 herc_act_dtx_cfg[] = {
	/* Set address */
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	/* Write data */
	0x8000051536750004ULL, 0x80000515367500E4ULL,
	/* Set address */
	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	/* Write data */
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
	/* Set address */
	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	/* Write data */
	0x801205150D440004ULL, 0x801205150D4400E4ULL,
	/* Set address */
	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	/* Done */
	END_SIGN
};
 378
/* XAUI/DTX configuration sequence for the original Xframe I (Xena),
 * used by init_nic() when the device is not an Xframe II; same
 * set-address/write-data pairing, END_SIGN terminated.
 */
static const u64 xena_dtx_cfg[] = {
	/* Set address */
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	/* Write data */
	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
	/* Set address */
	0x8001051500000000ULL, 0x80010515000000E0ULL,
	/* Write data */
	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
	/* Set address */
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	END_SIGN
};
 394
 395/*
 396 * Constants for Fixing the MacAddress problem seen mostly on
 397 * Alpha machines.
 398 */
/*
 * Constants for Fixing the MacAddress problem seen mostly on
 * Alpha machines.  Table is terminated by END_SIGN; consumer not
 * visible in this chunk (presumably a fix_mac_address() helper).
 */
static const u64 fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
	END_SIGN
};
 416
 
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/* Module Loadable parameters. */
S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
S2IO_PARM_INT(rx_ring_num, 1);
S2IO_PARM_INT(multiq, 0);
S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* 0 is no steering, 1 is Priority steering, 2 is Default steering */
S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 2);
/* Large receive offload (LRO) feature tunables */

/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size(64K)
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);

S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);

/* Per-FIFO Tx descriptor counts: FIFO 0 has its own default, FIFOs 1-7
 * share another. */
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
{DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
/* Per-ring Rx block counts. */
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
{[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
/* Per-ring frame-length steering values; default 0 (presumably "use
 * device default" - confirm against the consumer). */
static unsigned int rts_frm_len[MAX_RX_RINGS] =
{[0 ...(MAX_RX_RINGS - 1)] = 0 };

module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
 461
 462/*
 463 * S2IO device table.
 464 * This table lists all the devices that this driver supports.
 465 */
 466static const struct pci_device_id s2io_tbl[] = {
 467	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
 468	 PCI_ANY_ID, PCI_ANY_ID},
 469	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
 470	 PCI_ANY_ID, PCI_ANY_ID},
 471	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
 472	 PCI_ANY_ID, PCI_ANY_ID},
 473	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
 474	 PCI_ANY_ID, PCI_ANY_ID},
 475	{0,}
 476};
 477
 478MODULE_DEVICE_TABLE(pci, s2io_tbl);
 479
/* PCI error recovery callbacks (AER): detect, slot reset, resume. */
static const struct pci_error_handlers s2io_err_handler = {
	.error_detected = s2io_io_error_detected,
	.slot_reset = s2io_io_slot_reset,
	.resume = s2io_io_resume,
};
 485
/* PCI driver registration: binds probe/remove to the ids in s2io_tbl. */
static struct pci_driver s2io_driver = {
	.name = "S2IO",
	.id_table = s2io_tbl,
	.probe = s2io_init_nic,
	.remove = s2io_rem_nic,
	.err_handler = &s2io_err_handler,
};
 493
 494/* A simplifier macro used both by init and free shared_mem Fns(). */
 495#define TXD_MEM_PAGE_CNT(len, per_each) DIV_ROUND_UP(len, per_each)
 496
 497/* netqueue manipulation helper functions */
 498static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
 499{
 500	if (!sp->config.multiq) {
 501		int i;
 502
 503		for (i = 0; i < sp->config.tx_fifo_num; i++)
 504			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
 505	}
 506	netif_tx_stop_all_queues(sp->dev);
 507}
 508
 509static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
 510{
 511	if (!sp->config.multiq)
 512		sp->mac_control.fifos[fifo_no].queue_state =
 513			FIFO_QUEUE_STOP;
 514
 515	netif_tx_stop_all_queues(sp->dev);
 516}
 517
 518static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
 519{
 520	if (!sp->config.multiq) {
 521		int i;
 522
 523		for (i = 0; i < sp->config.tx_fifo_num; i++)
 524			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
 525	}
 526	netif_tx_start_all_queues(sp->dev);
 527}
 528
 529static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
 530{
 531	if (!sp->config.multiq) {
 532		int i;
 533
 534		for (i = 0; i < sp->config.tx_fifo_num; i++)
 535			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
 536	}
 537	netif_tx_wake_all_queues(sp->dev);
 538}
 539
 540static inline void s2io_wake_tx_queue(
 541	struct fifo_info *fifo, int cnt, u8 multiq)
 542{
 543
 544	if (multiq) {
 545		if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
 546			netif_wake_subqueue(fifo->dev, fifo->fifo_no);
 547	} else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
 548		if (netif_queue_stopped(fifo->dev)) {
 549			fifo->queue_state = FIFO_QUEUE_START;
 550			netif_wake_queue(fifo->dev);
 551		}
 552	}
 553}
 554
 555/**
 556 * init_shared_mem - Allocation and Initialization of Memory
 557 * @nic: Device private variable.
 558 * Description: The function allocates all the memory areas shared
 559 * between the NIC and the driver. This includes Tx descriptors,
 560 * Rx descriptors and the statistics block.
 561 */
 562
 563static int init_shared_mem(struct s2io_nic *nic)
 564{
 565	u32 size;
 566	void *tmp_v_addr, *tmp_v_addr_next;
 567	dma_addr_t tmp_p_addr, tmp_p_addr_next;
 568	struct RxD_block *pre_rxd_blk = NULL;
 569	int i, j, blk_cnt;
 570	int lst_size, lst_per_page;
 571	struct net_device *dev = nic->dev;
 572	unsigned long tmp;
 573	struct buffAdd *ba;
 574	struct config_param *config = &nic->config;
 575	struct mac_info *mac_control = &nic->mac_control;
 576	unsigned long long mem_allocated = 0;
 577
 578	/* Allocation and initialization of TXDLs in FIFOs */
 579	size = 0;
 580	for (i = 0; i < config->tx_fifo_num; i++) {
 581		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
 582
 583		size += tx_cfg->fifo_len;
 584	}
 585	if (size > MAX_AVAILABLE_TXDS) {
 586		DBG_PRINT(ERR_DBG,
 587			  "Too many TxDs requested: %d, max supported: %d\n",
 588			  size, MAX_AVAILABLE_TXDS);
 589		return -EINVAL;
 590	}
 591
 592	size = 0;
 593	for (i = 0; i < config->tx_fifo_num; i++) {
 594		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
 595
 596		size = tx_cfg->fifo_len;
 597		/*
 598		 * Legal values are from 2 to 8192
 599		 */
 600		if (size < 2) {
 601			DBG_PRINT(ERR_DBG, "Fifo %d: Invalid length (%d) - "
 602				  "Valid lengths are 2 through 8192\n",
 603				  i, size);
 604			return -EINVAL;
 605		}
 606	}
 607
 608	lst_size = (sizeof(struct TxD) * config->max_txds);
 609	lst_per_page = PAGE_SIZE / lst_size;
 610
 611	for (i = 0; i < config->tx_fifo_num; i++) {
 612		struct fifo_info *fifo = &mac_control->fifos[i];
 613		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
 614		int fifo_len = tx_cfg->fifo_len;
 615		int list_holder_size = fifo_len * sizeof(struct list_info_hold);
 616
 617		fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
 618		if (!fifo->list_info) {
 619			DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n");
 620			return -ENOMEM;
 621		}
 622		mem_allocated += list_holder_size;
 623	}
 624	for (i = 0; i < config->tx_fifo_num; i++) {
 625		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
 626						lst_per_page);
 627		struct fifo_info *fifo = &mac_control->fifos[i];
 628		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
 629
 630		fifo->tx_curr_put_info.offset = 0;
 631		fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
 632		fifo->tx_curr_get_info.offset = 0;
 633		fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
 634		fifo->fifo_no = i;
 635		fifo->nic = nic;
 636		fifo->max_txds = MAX_SKB_FRAGS + 2;
 637		fifo->dev = dev;
 638
 639		for (j = 0; j < page_num; j++) {
 640			int k = 0;
 641			dma_addr_t tmp_p;
 642			void *tmp_v;
 643			tmp_v = dma_alloc_coherent(&nic->pdev->dev, PAGE_SIZE,
 644						   &tmp_p, GFP_KERNEL);
 645			if (!tmp_v) {
 646				DBG_PRINT(INFO_DBG,
 647					  "dma_alloc_coherent failed for TxDL\n");
 648				return -ENOMEM;
 649			}
 650			/* If we got a zero DMA address(can happen on
 651			 * certain platforms like PPC), reallocate.
 652			 * Store virtual address of page we don't want,
 653			 * to be freed later.
 654			 */
 655			if (!tmp_p) {
 656				mac_control->zerodma_virt_addr = tmp_v;
 657				DBG_PRINT(INIT_DBG,
 658					  "%s: Zero DMA address for TxDL. "
 659					  "Virtual address %p\n",
 660					  dev->name, tmp_v);
 661				tmp_v = dma_alloc_coherent(&nic->pdev->dev,
 662							   PAGE_SIZE, &tmp_p,
 663							   GFP_KERNEL);
 664				if (!tmp_v) {
 665					DBG_PRINT(INFO_DBG,
 666						  "dma_alloc_coherent failed for TxDL\n");
 667					return -ENOMEM;
 668				}
 669				mem_allocated += PAGE_SIZE;
 670			}
 671			while (k < lst_per_page) {
 672				int l = (j * lst_per_page) + k;
 673				if (l == tx_cfg->fifo_len)
 674					break;
 675				fifo->list_info[l].list_virt_addr =
 676					tmp_v + (k * lst_size);
 677				fifo->list_info[l].list_phy_addr =
 678					tmp_p + (k * lst_size);
 679				k++;
 680			}
 681		}
 682	}
 683
 684	for (i = 0; i < config->tx_fifo_num; i++) {
 685		struct fifo_info *fifo = &mac_control->fifos[i];
 686		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
 687
 688		size = tx_cfg->fifo_len;
 689		fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
 690		if (!fifo->ufo_in_band_v)
 691			return -ENOMEM;
 692		mem_allocated += (size * sizeof(u64));
 693	}
 694
 695	/* Allocation and initialization of RXDs in Rings */
 696	size = 0;
 697	for (i = 0; i < config->rx_ring_num; i++) {
 698		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
 699		struct ring_info *ring = &mac_control->rings[i];
 700
 701		if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
 702			DBG_PRINT(ERR_DBG, "%s: Ring%d RxD count is not a "
 703				  "multiple of RxDs per Block\n",
 704				  dev->name, i);
 705			return FAILURE;
 706		}
 707		size += rx_cfg->num_rxd;
 708		ring->block_count = rx_cfg->num_rxd /
 709			(rxd_count[nic->rxd_mode] + 1);
 710		ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
 711	}
 712	if (nic->rxd_mode == RXD_MODE_1)
 713		size = (size * (sizeof(struct RxD1)));
 714	else
 715		size = (size * (sizeof(struct RxD3)));
 716
 717	for (i = 0; i < config->rx_ring_num; i++) {
 718		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
 719		struct ring_info *ring = &mac_control->rings[i];
 720
 721		ring->rx_curr_get_info.block_index = 0;
 722		ring->rx_curr_get_info.offset = 0;
 723		ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
 724		ring->rx_curr_put_info.block_index = 0;
 725		ring->rx_curr_put_info.offset = 0;
 726		ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
 727		ring->nic = nic;
 728		ring->ring_no = i;
 729
 730		blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
 731		/*  Allocating all the Rx blocks */
 732		for (j = 0; j < blk_cnt; j++) {
 733			struct rx_block_info *rx_blocks;
 734			int l;
 735
 736			rx_blocks = &ring->rx_blocks[j];
 737			size = SIZE_OF_BLOCK;	/* size is always page size */
 738			tmp_v_addr = dma_alloc_coherent(&nic->pdev->dev, size,
 739							&tmp_p_addr, GFP_KERNEL);
 740			if (tmp_v_addr == NULL) {
 741				/*
 742				 * In case of failure, free_shared_mem()
 743				 * is called, which should free any
 744				 * memory that was alloced till the
 745				 * failure happened.
 746				 */
 747				rx_blocks->block_virt_addr = tmp_v_addr;
 748				return -ENOMEM;
 749			}
 750			mem_allocated += size;
 751
 752			size = sizeof(struct rxd_info) *
 753				rxd_count[nic->rxd_mode];
 754			rx_blocks->block_virt_addr = tmp_v_addr;
 755			rx_blocks->block_dma_addr = tmp_p_addr;
 756			rx_blocks->rxds = kmalloc(size,  GFP_KERNEL);
 757			if (!rx_blocks->rxds)
 758				return -ENOMEM;
 759			mem_allocated += size;
 760			for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
 761				rx_blocks->rxds[l].virt_addr =
 762					rx_blocks->block_virt_addr +
 763					(rxd_size[nic->rxd_mode] * l);
 764				rx_blocks->rxds[l].dma_addr =
 765					rx_blocks->block_dma_addr +
 766					(rxd_size[nic->rxd_mode] * l);
 767			}
 768		}
 769		/* Interlinking all Rx Blocks */
 770		for (j = 0; j < blk_cnt; j++) {
 771			int next = (j + 1) % blk_cnt;
 772			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
 773			tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
 774			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
 775			tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;
 776
 777			pre_rxd_blk = tmp_v_addr;
 778			pre_rxd_blk->reserved_2_pNext_RxD_block =
 779				(unsigned long)tmp_v_addr_next;
 780			pre_rxd_blk->pNext_RxD_Blk_physical =
 781				(u64)tmp_p_addr_next;
 782		}
 783	}
 784	if (nic->rxd_mode == RXD_MODE_3B) {
 785		/*
 786		 * Allocation of Storages for buffer addresses in 2BUFF mode
 787		 * and the buffers as well.
 788		 */
 789		for (i = 0; i < config->rx_ring_num; i++) {
 790			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
 791			struct ring_info *ring = &mac_control->rings[i];
 792
 793			blk_cnt = rx_cfg->num_rxd /
 794				(rxd_count[nic->rxd_mode] + 1);
 795			size = sizeof(struct buffAdd *) * blk_cnt;
 796			ring->ba = kmalloc(size, GFP_KERNEL);
 797			if (!ring->ba)
 798				return -ENOMEM;
 799			mem_allocated += size;
 800			for (j = 0; j < blk_cnt; j++) {
 801				int k = 0;
 802
 803				size = sizeof(struct buffAdd) *
 804					(rxd_count[nic->rxd_mode] + 1);
 805				ring->ba[j] = kmalloc(size, GFP_KERNEL);
 806				if (!ring->ba[j])
 807					return -ENOMEM;
 808				mem_allocated += size;
 809				while (k != rxd_count[nic->rxd_mode]) {
 810					ba = &ring->ba[j][k];
 811					size = BUF0_LEN + ALIGN_SIZE;
 812					ba->ba_0_org = kmalloc(size, GFP_KERNEL);
 813					if (!ba->ba_0_org)
 814						return -ENOMEM;
 815					mem_allocated += size;
 816					tmp = (unsigned long)ba->ba_0_org;
 817					tmp += ALIGN_SIZE;
 818					tmp &= ~((unsigned long)ALIGN_SIZE);
 819					ba->ba_0 = (void *)tmp;
 820
 821					size = BUF1_LEN + ALIGN_SIZE;
 822					ba->ba_1_org = kmalloc(size, GFP_KERNEL);
 823					if (!ba->ba_1_org)
 824						return -ENOMEM;
 825					mem_allocated += size;
 826					tmp = (unsigned long)ba->ba_1_org;
 827					tmp += ALIGN_SIZE;
 828					tmp &= ~((unsigned long)ALIGN_SIZE);
 829					ba->ba_1 = (void *)tmp;
 830					k++;
 831				}
 832			}
 833		}
 834	}
 835
 836	/* Allocation and initialization of Statistics block */
 837	size = sizeof(struct stat_block);
 838	mac_control->stats_mem =
 839		dma_alloc_coherent(&nic->pdev->dev, size,
 840				   &mac_control->stats_mem_phy, GFP_KERNEL);
 841
 842	if (!mac_control->stats_mem) {
 843		/*
 844		 * In case of failure, free_shared_mem() is called, which
 845		 * should free any memory that was alloced till the
 846		 * failure happened.
 847		 */
 848		return -ENOMEM;
 849	}
 850	mem_allocated += size;
 851	mac_control->stats_mem_sz = size;
 852
 853	tmp_v_addr = mac_control->stats_mem;
 854	mac_control->stats_info = tmp_v_addr;
 855	memset(tmp_v_addr, 0, size);
 856	DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n",
 857		dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr);
 858	mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
 859	return SUCCESS;
 860}
 861
 862/**
 863 * free_shared_mem - Free the allocated Memory
 864 * @nic:  Device private variable.
 865 * Description: This function is to free all memory locations allocated by
 866 * the init_shared_mem() function and return it to the kernel.
 867 */
 868
 869static void free_shared_mem(struct s2io_nic *nic)
 870{
 871	int i, j, blk_cnt, size;
 872	void *tmp_v_addr;
 873	dma_addr_t tmp_p_addr;
 874	int lst_size, lst_per_page;
 875	struct net_device *dev;
 876	int page_num = 0;
 877	struct config_param *config;
 878	struct mac_info *mac_control;
 879	struct stat_block *stats;
 880	struct swStat *swstats;
 881
 882	if (!nic)
 883		return;
 884
 885	dev = nic->dev;
 886
 887	config = &nic->config;
 888	mac_control = &nic->mac_control;
 889	stats = mac_control->stats_info;
 890	swstats = &stats->sw_stat;
 891
 892	lst_size = sizeof(struct TxD) * config->max_txds;
 893	lst_per_page = PAGE_SIZE / lst_size;
 894
 895	for (i = 0; i < config->tx_fifo_num; i++) {
 896		struct fifo_info *fifo = &mac_control->fifos[i];
 897		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
 898
 899		page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
 900		for (j = 0; j < page_num; j++) {
 901			int mem_blks = (j * lst_per_page);
 902			struct list_info_hold *fli;
 903
 904			if (!fifo->list_info)
 905				return;
 906
 907			fli = &fifo->list_info[mem_blks];
 908			if (!fli->list_virt_addr)
 909				break;
 910			dma_free_coherent(&nic->pdev->dev, PAGE_SIZE,
 911					  fli->list_virt_addr,
 912					  fli->list_phy_addr);
 913			swstats->mem_freed += PAGE_SIZE;
 914		}
 915		/* If we got a zero DMA address during allocation,
 916		 * free the page now
 917		 */
 918		if (mac_control->zerodma_virt_addr) {
 919			dma_free_coherent(&nic->pdev->dev, PAGE_SIZE,
 920					  mac_control->zerodma_virt_addr,
 921					  (dma_addr_t)0);
 922			DBG_PRINT(INIT_DBG,
 923				  "%s: Freeing TxDL with zero DMA address. "
 924				  "Virtual address %p\n",
 925				  dev->name, mac_control->zerodma_virt_addr);
 926			swstats->mem_freed += PAGE_SIZE;
 927		}
 928		kfree(fifo->list_info);
 929		swstats->mem_freed += tx_cfg->fifo_len *
 930			sizeof(struct list_info_hold);
 931	}
 932
 933	size = SIZE_OF_BLOCK;
 934	for (i = 0; i < config->rx_ring_num; i++) {
 935		struct ring_info *ring = &mac_control->rings[i];
 936
 937		blk_cnt = ring->block_count;
 938		for (j = 0; j < blk_cnt; j++) {
 939			tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
 940			tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
 941			if (tmp_v_addr == NULL)
 942				break;
 943			dma_free_coherent(&nic->pdev->dev, size, tmp_v_addr,
 944					  tmp_p_addr);
 945			swstats->mem_freed += size;
 946			kfree(ring->rx_blocks[j].rxds);
 947			swstats->mem_freed += sizeof(struct rxd_info) *
 948				rxd_count[nic->rxd_mode];
 949		}
 950	}
 951
 952	if (nic->rxd_mode == RXD_MODE_3B) {
 953		/* Freeing buffer storage addresses in 2BUFF mode. */
 954		for (i = 0; i < config->rx_ring_num; i++) {
 955			struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
 956			struct ring_info *ring = &mac_control->rings[i];
 957
 958			blk_cnt = rx_cfg->num_rxd /
 959				(rxd_count[nic->rxd_mode] + 1);
 960			for (j = 0; j < blk_cnt; j++) {
 961				int k = 0;
 962				if (!ring->ba[j])
 963					continue;
 964				while (k != rxd_count[nic->rxd_mode]) {
 965					struct buffAdd *ba = &ring->ba[j][k];
 966					kfree(ba->ba_0_org);
 967					swstats->mem_freed +=
 968						BUF0_LEN + ALIGN_SIZE;
 969					kfree(ba->ba_1_org);
 970					swstats->mem_freed +=
 971						BUF1_LEN + ALIGN_SIZE;
 972					k++;
 973				}
 974				kfree(ring->ba[j]);
 975				swstats->mem_freed += sizeof(struct buffAdd) *
 976					(rxd_count[nic->rxd_mode] + 1);
 977			}
 978			kfree(ring->ba);
 979			swstats->mem_freed += sizeof(struct buffAdd *) *
 980				blk_cnt;
 981		}
 982	}
 983
 984	for (i = 0; i < nic->config.tx_fifo_num; i++) {
 985		struct fifo_info *fifo = &mac_control->fifos[i];
 986		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
 987
 988		if (fifo->ufo_in_band_v) {
 989			swstats->mem_freed += tx_cfg->fifo_len *
 990				sizeof(u64);
 991			kfree(fifo->ufo_in_band_v);
 992		}
 993	}
 994
 995	if (mac_control->stats_mem) {
 996		swstats->mem_freed += mac_control->stats_mem_sz;
 997		dma_free_coherent(&nic->pdev->dev, mac_control->stats_mem_sz,
 998				  mac_control->stats_mem,
 999				  mac_control->stats_mem_phy);
1000	}
1001}
1002
1003/*
1004 * s2io_verify_pci_mode -
1005 */
1006
1007static int s2io_verify_pci_mode(struct s2io_nic *nic)
1008{
1009	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1010	register u64 val64 = 0;
1011	int     mode;
1012
1013	val64 = readq(&bar0->pci_mode);
1014	mode = (u8)GET_PCI_MODE(val64);
1015
1016	if (val64 & PCI_MODE_UNKNOWN_MODE)
1017		return -1;      /* Unknown PCI mode */
1018	return mode;
1019}
1020
#define NEC_VENID   0x1033
#define NEC_DEVID   0x0125
/* Return 1 if the adapter's parent bus hangs off a NEC bridge
 * (vendor 0x1033 / device 0x0125), 0 otherwise.  The reference taken
 * by for_each_pci_dev() is dropped before the early return.
 */
static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
{
	struct pci_dev *tdev = NULL;
	for_each_pci_dev(tdev) {
		if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
			if (tdev->bus == s2io_pdev->bus->parent) {
				pci_dev_put(tdev);
				return 1;
			}
		}
	}
	return 0;
}
1036
1037static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
1038/*
1039 * s2io_print_pci_mode -
1040 */
1041static int s2io_print_pci_mode(struct s2io_nic *nic)
1042{
1043	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1044	register u64 val64 = 0;
1045	int	mode;
1046	struct config_param *config = &nic->config;
1047	const char *pcimode;
1048
1049	val64 = readq(&bar0->pci_mode);
1050	mode = (u8)GET_PCI_MODE(val64);
1051
1052	if (val64 & PCI_MODE_UNKNOWN_MODE)
1053		return -1;	/* Unknown PCI mode */
1054
1055	config->bus_speed = bus_speed[mode];
1056
1057	if (s2io_on_nec_bridge(nic->pdev)) {
1058		DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1059			  nic->dev->name);
1060		return mode;
1061	}
1062
1063	switch (mode) {
1064	case PCI_MODE_PCI_33:
1065		pcimode = "33MHz PCI bus";
1066		break;
1067	case PCI_MODE_PCI_66:
1068		pcimode = "66MHz PCI bus";
1069		break;
1070	case PCI_MODE_PCIX_M1_66:
1071		pcimode = "66MHz PCIX(M1) bus";
1072		break;
1073	case PCI_MODE_PCIX_M1_100:
1074		pcimode = "100MHz PCIX(M1) bus";
1075		break;
1076	case PCI_MODE_PCIX_M1_133:
1077		pcimode = "133MHz PCIX(M1) bus";
1078		break;
1079	case PCI_MODE_PCIX_M2_66:
1080		pcimode = "133MHz PCIX(M2) bus";
1081		break;
1082	case PCI_MODE_PCIX_M2_100:
1083		pcimode = "200MHz PCIX(M2) bus";
1084		break;
1085	case PCI_MODE_PCIX_M2_133:
1086		pcimode = "266MHz PCIX(M2) bus";
1087		break;
1088	default:
1089		pcimode = "unsupported bus!";
1090		mode = -1;
1091	}
1092
1093	DBG_PRINT(ERR_DBG, "%s: Device is on %d bit %s\n",
1094		  nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64, pcimode);
1095
1096	return mode;
1097}
1098
1099/**
1100 *  init_tti - Initialization transmit traffic interrupt scheme
1101 *  @nic: device private variable
1102 *  @link: link status (UP/DOWN) used to enable/disable continuous
1103 *  transmit interrupts
1104 *  @may_sleep: parameter indicates if sleeping when waiting for
1105 *  command complete
1106 *  Description: The function configures transmit traffic interrupts
1107 *  Return Value:  SUCCESS on success and
1108 *  '-1' on failure
1109 */
1110
1111static int init_tti(struct s2io_nic *nic, int link, bool may_sleep)
1112{
1113	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1114	register u64 val64 = 0;
1115	int i;
1116	struct config_param *config = &nic->config;
1117
1118	for (i = 0; i < config->tx_fifo_num; i++) {
1119		/*
1120		 * TTI Initialization. Default Tx timer gets us about
1121		 * 250 interrupts per sec. Continuous interrupts are enabled
1122		 * by default.
1123		 */
1124		if (nic->device_type == XFRAME_II_DEVICE) {
1125			int count = (nic->config.bus_speed * 125)/2;
1126			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1127		} else
1128			val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1129
1130		val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1131			TTI_DATA1_MEM_TX_URNG_B(0x10) |
1132			TTI_DATA1_MEM_TX_URNG_C(0x30) |
1133			TTI_DATA1_MEM_TX_TIMER_AC_EN;
1134		if (i == 0)
1135			if (use_continuous_tx_intrs && (link == LINK_UP))
1136				val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1137		writeq(val64, &bar0->tti_data1_mem);
1138
1139		if (nic->config.intr_type == MSI_X) {
1140			val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1141				TTI_DATA2_MEM_TX_UFC_B(0x100) |
1142				TTI_DATA2_MEM_TX_UFC_C(0x200) |
1143				TTI_DATA2_MEM_TX_UFC_D(0x300);
1144		} else {
1145			if ((nic->config.tx_steering_type ==
1146			     TX_DEFAULT_STEERING) &&
1147			    (config->tx_fifo_num > 1) &&
1148			    (i >= nic->udp_fifo_idx) &&
1149			    (i < (nic->udp_fifo_idx +
1150				  nic->total_udp_fifos)))
1151				val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
1152					TTI_DATA2_MEM_TX_UFC_B(0x80) |
1153					TTI_DATA2_MEM_TX_UFC_C(0x100) |
1154					TTI_DATA2_MEM_TX_UFC_D(0x120);
1155			else
1156				val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1157					TTI_DATA2_MEM_TX_UFC_B(0x20) |
1158					TTI_DATA2_MEM_TX_UFC_C(0x40) |
1159					TTI_DATA2_MEM_TX_UFC_D(0x80);
1160		}
1161
1162		writeq(val64, &bar0->tti_data2_mem);
1163
1164		val64 = TTI_CMD_MEM_WE |
1165			TTI_CMD_MEM_STROBE_NEW_CMD |
1166			TTI_CMD_MEM_OFFSET(i);
1167		writeq(val64, &bar0->tti_command_mem);
1168
1169		if (wait_for_cmd_complete(&bar0->tti_command_mem,
1170					  TTI_CMD_MEM_STROBE_NEW_CMD,
1171					  S2IO_BIT_RESET, may_sleep) != SUCCESS)
1172			return FAILURE;
1173	}
1174
1175	return SUCCESS;
1176}
1177
1178/**
1179 *  init_nic - Initialization of hardware
1180 *  @nic: device private variable
1181 *  Description: The function sequentially configures every block
1182 *  of the H/W from their reset values.
1183 *  Return Value:  SUCCESS on success and
1184 *  '-1' on failure (endian settings incorrect).
1185 */
1186
1187static int init_nic(struct s2io_nic *nic)
1188{
1189	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1190	struct net_device *dev = nic->dev;
1191	register u64 val64 = 0;
1192	void __iomem *add;
1193	u32 time;
1194	int i, j;
1195	int dtx_cnt = 0;
1196	unsigned long long mem_share;
1197	int mem_size;
1198	struct config_param *config = &nic->config;
1199	struct mac_info *mac_control = &nic->mac_control;
1200
1201	/* to set the swapper controle on the card */
1202	if (s2io_set_swapper(nic)) {
1203		DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
1204		return -EIO;
1205	}
1206
1207	/*
1208	 * Herc requires EOI to be removed from reset before XGXS, so..
1209	 */
1210	if (nic->device_type & XFRAME_II_DEVICE) {
1211		val64 = 0xA500000000ULL;
1212		writeq(val64, &bar0->sw_reset);
1213		msleep(500);
1214		val64 = readq(&bar0->sw_reset);
1215	}
1216
1217	/* Remove XGXS from reset state */
1218	val64 = 0;
1219	writeq(val64, &bar0->sw_reset);
1220	msleep(500);
1221	val64 = readq(&bar0->sw_reset);
1222
1223	/* Ensure that it's safe to access registers by checking
1224	 * RIC_RUNNING bit is reset. Check is valid only for XframeII.
1225	 */
1226	if (nic->device_type == XFRAME_II_DEVICE) {
1227		for (i = 0; i < 50; i++) {
1228			val64 = readq(&bar0->adapter_status);
1229			if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1230				break;
1231			msleep(10);
1232		}
1233		if (i == 50)
1234			return -ENODEV;
1235	}
1236
1237	/*  Enable Receiving broadcasts */
1238	add = &bar0->mac_cfg;
1239	val64 = readq(&bar0->mac_cfg);
1240	val64 |= MAC_RMAC_BCAST_ENABLE;
1241	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1242	writel((u32)val64, add);
1243	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1244	writel((u32) (val64 >> 32), (add + 4));
1245
1246	/* Read registers in all blocks */
1247	val64 = readq(&bar0->mac_int_mask);
1248	val64 = readq(&bar0->mc_int_mask);
1249	val64 = readq(&bar0->xgxs_int_mask);
1250
1251	/*  Set MTU */
1252	val64 = dev->mtu;
1253	writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1254
1255	if (nic->device_type & XFRAME_II_DEVICE) {
1256		while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1257			SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1258					  &bar0->dtx_control, UF);
1259			if (dtx_cnt & 0x1)
1260				msleep(1); /* Necessary!! */
1261			dtx_cnt++;
1262		}
1263	} else {
1264		while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1265			SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1266					  &bar0->dtx_control, UF);
1267			val64 = readq(&bar0->dtx_control);
1268			dtx_cnt++;
1269		}
1270	}
1271
1272	/*  Tx DMA Initialization */
1273	val64 = 0;
1274	writeq(val64, &bar0->tx_fifo_partition_0);
1275	writeq(val64, &bar0->tx_fifo_partition_1);
1276	writeq(val64, &bar0->tx_fifo_partition_2);
1277	writeq(val64, &bar0->tx_fifo_partition_3);
1278
1279	for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1280		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
1281
1282		val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
1283			vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);
1284
1285		if (i == (config->tx_fifo_num - 1)) {
1286			if (i % 2 == 0)
1287				i++;
1288		}
1289
1290		switch (i) {
1291		case 1:
1292			writeq(val64, &bar0->tx_fifo_partition_0);
1293			val64 = 0;
1294			j = 0;
1295			break;
1296		case 3:
1297			writeq(val64, &bar0->tx_fifo_partition_1);
1298			val64 = 0;
1299			j = 0;
1300			break;
1301		case 5:
1302			writeq(val64, &bar0->tx_fifo_partition_2);
1303			val64 = 0;
1304			j = 0;
1305			break;
1306		case 7:
1307			writeq(val64, &bar0->tx_fifo_partition_3);
1308			val64 = 0;
1309			j = 0;
1310			break;
1311		default:
1312			j++;
1313			break;
1314		}
1315	}
1316
1317	/*
1318	 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1319	 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1320	 */
1321	if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4))
1322		writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1323
1324	val64 = readq(&bar0->tx_fifo_partition_0);
1325	DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1326		  &bar0->tx_fifo_partition_0, (unsigned long long)val64);
1327
1328	/*
1329	 * Initialization of Tx_PA_CONFIG register to ignore packet
1330	 * integrity checking.
1331	 */
1332	val64 = readq(&bar0->tx_pa_cfg);
1333	val64 |= TX_PA_CFG_IGNORE_FRM_ERR |
1334		TX_PA_CFG_IGNORE_SNAP_OUI |
1335		TX_PA_CFG_IGNORE_LLC_CTRL |
1336		TX_PA_CFG_IGNORE_L2_ERR;
1337	writeq(val64, &bar0->tx_pa_cfg);
1338
1339	/* Rx DMA initialization. */
1340	val64 = 0;
1341	for (i = 0; i < config->rx_ring_num; i++) {
1342		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
1343
1344		val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
1345	}
1346	writeq(val64, &bar0->rx_queue_priority);
1347
1348	/*
1349	 * Allocating equal share of memory to all the
1350	 * configured Rings.
1351	 */
1352	val64 = 0;
1353	if (nic->device_type & XFRAME_II_DEVICE)
1354		mem_size = 32;
1355	else
1356		mem_size = 64;
1357
1358	for (i = 0; i < config->rx_ring_num; i++) {
1359		switch (i) {
1360		case 0:
1361			mem_share = (mem_size / config->rx_ring_num +
1362				     mem_size % config->rx_ring_num);
1363			val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1364			continue;
1365		case 1:
1366			mem_share = (mem_size / config->rx_ring_num);
1367			val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1368			continue;
1369		case 2:
1370			mem_share = (mem_size / config->rx_ring_num);
1371			val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1372			continue;
1373		case 3:
1374			mem_share = (mem_size / config->rx_ring_num);
1375			val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1376			continue;
1377		case 4:
1378			mem_share = (mem_size / config->rx_ring_num);
1379			val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1380			continue;
1381		case 5:
1382			mem_share = (mem_size / config->rx_ring_num);
1383			val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1384			continue;
1385		case 6:
1386			mem_share = (mem_size / config->rx_ring_num);
1387			val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1388			continue;
1389		case 7:
1390			mem_share = (mem_size / config->rx_ring_num);
1391			val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1392			continue;
1393		}
1394	}
1395	writeq(val64, &bar0->rx_queue_cfg);
1396
1397	/*
1398	 * Filling Tx round robin registers
1399	 * as per the number of FIFOs for equal scheduling priority
1400	 */
1401	switch (config->tx_fifo_num) {
1402	case 1:
1403		val64 = 0x0;
1404		writeq(val64, &bar0->tx_w_round_robin_0);
1405		writeq(val64, &bar0->tx_w_round_robin_1);
1406		writeq(val64, &bar0->tx_w_round_robin_2);
1407		writeq(val64, &bar0->tx_w_round_robin_3);
1408		writeq(val64, &bar0->tx_w_round_robin_4);
1409		break;
1410	case 2:
1411		val64 = 0x0001000100010001ULL;
1412		writeq(val64, &bar0->tx_w_round_robin_0);
1413		writeq(val64, &bar0->tx_w_round_robin_1);
1414		writeq(val64, &bar0->tx_w_round_robin_2);
1415		writeq(val64, &bar0->tx_w_round_robin_3);
1416		val64 = 0x0001000100000000ULL;
1417		writeq(val64, &bar0->tx_w_round_robin_4);
1418		break;
1419	case 3:
1420		val64 = 0x0001020001020001ULL;
1421		writeq(val64, &bar0->tx_w_round_robin_0);
1422		val64 = 0x0200010200010200ULL;
1423		writeq(val64, &bar0->tx_w_round_robin_1);
1424		val64 = 0x0102000102000102ULL;
1425		writeq(val64, &bar0->tx_w_round_robin_2);
1426		val64 = 0x0001020001020001ULL;
1427		writeq(val64, &bar0->tx_w_round_robin_3);
1428		val64 = 0x0200010200000000ULL;
1429		writeq(val64, &bar0->tx_w_round_robin_4);
1430		break;
1431	case 4:
1432		val64 = 0x0001020300010203ULL;
1433		writeq(val64, &bar0->tx_w_round_robin_0);
1434		writeq(val64, &bar0->tx_w_round_robin_1);
1435		writeq(val64, &bar0->tx_w_round_robin_2);
1436		writeq(val64, &bar0->tx_w_round_robin_3);
1437		val64 = 0x0001020300000000ULL;
1438		writeq(val64, &bar0->tx_w_round_robin_4);
1439		break;
1440	case 5:
1441		val64 = 0x0001020304000102ULL;
1442		writeq(val64, &bar0->tx_w_round_robin_0);
1443		val64 = 0x0304000102030400ULL;
1444		writeq(val64, &bar0->tx_w_round_robin_1);
1445		val64 = 0x0102030400010203ULL;
1446		writeq(val64, &bar0->tx_w_round_robin_2);
1447		val64 = 0x0400010203040001ULL;
1448		writeq(val64, &bar0->tx_w_round_robin_3);
1449		val64 = 0x0203040000000000ULL;
1450		writeq(val64, &bar0->tx_w_round_robin_4);
1451		break;
1452	case 6:
1453		val64 = 0x0001020304050001ULL;
1454		writeq(val64, &bar0->tx_w_round_robin_0);
1455		val64 = 0x0203040500010203ULL;
1456		writeq(val64, &bar0->tx_w_round_robin_1);
1457		val64 = 0x0405000102030405ULL;
1458		writeq(val64, &bar0->tx_w_round_robin_2);
1459		val64 = 0x0001020304050001ULL;
1460		writeq(val64, &bar0->tx_w_round_robin_3);
1461		val64 = 0x0203040500000000ULL;
1462		writeq(val64, &bar0->tx_w_round_robin_4);
1463		break;
1464	case 7:
1465		val64 = 0x0001020304050600ULL;
1466		writeq(val64, &bar0->tx_w_round_robin_0);
1467		val64 = 0x0102030405060001ULL;
1468		writeq(val64, &bar0->tx_w_round_robin_1);
1469		val64 = 0x0203040506000102ULL;
1470		writeq(val64, &bar0->tx_w_round_robin_2);
1471		val64 = 0x0304050600010203ULL;
1472		writeq(val64, &bar0->tx_w_round_robin_3);
1473		val64 = 0x0405060000000000ULL;
1474		writeq(val64, &bar0->tx_w_round_robin_4);
1475		break;
1476	case 8:
1477		val64 = 0x0001020304050607ULL;
1478		writeq(val64, &bar0->tx_w_round_robin_0);
1479		writeq(val64, &bar0->tx_w_round_robin_1);
1480		writeq(val64, &bar0->tx_w_round_robin_2);
1481		writeq(val64, &bar0->tx_w_round_robin_3);
1482		val64 = 0x0001020300000000ULL;
1483		writeq(val64, &bar0->tx_w_round_robin_4);
1484		break;
1485	}
1486
1487	/* Enable all configured Tx FIFO partitions */
1488	val64 = readq(&bar0->tx_fifo_partition_0);
1489	val64 |= (TX_FIFO_PARTITION_EN);
1490	writeq(val64, &bar0->tx_fifo_partition_0);
1491
1492	/* Filling the Rx round robin registers as per the
1493	 * number of Rings and steering based on QoS with
1494	 * equal priority.
1495	 */
1496	switch (config->rx_ring_num) {
1497	case 1:
1498		val64 = 0x0;
1499		writeq(val64, &bar0->rx_w_round_robin_0);
1500		writeq(val64, &bar0->rx_w_round_robin_1);
1501		writeq(val64, &bar0->rx_w_round_robin_2);
1502		writeq(val64, &bar0->rx_w_round_robin_3);
1503		writeq(val64, &bar0->rx_w_round_robin_4);
1504
1505		val64 = 0x8080808080808080ULL;
1506		writeq(val64, &bar0->rts_qos_steering);
1507		break;
1508	case 2:
1509		val64 = 0x0001000100010001ULL;
1510		writeq(val64, &bar0->rx_w_round_robin_0);
1511		writeq(val64, &bar0->rx_w_round_robin_1);
1512		writeq(val64, &bar0->rx_w_round_robin_2);
1513		writeq(val64, &bar0->rx_w_round_robin_3);
1514		val64 = 0x0001000100000000ULL;
1515		writeq(val64, &bar0->rx_w_round_robin_4);
1516
1517		val64 = 0x8080808040404040ULL;
1518		writeq(val64, &bar0->rts_qos_steering);
1519		break;
1520	case 3:
1521		val64 = 0x0001020001020001ULL;
1522		writeq(val64, &bar0->rx_w_round_robin_0);
1523		val64 = 0x0200010200010200ULL;
1524		writeq(val64, &bar0->rx_w_round_robin_1);
1525		val64 = 0x0102000102000102ULL;
1526		writeq(val64, &bar0->rx_w_round_robin_2);
1527		val64 = 0x0001020001020001ULL;
1528		writeq(val64, &bar0->rx_w_round_robin_3);
1529		val64 = 0x0200010200000000ULL;
1530		writeq(val64, &bar0->rx_w_round_robin_4);
1531
1532		val64 = 0x8080804040402020ULL;
1533		writeq(val64, &bar0->rts_qos_steering);
1534		break;
1535	case 4:
1536		val64 = 0x0001020300010203ULL;
1537		writeq(val64, &bar0->rx_w_round_robin_0);
1538		writeq(val64, &bar0->rx_w_round_robin_1);
1539		writeq(val64, &bar0->rx_w_round_robin_2);
1540		writeq(val64, &bar0->rx_w_round_robin_3);
1541		val64 = 0x0001020300000000ULL;
1542		writeq(val64, &bar0->rx_w_round_robin_4);
1543
1544		val64 = 0x8080404020201010ULL;
1545		writeq(val64, &bar0->rts_qos_steering);
1546		break;
1547	case 5:
1548		val64 = 0x0001020304000102ULL;
1549		writeq(val64, &bar0->rx_w_round_robin_0);
1550		val64 = 0x0304000102030400ULL;
1551		writeq(val64, &bar0->rx_w_round_robin_1);
1552		val64 = 0x0102030400010203ULL;
1553		writeq(val64, &bar0->rx_w_round_robin_2);
1554		val64 = 0x0400010203040001ULL;
1555		writeq(val64, &bar0->rx_w_round_robin_3);
1556		val64 = 0x0203040000000000ULL;
1557		writeq(val64, &bar0->rx_w_round_robin_4);
1558
1559		val64 = 0x8080404020201008ULL;
1560		writeq(val64, &bar0->rts_qos_steering);
1561		break;
1562	case 6:
1563		val64 = 0x0001020304050001ULL;
1564		writeq(val64, &bar0->rx_w_round_robin_0);
1565		val64 = 0x0203040500010203ULL;
1566		writeq(val64, &bar0->rx_w_round_robin_1);
1567		val64 = 0x0405000102030405ULL;
1568		writeq(val64, &bar0->rx_w_round_robin_2);
1569		val64 = 0x0001020304050001ULL;
1570		writeq(val64, &bar0->rx_w_round_robin_3);
1571		val64 = 0x0203040500000000ULL;
1572		writeq(val64, &bar0->rx_w_round_robin_4);
1573
1574		val64 = 0x8080404020100804ULL;
1575		writeq(val64, &bar0->rts_qos_steering);
1576		break;
1577	case 7:
1578		val64 = 0x0001020304050600ULL;
1579		writeq(val64, &bar0->rx_w_round_robin_0);
1580		val64 = 0x0102030405060001ULL;
1581		writeq(val64, &bar0->rx_w_round_robin_1);
1582		val64 = 0x0203040506000102ULL;
1583		writeq(val64, &bar0->rx_w_round_robin_2);
1584		val64 = 0x0304050600010203ULL;
1585		writeq(val64, &bar0->rx_w_round_robin_3);
1586		val64 = 0x0405060000000000ULL;
1587		writeq(val64, &bar0->rx_w_round_robin_4);
1588
1589		val64 = 0x8080402010080402ULL;
1590		writeq(val64, &bar0->rts_qos_steering);
1591		break;
1592	case 8:
1593		val64 = 0x0001020304050607ULL;
1594		writeq(val64, &bar0->rx_w_round_robin_0);
1595		writeq(val64, &bar0->rx_w_round_robin_1);
1596		writeq(val64, &bar0->rx_w_round_robin_2);
1597		writeq(val64, &bar0->rx_w_round_robin_3);
1598		val64 = 0x0001020300000000ULL;
1599		writeq(val64, &bar0->rx_w_round_robin_4);
1600
1601		val64 = 0x8040201008040201ULL;
1602		writeq(val64, &bar0->rts_qos_steering);
1603		break;
1604	}
1605
1606	/* UDP Fix */
1607	val64 = 0;
1608	for (i = 0; i < 8; i++)
1609		writeq(val64, &bar0->rts_frm_len_n[i]);
1610
1611	/* Set the default rts frame length for the rings configured */
1612	val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1613	for (i = 0 ; i < config->rx_ring_num ; i++)
1614		writeq(val64, &bar0->rts_frm_len_n[i]);
1615
1616	/* Set the frame length for the configured rings
1617	 * desired by the user
1618	 */
1619	for (i = 0; i < config->rx_ring_num; i++) {
1620		/* If rts_frm_len[i] == 0 then it is assumed that user not
1621		 * specified frame length steering.
1622		 * If the user provides the frame length then program
1623		 * the rts_frm_len register for those values or else
1624		 * leave it as it is.
1625		 */
1626		if (rts_frm_len[i] != 0) {
1627			writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1628			       &bar0->rts_frm_len_n[i]);
1629		}
1630	}
1631
1632	/* Disable differentiated services steering logic */
1633	for (i = 0; i < 64; i++) {
1634		if (rts_ds_steer(nic, i, 0) == FAILURE) {
1635			DBG_PRINT(ERR_DBG,
1636				  "%s: rts_ds_steer failed on codepoint %d\n",
1637				  dev->name, i);
1638			return -ENODEV;
1639		}
1640	}
1641
1642	/* Program statistics memory */
1643	writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1644
1645	if (nic->device_type == XFRAME_II_DEVICE) {
1646		val64 = STAT_BC(0x320);
1647		writeq(val64, &bar0->stat_byte_cnt);
1648	}
1649
1650	/*
1651	 * Initializing the sampling rate for the device to calculate the
1652	 * bandwidth utilization.
1653	 */
1654	val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1655		MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1656	writeq(val64, &bar0->mac_link_util);
1657
1658	/*
1659	 * Initializing the Transmit and Receive Traffic Interrupt
1660	 * Scheme.
1661	 */
1662
1663	/* Initialize TTI */
1664	if (SUCCESS != init_tti(nic, nic->last_link_state, true))
1665		return -ENODEV;
1666
1667	/* RTI Initialization */
1668	if (nic->device_type == XFRAME_II_DEVICE) {
1669		/*
1670		 * Programmed to generate Apprx 500 Intrs per
1671		 * second
1672		 */
1673		int count = (nic->config.bus_speed * 125)/4;
1674		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1675	} else
1676		val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1677	val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1678		RTI_DATA1_MEM_RX_URNG_B(0x10) |
1679		RTI_DATA1_MEM_RX_URNG_C(0x30) |
1680		RTI_DATA1_MEM_RX_TIMER_AC_EN;
1681
1682	writeq(val64, &bar0->rti_data1_mem);
1683
1684	val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1685		RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1686	if (nic->config.intr_type == MSI_X)
1687		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
1688			  RTI_DATA2_MEM_RX_UFC_D(0x40));
1689	else
1690		val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
1691			  RTI_DATA2_MEM_RX_UFC_D(0x80));
1692	writeq(val64, &bar0->rti_data2_mem);
1693
1694	for (i = 0; i < config->rx_ring_num; i++) {
1695		val64 = RTI_CMD_MEM_WE |
1696			RTI_CMD_MEM_STROBE_NEW_CMD |
1697			RTI_CMD_MEM_OFFSET(i);
1698		writeq(val64, &bar0->rti_command_mem);
1699
1700		/*
1701		 * Once the operation completes, the Strobe bit of the
1702		 * command register will be reset. We poll for this
1703		 * particular condition. We wait for a maximum of 500ms
1704		 * for the operation to complete, if it's not complete
1705		 * by then we return error.
1706		 */
1707		time = 0;
1708		while (true) {
1709			val64 = readq(&bar0->rti_command_mem);
1710			if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1711				break;
1712
1713			if (time > 10) {
1714				DBG_PRINT(ERR_DBG, "%s: RTI init failed\n",
1715					  dev->name);
1716				return -ENODEV;
1717			}
1718			time++;
1719			msleep(50);
1720		}
1721	}
1722
1723	/*
1724	 * Initializing proper values as Pause threshold into all
1725	 * the 8 Queues on Rx side.
1726	 */
1727	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1728	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1729
1730	/* Disable RMAC PAD STRIPPING */
1731	add = &bar0->mac_cfg;
1732	val64 = readq(&bar0->mac_cfg);
1733	val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1734	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1735	writel((u32) (val64), add);
1736	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1737	writel((u32) (val64 >> 32), (add + 4));
1738	val64 = readq(&bar0->mac_cfg);
1739
1740	/* Enable FCS stripping by adapter */
1741	add = &bar0->mac_cfg;
1742	val64 = readq(&bar0->mac_cfg);
1743	val64 |= MAC_CFG_RMAC_STRIP_FCS;
1744	if (nic->device_type == XFRAME_II_DEVICE)
1745		writeq(val64, &bar0->mac_cfg);
1746	else {
1747		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1748		writel((u32) (val64), add);
1749		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1750		writel((u32) (val64 >> 32), (add + 4));
1751	}
1752
1753	/*
1754	 * Set the time value to be inserted in the pause frame
1755	 * generated by xena.
1756	 */
1757	val64 = readq(&bar0->rmac_pause_cfg);
1758	val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1759	val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1760	writeq(val64, &bar0->rmac_pause_cfg);
1761
1762	/*
1763	 * Set the Threshold Limit for Generating the pause frame
1764	 * If the amount of data in any Queue exceeds ratio of
1765	 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1766	 * pause frame is generated
1767	 */
1768	val64 = 0;
1769	for (i = 0; i < 4; i++) {
1770		val64 |= (((u64)0xFF00 |
1771			   nic->mac_control.mc_pause_threshold_q0q3)
1772			  << (i * 2 * 8));
1773	}
1774	writeq(val64, &bar0->mc_pause_thresh_q0q3);
1775
1776	val64 = 0;
1777	for (i = 0; i < 4; i++) {
1778		val64 |= (((u64)0xFF00 |
1779			   nic->mac_control.mc_pause_threshold_q4q7)
1780			  << (i * 2 * 8));
1781	}
1782	writeq(val64, &bar0->mc_pause_thresh_q4q7);
1783
1784	/*
1785	 * TxDMA will stop Read request if the number of read split has
1786	 * exceeded the limit pointed by shared_splits
1787	 */
1788	val64 = readq(&bar0->pic_control);
1789	val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1790	writeq(val64, &bar0->pic_control);
1791
1792	if (nic->config.bus_speed == 266) {
1793		writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1794		writeq(0x0, &bar0->read_retry_delay);
1795		writeq(0x0, &bar0->write_retry_delay);
1796	}
1797
1798	/*
1799	 * Programming the Herc to split every write transaction
1800	 * that does not start on an ADB to reduce disconnects.
1801	 */
1802	if (nic->device_type == XFRAME_II_DEVICE) {
1803		val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1804			MISC_LINK_STABILITY_PRD(3);
1805		writeq(val64, &bar0->misc_control);
1806		val64 = readq(&bar0->pic_control2);
1807		val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
1808		writeq(val64, &bar0->pic_control2);
1809	}
1810	if (strstr(nic->product_name, "CX4")) {
1811		val64 = TMAC_AVG_IPG(0x17);
1812		writeq(val64, &bar0->tmac_avg_ipg);
1813	}
1814
1815	return SUCCESS;
1816}
1817#define LINK_UP_DOWN_INTERRUPT		1
1818#define MAC_RMAC_ERR_TIMER		2
1819
1820static int s2io_link_fault_indication(struct s2io_nic *nic)
1821{
1822	if (nic->device_type == XFRAME_II_DEVICE)
1823		return LINK_UP_DOWN_INTERRUPT;
1824	else
1825		return MAC_RMAC_ERR_TIMER;
1826}
1827
1828/**
1829 *  do_s2io_write_bits -  update alarm bits in alarm register
1830 *  @value: alarm bits
1831 *  @flag: interrupt status
1832 *  @addr: address value
1833 *  Description: update alarm bits in alarm register
1834 *  Return Value:
1835 *  NONE.
1836 */
1837static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1838{
1839	u64 temp64;
1840
1841	temp64 = readq(addr);
1842
1843	if (flag == ENABLE_INTRS)
1844		temp64 &= ~((u64)value);
1845	else
1846		temp64 |= ((u64)value);
1847	writeq(temp64, addr);
1848}
1849
/**
 *  en_dis_err_alarms - mask/unmask the error alarm interrupt sources
 *  @nic: device private variable
 *  @mask: bitmask of alarm groups to act on (TX_DMA_INTR, TX_MAC_INTR,
 *  TX_XGXS_INTR, RX_DMA_INTR, RX_MAC_INTR, RX_XGXS_INTR, MC_INTR)
 *  @flag: ENABLE_INTRS clears the mask bits (unmasking the alarms),
 *  any other value sets them (masking the alarms)
 *  Description: For every alarm group selected in @mask, program that
 *  block's error-mask registers via do_s2io_write_bits() and accumulate
 *  the group's top-level bit into nic->general_int_mask.
 *  Return Value: NONE.
 */
static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 gen_int_mask = 0;
	u64 interruptible;

	/* Block all top-level interrupts while reprogramming the masks */
	writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
	if (mask & TX_DMA_INTR) {
		/* Tx DMA engine alarms (TDA/PFC/PCC/TTI/LSO/TPA/SM) */
		gen_int_mask |= TXDMA_INT_M;

		do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
				   TXDMA_PCC_INT | TXDMA_TTI_INT |
				   TXDMA_LSO_INT | TXDMA_TPA_INT |
				   TXDMA_SM_INT, flag, &bar0->txdma_int_mask);

		do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
				   PFC_MISC_0_ERR | PFC_MISC_1_ERR |
				   PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
				   &bar0->pfc_err_mask);

		do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
				   TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
				   TDA_PCIX_ERR, flag, &bar0->tda_err_mask);

		do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
				   PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
				   PCC_N_SERR | PCC_6_COF_OV_ERR |
				   PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
				   PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
				   PCC_TXB_ECC_SG_ERR,
				   flag, &bar0->pcc_err_mask);

		do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
				   TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);

		do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
				   LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
				   LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
				   flag, &bar0->lso_err_mask);

		do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
				   flag, &bar0->tpa_err_mask);

		do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
	}

	if (mask & TX_MAC_INTR) {
		/* Tx MAC (TMAC) alarms */
		gen_int_mask |= TXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
				   &bar0->mac_int_mask);
		do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
				   TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
				   TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
				   flag, &bar0->mac_tmac_err_mask);
	}

	if (mask & TX_XGXS_INTR) {
		/* Tx XGXS (transceiver interface) alarms */
		gen_int_mask |= TXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
				   &bar0->xgxs_int_mask);
		do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
				   TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
				   flag, &bar0->xgxs_txgxs_err_mask);
	}

	if (mask & RX_DMA_INTR) {
		/* Rx DMA engine alarms (RC/PRC/RPA/RDA/RTI) */
		gen_int_mask |= RXDMA_INT_M;
		do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
				   RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
				   flag, &bar0->rxdma_int_mask);
		do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
				   RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
				   RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
				   RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
		do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
				   PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
				   PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
				   &bar0->prc_pcix_err_mask);
		do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
				   RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
				   &bar0->rpa_err_mask);
		do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
				   RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
				   RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
				   RDA_FRM_ECC_SG_ERR |
				   RDA_MISC_ERR|RDA_PCIX_ERR,
				   flag, &bar0->rda_err_mask);
		do_s2io_write_bits(RTI_SM_ERR_ALARM |
				   RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
				   flag, &bar0->rti_err_mask);
	}

	if (mask & RX_MAC_INTR) {
		/* Rx MAC (RMAC) alarms */
		gen_int_mask |= RXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
				   &bar0->mac_int_mask);
		interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
				 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
				 RMAC_DOUBLE_ECC_ERR);
		/* Link-state changes arrive via this alarm only when the
		 * RMAC error timer (not GPIO) reports link faults.
		 */
		if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
			interruptible |= RMAC_LINK_STATE_CHANGE_INT;
		do_s2io_write_bits(interruptible,
				   flag, &bar0->mac_rmac_err_mask);
	}

	if (mask & RX_XGXS_INTR) {
		/* Rx XGXS alarms */
		gen_int_mask |= RXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
				   &bar0->xgxs_int_mask);
		do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
				   &bar0->xgxs_rxgxs_err_mask);
	}

	if (mask & MC_INTR) {
		/* Memory controller (RLDRAM) alarms */
		gen_int_mask |= MC_INT_M;
		do_s2io_write_bits(MC_INT_MASK_MC_INT,
				   flag, &bar0->mc_int_mask);
		do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
				   MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
				   &bar0->mc_err_mask);
	}
	nic->general_int_mask = gen_int_mask;

	/* Remove this line when alarm interrupts are enabled */
	nic->general_int_mask = 0;
}
1976
1977/**
1978 *  en_dis_able_nic_intrs - Enable or Disable the interrupts
1979 *  @nic: device private variable,
1980 *  @mask: A mask indicating which Intr block must be modified and,
1981 *  @flag: A flag indicating whether to enable or disable the Intrs.
1982 *  Description: This function will either disable or enable the interrupts
1983 *  depending on the flag argument. The mask argument can be used to
1984 *  enable/disable any Intr block.
1985 *  Return Value: NONE.
1986 */
1987
1988static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1989{
1990	struct XENA_dev_config __iomem *bar0 = nic->bar0;
1991	register u64 temp64 = 0, intr_mask = 0;
1992
1993	intr_mask = nic->general_int_mask;
1994
1995	/*  Top level interrupt classification */
1996	/*  PIC Interrupts */
1997	if (mask & TX_PIC_INTR) {
1998		/*  Enable PIC Intrs in the general intr mask register */
1999		intr_mask |= TXPIC_INT_M;
2000		if (flag == ENABLE_INTRS) {
2001			/*
2002			 * If Hercules adapter enable GPIO otherwise
2003			 * disable all PCIX, Flash, MDIO, IIC and GPIO
2004			 * interrupts for now.
2005			 * TODO
2006			 */
2007			if (s2io_link_fault_indication(nic) ==
2008			    LINK_UP_DOWN_INTERRUPT) {
2009				do_s2io_write_bits(PIC_INT_GPIO, flag,
2010						   &bar0->pic_int_mask);
2011				do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
2012						   &bar0->gpio_int_mask);
2013			} else
2014				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2015		} else if (flag == DISABLE_INTRS) {
2016			/*
2017			 * Disable PIC Intrs in the general
2018			 * intr mask register
2019			 */
2020			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2021		}
2022	}
2023
2024	/*  Tx traffic interrupts */
2025	if (mask & TX_TRAFFIC_INTR) {
2026		intr_mask |= TXTRAFFIC_INT_M;
2027		if (flag == ENABLE_INTRS) {
2028			/*
2029			 * Enable all the Tx side interrupts
2030			 * writing 0 Enables all 64 TX interrupt levels
2031			 */
2032			writeq(0x0, &bar0->tx_traffic_mask);
2033		} else if (flag == DISABLE_INTRS) {
2034			/*
2035			 * Disable Tx Traffic Intrs in the general intr mask
2036			 * register.
2037			 */
2038			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
2039		}
2040	}
2041
2042	/*  Rx traffic interrupts */
2043	if (mask & RX_TRAFFIC_INTR) {
2044		intr_mask |= RXTRAFFIC_INT_M;
2045		if (flag == ENABLE_INTRS) {
2046			/* writing 0 Enables all 8 RX interrupt levels */
2047			writeq(0x0, &bar0->rx_traffic_mask);
2048		} else if (flag == DISABLE_INTRS) {
2049			/*
2050			 * Disable Rx Traffic Intrs in the general intr mask
2051			 * register.
2052			 */
2053			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
2054		}
2055	}
2056
2057	temp64 = readq(&bar0->general_int_mask);
2058	if (flag == ENABLE_INTRS)
2059		temp64 &= ~((u64)intr_mask);
2060	else
2061		temp64 = DISABLE_ALL_INTRS;
2062	writeq(temp64, &bar0->general_int_mask);
2063
2064	nic->general_int_mask = readq(&bar0->general_int_mask);
2065}
2066
2067/**
2068 *  verify_pcc_quiescent- Checks for PCC quiescent state
2069 *  @sp : private member of the device structure, which is a pointer to the
2070 *  s2io_nic structure.
2071 *  @flag: boolean controlling function path
2072 *  Return: 1 If PCC is quiescence
2073 *          0 If PCC is not quiescence
2074 */
2075static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2076{
2077	int ret = 0, herc;
2078	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2079	u64 val64 = readq(&bar0->adapter_status);
2080
2081	herc = (sp->device_type == XFRAME_II_DEVICE);
2082
2083	if (flag == false) {
2084		if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2085			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2086				ret = 1;
2087		} else {
2088			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2089				ret = 1;
2090		}
2091	} else {
2092		if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2093			if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2094			     ADAPTER_STATUS_RMAC_PCC_IDLE))
2095				ret = 1;
2096		} else {
2097			if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2098			     ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2099				ret = 1;
2100		}
2101	}
2102
2103	return ret;
2104}
2105/**
2106 *  verify_xena_quiescence - Checks whether the H/W is ready
2107 *  @sp : private member of the device structure, which is a pointer to the
2108 *  s2io_nic structure.
2109 *  Description: Returns whether the H/W is ready to go or not. Depending
2110 *  on whether adapter enable bit was written or not the comparison
2111 *  differs and the calling function passes the input argument flag to
2112 *  indicate this.
2113 *  Return: 1 If xena is quiescence
2114 *          0 If Xena is not quiescence
2115 */
2116
2117static int verify_xena_quiescence(struct s2io_nic *sp)
2118{
2119	int  mode;
2120	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2121	u64 val64 = readq(&bar0->adapter_status);
2122	mode = s2io_verify_pci_mode(sp);
2123
2124	if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2125		DBG_PRINT(ERR_DBG, "TDMA is not ready!\n");
2126		return 0;
2127	}
2128	if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2129		DBG_PRINT(ERR_DBG, "RDMA is not ready!\n");
2130		return 0;
2131	}
2132	if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2133		DBG_PRINT(ERR_DBG, "PFC is not ready!\n");
2134		return 0;
2135	}
2136	if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2137		DBG_PRINT(ERR_DBG, "TMAC BUF is not empty!\n");
2138		return 0;
2139	}
2140	if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2141		DBG_PRINT(ERR_DBG, "PIC is not QUIESCENT!\n");
2142		return 0;
2143	}
2144	if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2145		DBG_PRINT(ERR_DBG, "MC_DRAM is not ready!\n");
2146		return 0;
2147	}
2148	if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2149		DBG_PRINT(ERR_DBG, "MC_QUEUES is not ready!\n");
2150		return 0;
2151	}
2152	if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2153		DBG_PRINT(ERR_DBG, "M_PLL is not locked!\n");
2154		return 0;
2155	}
2156
2157	/*
2158	 * In PCI 33 mode, the P_PLL is not used, and therefore,
2159	 * the P_PLL_LOCK bit in the adapter_status register will
2160	 * not be asserted.
2161	 */
2162	if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2163	    sp->device_type == XFRAME_II_DEVICE &&
2164	    mode != PCI_MODE_PCI_33) {
2165		DBG_PRINT(ERR_DBG, "P_PLL is not locked!\n");
2166		return 0;
2167	}
2168	if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2169	      ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2170		DBG_PRINT(ERR_DBG, "RC_PRC is not QUIESCENT!\n");
2171		return 0;
2172	}
2173	return 1;
2174}
2175
2176/**
2177 * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
2178 * @sp: Pointer to device specifc structure
2179 * Description :
2180 * New procedure to clear mac address reading  problems on Alpha platforms
2181 *
2182 */
2183
2184static void fix_mac_address(struct s2io_nic *sp)
2185{
2186	struct XENA_dev_config __iomem *bar0 = sp->bar0;
2187	int i = 0;
2188
2189	while (fix_mac[i] != END_SIGN) {
2190		writeq(fix_mac[i++], &bar0->gpio_control);
2191		udelay(10);
2192		(void) readq(&bar0->gpio_control);
2193	}
2194}
2195
2196/**
2197 *  start_nic - Turns the device on
2198 *  @nic : device private variable.
2199 *  Description:
2200 *  This function actually turns the device on. Before this  function is
2201 *  called,all Registers are configured from their reset states
2202 *  and shared memory is allocated but the NIC is still quiescent. On
2203 *  calling this function, the device interrupts are cleared and the NIC is
2204 *  literally switched on by writing into the adapter control register.
2205 *  Return Value:
2206 *  SUCCESS on success and -1 on failure.
2207 */
2208
static int start_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	/*  PRC Initialization and configuration */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		/* Point the PRC of ring i at the ring's first Rx block. */
		writeq((u64)ring->rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		/* Clear the RxD backoff interval field, then set it to 0x1000. */
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	/* Honour the vlan_tag_strip module parameter. */
	if (vlan_tag_strip == 0) {
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		nic->vlan_strip_flag = 0;
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/* Enabling ECC Protection. */
	/* NOTE(review): the statement below clears ADAPTER_ECC_EN while the
	 * comment above says "Enabling" - confirm the bit's polarity against
	 * the Xframe register specification.
	 */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, "
			  "Adapter status reads: 0x%llx\n",
			  dev->name, (unsigned long long)val64);
		return FAILURE;
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Dont see link state interrupts initially on some switches,
		 * so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		/* Raw write at BAR0 offset 0x2700 (no named register);
		 * presumably part of the SXE-002 LED workaround above.
		 */
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
2313/**
2314 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2315 * @fifo_data: fifo data pointer
2316 * @txdlp: descriptor
2317 * @get_off: unused
2318 */
2319static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
2320					struct TxD *txdlp, int get_off)
2321{
2322	struct s2io_nic *nic = fifo_data->nic;
2323	struct sk_buff *skb;
2324	struct TxD *txds;
2325	u16 j, frg_cnt;
2326
2327	txds = txdlp;
2328	if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
2329		dma_unmap_single(&nic->pdev->dev,
2330				 (dma_addr_t)txds->Buffer_Pointer,
2331				 sizeof(u64), DMA_TO_DEVICE);
2332		txds++;
2333	}
2334
2335	skb = (struct sk_buff *)((unsigned long)txds->Host_Control);
2336	if (!skb) {
2337		memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2338		return NULL;
2339	}
2340	dma_unmap_single(&nic->pdev->dev, (dma_addr_t)txds->Buffer_Pointer,
2341			 skb_headlen(skb), DMA_TO_DEVICE);
2342	frg_cnt = skb_shinfo(skb)->nr_frags;
2343	if (frg_cnt) {
2344		txds++;
2345		for (j = 0; j < frg_cnt; j++, txds++) {
2346			const skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2347			if (!txds->Buffer_Pointer)
2348				break;
2349			dma_unmap_page(&nic->pdev->dev,
2350				       (dma_addr_t)txds->Buffer_Pointer,
2351				       skb_frag_size(frag), DMA_TO_DEVICE);
2352		}
2353	}
2354	memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2355	return skb;
2356}
2357
2358/**
2359 *  free_tx_buffers - Free all queued Tx buffers
2360 *  @nic : device private variable.
2361 *  Description:
2362 *  Free all queued Tx buffers.
2363 *  Return Value: void
2364 */
2365
2366static void free_tx_buffers(struct s2io_nic *nic)
2367{
2368	struct net_device *dev = nic->dev;
2369	struct sk_buff *skb;
2370	struct TxD *txdp;
2371	int i, j;
2372	int cnt = 0;
2373	struct config_param *config = &nic->config;
2374	struct mac_info *mac_control = &nic->mac_control;
2375	struct stat_block *stats = mac_control->stats_info;
2376	struct swStat *swstats = &stats->sw_stat;
2377
2378	for (i = 0; i < config->tx_fifo_num; i++) {
2379		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
2380		struct fifo_info *fifo = &mac_control->fifos[i];
2381		unsigned long flags;
2382
2383		spin_lock_irqsave(&fifo->tx_lock, flags);
2384		for (j = 0; j < tx_cfg->fifo_len; j++) {
2385			txdp = fifo->list_info[j].list_virt_addr;
2386			skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2387			if (skb) {
2388				swstats->mem_freed += skb->truesize;
2389				dev_kfree_skb_irq(skb);
2390				cnt++;
2391			}
2392		}
2393		DBG_PRINT(INTR_DBG,
2394			  "%s: forcibly freeing %d skbs on FIFO%d\n",
2395			  dev->name, cnt, i);
2396		fifo->tx_curr_get_info.offset = 0;
2397		fifo->tx_curr_put_info.offset = 0;
2398		spin_unlock_irqrestore(&fifo->tx_lock, flags);
2399	}
2400}
2401
2402/**
2403 *   stop_nic -  To stop the nic
2404 *   @nic : device private variable.
2405 *   Description:
2406 *   This function does exactly the opposite of what the start_nic()
2407 *   function does. This function is called to stop the device.
2408 *   Return Value:
2409 *   void.
2410 */
2411
2412static void stop_nic(struct s2io_nic *nic)
2413{
2414	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2415	register u64 val64 = 0;
2416	u16 interruptible;
2417
2418	/*  Disable all interrupts */
2419	en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2420	interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2421	interruptible |= TX_PIC_INTR;
2422	en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2423
2424	/* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2425	val64 = readq(&bar0->adapter_control);
2426	val64 &= ~(ADAPTER_CNTL_EN);
2427	writeq(val64, &bar0->adapter_control);
2428}
2429
2430/**
2431 *  fill_rx_buffers - Allocates the Rx side skbs
2432 *  @nic : device private variable.
2433 *  @ring: per ring structure
2434 *  @from_card_up: If this is true, we will map the buffer to get
2435 *     the dma address for buf0 and buf1 to give it to the card.
2436 *     Else we will sync the already mapped buffer to give it to the card.
2437 *  Description:
2438 *  The function allocates Rx side skbs and puts the physical
2439 *  address of these buffers into the RxD buffer pointers, so that the NIC
2440 *  can DMA the received frame into these locations.
2441 *  The NIC supports 3 receive modes, viz
2442 *  1. single buffer,
2443 *  2. three buffer and
2444 *  3. Five buffer modes.
2445 *  Each mode defines how many fragments the received frame will be split
2446 *  up into by the NIC. The frame is split into L3 header, L4 Header,
2447 *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2448 *  is split into 3 fragments. As of now only single buffer mode is
2449 *  supported.
2450 *   Return Value:
2451 *  SUCCESS on success or an appropriate -ve value on failure.
2452 */
static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
			   int from_card_up)
{
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	int off, size, block_no, block_no1;
	u32 alloc_tab = 0;
	u32 alloc_cnt;
	u64 tmp;
	struct buffAdd *ba;
	struct RxD_t *first_rxdp = NULL;
	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;
	struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat;

	/* Number of buffers needed to bring the ring back to capacity. */
	alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;

	block_no1 = ring->rx_curr_get_info.block_index;
	while (alloc_tab < alloc_cnt) {
		block_no = ring->rx_curr_put_info.block_index;

		off = ring->rx_curr_put_info.offset;

		rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;

		/* Put pointer caught up with the get pointer while the
		 * descriptor still holds an skb: the ring is full.
		 */
		if ((block_no == block_no1) &&
		    (off == ring->rx_curr_get_info.offset) &&
		    (rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put info equated\n",
				  ring->dev->name);
			goto end;
		}
		/* End of the current Rx block: advance (with wrap) to the
		 * next block.  NOTE(review): the boundary test here uses
		 * rxd_count while the post-fill wrap below uses
		 * rxd_count + 1 - confirm against the RxD block layout.
		 */
		if (off && (off == ring->rxd_count)) {
			ring->rx_curr_put_info.block_index++;
			if (ring->rx_curr_put_info.block_index ==
			    ring->block_count)
				ring->rx_curr_put_info.block_index = 0;
			block_no = ring->rx_curr_put_info.block_index;
			off = 0;
			ring->rx_curr_put_info.offset = off;
			rxdp = ring->rx_blocks[block_no].block_virt_addr;
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
				  ring->dev->name, rxdp);

		}

		/* In 3B mode, stop if the adapter still owns this RxD and
		 * its marker bit (bit 0 of Control_2) is set.
		 */
		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
		    ((ring->rxd_mode == RXD_MODE_3B) &&
		     (rxdp->Control_2 & s2BIT(0)))) {
			ring->rx_curr_put_info.offset = off;
			goto end;
		}
		/* calculate size of skb based on ring mode */
		size = ring->mtu +
			HEADER_ETHERNET_II_802_3_SIZE +
			HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
		if (ring->rxd_mode == RXD_MODE_1)
			size += NET_IP_ALIGN;
		else
			size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;

		/* allocate skb */
		skb = netdev_alloc_skb(nic->dev, size);
		if (!skb) {
			DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
				  ring->dev->name);
			/* Publish whatever we did fill before bailing out. */
			if (first_rxdp) {
				dma_wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			swstats->mem_alloc_fail_cnt++;

			return -ENOMEM ;
		}
		swstats->mem_allocated += skb->truesize;

		if (ring->rxd_mode == RXD_MODE_1) {
			/* 1 buffer mode - normal operation mode */
			rxdp1 = (struct RxD1 *)rxdp;
			memset(rxdp, 0, sizeof(struct RxD1));
			skb_reserve(skb, NET_IP_ALIGN);
			/* NOTE(review): mapping uses ring->pdev but the error
			 * check below uses nic->pdev - presumably the same
			 * device; confirm.
			 */
			rxdp1->Buffer0_ptr =
				dma_map_single(&ring->pdev->dev, skb->data,
					       size - NET_IP_ALIGN,
					       DMA_FROM_DEVICE);
			if (dma_mapping_error(&nic->pdev->dev, rxdp1->Buffer0_ptr))
				goto pci_map_failed;

			rxdp->Control_2 =
				SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
			rxdp->Host_Control = (unsigned long)skb;
		} else if (ring->rxd_mode == RXD_MODE_3B) {
			/*
			 * 2 buffer mode -
			 * 2 buffer mode provides 128
			 * byte aligned receive buffers.
			 */

			rxdp3 = (struct RxD3 *)rxdp;
			/* save buffer pointers to avoid frequent dma mapping */
			Buffer0_ptr = rxdp3->Buffer0_ptr;
			Buffer1_ptr = rxdp3->Buffer1_ptr;
			memset(rxdp, 0, sizeof(struct RxD3));
			/* restore the buffer pointers for dma sync*/
			rxdp3->Buffer0_ptr = Buffer0_ptr;
			rxdp3->Buffer1_ptr = Buffer1_ptr;

			ba = &ring->ba[block_no][off];
			skb_reserve(skb, BUF0_LEN);
			/* Align skb->data up to an (ALIGN_SIZE + 1)-byte
			 * boundary; assumes ALIGN_SIZE is 2^n - 1 - TODO
			 * confirm its definition.
			 */
			tmp = (u64)(unsigned long)skb->data;
			tmp += ALIGN_SIZE;
			tmp &= ~ALIGN_SIZE;
			skb->data = (void *) (unsigned long)tmp;
			skb_reset_tail_pointer(skb);

			if (from_card_up) {
				rxdp3->Buffer0_ptr =
					dma_map_single(&ring->pdev->dev,
						       ba->ba_0, BUF0_LEN,
						       DMA_FROM_DEVICE);
				if (dma_mapping_error(&nic->pdev->dev, rxdp3->Buffer0_ptr))
					goto pci_map_failed;
			} else
				dma_sync_single_for_device(&ring->pdev->dev,
							   (dma_addr_t)rxdp3->Buffer0_ptr,
							   BUF0_LEN,
							   DMA_FROM_DEVICE);

			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
			if (ring->rxd_mode == RXD_MODE_3B) {
				/* Two buffer mode */

				/*
				 * Buffer2 will have L3/L4 header plus
				 * L4 payload
				 */
				rxdp3->Buffer2_ptr = dma_map_single(&ring->pdev->dev,
								    skb->data,
								    ring->mtu + 4,
								    DMA_FROM_DEVICE);

				if (dma_mapping_error(&nic->pdev->dev, rxdp3->Buffer2_ptr))
					goto pci_map_failed;

				if (from_card_up) {
					rxdp3->Buffer1_ptr =
						dma_map_single(&ring->pdev->dev,
							       ba->ba_1,
							       BUF1_LEN,
							       DMA_FROM_DEVICE);

					if (dma_mapping_error(&nic->pdev->dev,
							      rxdp3->Buffer1_ptr)) {
						/* Undo the Buffer2 mapping made above. */
						dma_unmap_single(&ring->pdev->dev,
								 (dma_addr_t)(unsigned long)
								 skb->data,
								 ring->mtu + 4,
								 DMA_FROM_DEVICE);
						goto pci_map_failed;
					}
				}
				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
					(ring->mtu + 4);
			}
			rxdp->Control_2 |= s2BIT(0);
			rxdp->Host_Control = (unsigned long) (skb);
		}
		if (alloc_tab & ((1 << rxsync_frequency) - 1))
			rxdp->Control_1 |= RXD_OWN_XENA;
		off++;
		if (off == (ring->rxd_count + 1))
			off = 0;
		ring->rx_curr_put_info.offset = off;

		rxdp->Control_2 |= SET_RXD_MARKER;
		/* Every 2^rxsync_frequency descriptors, publish the batch
		 * filled so far by flipping ownership of its first RxD.
		 */
		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
			if (first_rxdp) {
				dma_wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			first_rxdp = rxdp;
		}
		ring->rx_bufs_left += 1;
		alloc_tab++;
	}

end:
	/* Transfer ownership of first descriptor to adapter just before
	 * exiting. Before that, use memory barrier so that ownership
	 * and other fields are seen by adapter correctly.
	 */
	if (first_rxdp) {
		dma_wmb();
		first_rxdp->Control_1 |= RXD_OWN_XENA;
	}

	return SUCCESS;

pci_map_failed:
	swstats->pci_map_fail_cnt++;
	swstats->mem_freed += skb->truesize;
	dev_kfree_skb_irq(skb);
	return -ENOMEM;
}
2659
/* Free every skb still attached to an RxD in block @blk of ring
 * @ring_no, unmapping its DMA buffers according to the descriptor mode,
 * and decrement the ring's rx_bufs_left for each one freed.
 */
static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
{
	struct net_device *dev = sp->dev;
	int j;
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;
	struct mac_info *mac_control = &sp->mac_control;
	struct stat_block *stats = mac_control->stats_info;
	struct swStat *swstats = &stats->sw_stat;

	for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
		rxdp = mac_control->rings[ring_no].
			rx_blocks[blk].rxds[j].virt_addr;
		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
		if (!skb)
			continue;
		if (sp->rxd_mode == RXD_MODE_1) {
			/* Single buffer: one mapping covering the whole frame. */
			rxdp1 = (struct RxD1 *)rxdp;
			dma_unmap_single(&sp->pdev->dev,
					 (dma_addr_t)rxdp1->Buffer0_ptr,
					 dev->mtu +
					 HEADER_ETHERNET_II_802_3_SIZE +
					 HEADER_802_2_SIZE + HEADER_SNAP_SIZE,
					 DMA_FROM_DEVICE);
			memset(rxdp, 0, sizeof(struct RxD1));
		} else if (sp->rxd_mode == RXD_MODE_3B) {
			/* Two-buffer mode: unmap all three buffer pointers. */
			rxdp3 = (struct RxD3 *)rxdp;
			dma_unmap_single(&sp->pdev->dev,
					 (dma_addr_t)rxdp3->Buffer0_ptr,
					 BUF0_LEN, DMA_FROM_DEVICE);
			dma_unmap_single(&sp->pdev->dev,
					 (dma_addr_t)rxdp3->Buffer1_ptr,
					 BUF1_LEN, DMA_FROM_DEVICE);
			dma_unmap_single(&sp->pdev->dev,
					 (dma_addr_t)rxdp3->Buffer2_ptr,
					 dev->mtu + 4, DMA_FROM_DEVICE);
			memset(rxdp, 0, sizeof(struct RxD3));
		}
		swstats->mem_freed += skb->truesize;
		dev_kfree_skb(skb);
		mac_control->rings[ring_no].rx_bufs_left -= 1;
	}
}
2705
2706/**
2707 *  free_rx_buffers - Frees all Rx buffers
2708 *  @sp: device private variable.
2709 *  Description:
2710 *  This function will free all Rx buffers allocated by host.
2711 *  Return Value:
2712 *  NONE.
2713 */
2714
2715static void free_rx_buffers(struct s2io_nic *sp)
2716{
2717	struct net_device *dev = sp->dev;
2718	int i, blk = 0, buf_cnt = 0;
2719	struct config_param *config = &sp->config;
2720	struct mac_info *mac_control = &sp->mac_control;
2721
2722	for (i = 0; i < config->rx_ring_num; i++) {
2723		struct ring_info *ring = &mac_control->rings[i];
2724
2725		for (blk = 0; blk < rx_ring_sz[i]; blk++)
2726			free_rxd_blk(sp, i, blk);
2727
2728		ring->rx_curr_put_info.block_index = 0;
2729		ring->rx_curr_get_info.block_index = 0;
2730		ring->rx_curr_put_info.offset = 0;
2731		ring->rx_curr_get_info.offset = 0;
2732		ring->rx_bufs_left = 0;
2733		DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
2734			  dev->name, buf_cnt, i);
2735	}
2736}
2737
2738static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
2739{
2740	if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2741		DBG_PRINT(INFO_DBG, "%s: Out of memory in Rx Intr!!\n",
2742			  ring->dev->name);
2743	}
2744	return 0;
2745}
2746
2747/**
2748 * s2io_poll_msix - Rx interrupt handler for NAPI support
2749 * @napi : pointer to the napi structure.
2750 * @budget : The number of packets that were budgeted to be processed
2751 * during  one pass through the 'Poll" function.
2752 * Description:
 * Comes into the picture only if NAPI support has been incorporated. It does
 * the same thing that rx_intr_handler does, but not in an interrupt context,
 * and it will process only a given number of packets.
2756 * Return value:
2757 * 0 on success and 1 if there are No Rx packets to be processed.
2758 */
2759
static int s2io_poll_msix(struct napi_struct *napi, int budget)
{
	struct ring_info *ring = container_of(napi, struct ring_info, napi);
	struct net_device *dev = ring->dev;
	int pkts_processed = 0;
	u8 __iomem *addr = NULL;
	u8 val8 = 0;
	struct s2io_nic *nic = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	int budget_org = budget;

	if (unlikely(!is_s2io_card_up(nic)))
		return 0;

	pkts_processed = rx_intr_handler(ring, budget);
	/* Replenish the Rx descriptors consumed above. */
	s2io_chk_rx_buffers(nic, ring);

	if (pkts_processed < budget_org) {
		napi_complete_done(napi, pkts_processed);
		/*Re Enable MSI-Rx Vector*/
		/* NOTE(review): the per-ring mask byte sits at offset
		 * (7 - ring_no) within xmsi_mask_reg, and 0x3f/0xbf are
		 * magic unmask values - confirm against the Xframe
		 * register specification.
		 */
		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
		addr += 7 - ring->ring_no;
		val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
		writeb(val8, addr);
		val8 = readb(addr);	/* read back (flushes the posted write) */
	}
	return pkts_processed;
}
2788
/* NAPI poll handler for the legacy-interrupt (INTA) case: one napi
 * context services every Rx ring, sharing the budget between them.
 */
static int s2io_poll_inta(struct napi_struct *napi, int budget)
{
	struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
	int pkts_processed = 0;
	int ring_pkts_processed, i;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	int budget_org = budget;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	if (unlikely(!is_s2io_card_up(nic)))
		return 0;

	/* Drain each ring in turn, refilling its buffers, until either
	 * all rings are serviced or the budget is exhausted.
	 */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];
		ring_pkts_processed = rx_intr_handler(ring, budget);
		s2io_chk_rx_buffers(nic, ring);
		pkts_processed += ring_pkts_processed;
		budget -= ring_pkts_processed;
		if (budget <= 0)
			break;
	}
	if (pkts_processed < budget_org) {
		napi_complete_done(napi, pkts_processed);
		/* Re enable the Rx interrupts for the ring */
		writeq(0, &bar0->rx_traffic_mask);
		readl(&bar0->rx_traffic_mask);	/* read back (flushes the write) */
	}
	return pkts_processed;
}
2819
2820#ifdef CONFIG_NET_POLL_CONTROLLER
2821/**
2822 * s2io_netpoll - netpoll event handler entry point
2823 * @dev : pointer to the device structure.
2824 * Description:
2825 * 	This function will be called by upper layer to check for events on the
2826 * interface in situations where interrupts are disabled. It is used for
2827 * specific in-kernel networking tasks, such as remote consoles and kernel
2828 * debugging over the network (example netdump in RedHat).
2829 */
static void s2io_netpoll(struct net_device *dev)
{
	struct s2io_nic *nic = netdev_priv(dev);
	const int irq = nic->pdev->irq;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
	int i;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	if (pci_channel_offline(nic->pdev))
		return;

	disable_irq(irq);

	/* Acknowledge all pending Rx/Tx traffic interrupts. */
	writeq(val64, &bar0->rx_traffic_int);
	writeq(val64, &bar0->tx_traffic_int);

	/* we need to free up the transmitted skbufs or else netpoll will
	 * run out of skbs and will fail and eventually netpoll application such
	 * as netdump will fail.
	 */
	for (i = 0; i < config->tx_fifo_num; i++)
		tx_intr_handler(&mac_control->fifos[i]);

	/* check for received packet and indicate up to network */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		/* NOTE(review): rx_intr_handler() returns immediately when
		 * budget <= 0, so this call appears to be a no-op - confirm
		 * the intended netpoll behavior.
		 */
		rx_intr_handler(ring, 0);
	}

	/* Refill the Rx rings. */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
			DBG_PRINT(INFO_DBG,
				  "%s: Out of memory in Rx Netpoll!!\n",
				  dev->name);
			break;
		}
	}
	enable_irq(irq);
}
2874#endif
2875
2876/**
2877 *  rx_intr_handler - Rx interrupt handler
2878 *  @ring_data: per ring structure.
2879 *  @budget: budget for napi processing.
2880 *  Description:
2881 *  If the interrupt is because of a received frame or if the
2882 *  receive ring contains fresh as yet un-processed frames,this function is
2883 *  called. It picks out the RxD at which place the last Rx processing had
2884 *  stopped and sends the skb to the OSM's Rx handler and then increments
2885 *  the offset.
2886 *  Return Value:
2887 *  No. of napi packets processed.
2888 */
static int rx_intr_handler(struct ring_info *ring_data, int budget)
{
	int get_block, put_block;
	struct rx_curr_get_info get_info, put_info;
	struct RxD_t *rxdp;
	struct sk_buff *skb;
	int pkt_cnt = 0, napi_pkts = 0;
	int i;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;

	if (budget <= 0)
		return napi_pkts;

	/* Snapshot the get/put pointers; get_info advances as we consume. */
	get_info = ring_data->rx_curr_get_info;
	get_block = get_info.block_index;
	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
	put_block = put_info.block_index;
	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;

	while (RXD_IS_UP2DT(rxdp)) {
		/*
		 * If we are next to the put index then it's a
		 * ring-full condition.
		 */
		if ((get_block == put_block) &&
		    (get_info.offset + 1) == put_info.offset) {
			DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
				  ring_data->dev->name);
			break;
		}
		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
		if (skb == NULL) {
			DBG_PRINT(ERR_DBG, "%s: NULL skb in Rx Intr\n",
				  ring_data->dev->name);
			return 0;
		}
		/* Unmap (or sync) the DMA buffers per descriptor mode. */
		if (ring_data->rxd_mode == RXD_MODE_1) {
			rxdp1 = (struct RxD1 *)rxdp;
			dma_unmap_single(&ring_data->pdev->dev,
					 (dma_addr_t)rxdp1->Buffer0_ptr,
					 ring_data->mtu +
					 HEADER_ETHERNET_II_802_3_SIZE +
					 HEADER_802_2_SIZE +
					 HEADER_SNAP_SIZE,
					 DMA_FROM_DEVICE);
		} else if (ring_data->rxd_mode == RXD_MODE_3B) {
			rxdp3 = (struct RxD3 *)rxdp;
			dma_sync_single_for_cpu(&ring_data->pdev->dev,
						(dma_addr_t)rxdp3->Buffer0_ptr,
						BUF0_LEN, DMA_FROM_DEVICE);
			dma_unmap_single(&ring_data->pdev->dev,
					 (dma_addr_t)rxdp3->Buffer2_ptr,
					 ring_data->mtu + 4, DMA_FROM_DEVICE);
		}
		prefetch(skb->data);
		/* Hand the frame up the stack. */
		rx_osm_handler(ring_data, rxdp);
		get_info.offset++;
		ring_data->rx_curr_get_info.offset = get_info.offset;
		rxdp = ring_data->rx_blocks[get_block].
			rxds[get_info.offset].virt_addr;
		/* Wrap to the next Rx block at the end of this one. */
		if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
			get_info.offset = 0;
			ring_data->rx_curr_get_info.offset = get_info.offset;
			get_block++;
			if (get_block == ring_data->block_count)
				get_block = 0;
			ring_data->rx_curr_get_info.block_index = get_block;
			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
		}

		/* The budget only constrains us when NAPI is in use. */
		if (ring_data->nic->config.napi) {
			budget--;
			napi_pkts++;
			if (!budget)
				break;
		}
		pkt_cnt++;
		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
			break;
	}
	if (ring_data->lro) {
		/* Clear all LRO sessions before exiting */
		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
			struct lro *lro = &ring_data->lro0_n[i];
			if (lro->in_use) {
				update_L3L4_header(ring_data->nic, lro);
				queue_rx_frame(lro->parent, lro->vlan_tag);
				clear_lro_session(lro);
			}
		}
	}
	return napi_pkts;
}
2983
2984/**
2985 *  tx_intr_handler - Transmit interrupt handler
2986 *  @fifo_data : fifo data pointer
2987 *  Description:
2988 *  If an interrupt was raised to indicate DMA complete of the
2989 *  Tx packet, this function is called. It identifies the last TxD
2990 *  whose buffer was freed and frees all skbs whose data have already
2991 *  DMA'ed into the NICs internal memory.
2992 *  Return Value:
2993 *  NONE
2994 */
2995
static void tx_intr_handler(struct fifo_info *fifo_data)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct tx_curr_get_info get_info, put_info;
	struct sk_buff *skb = NULL;
	struct TxD *txdlp;
	int pkt_cnt = 0;
	unsigned long flags = 0;
	u8 err_mask;
	struct stat_block *stats = nic->mac_control.stats_info;
	struct swStat *swstats = &stats->sw_stat;

	/* Another context is already reaping this FIFO; nothing to do. */
	if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
		return;

	get_info = fifo_data->tx_curr_get_info;
	memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
	txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
	/* Reap descriptors the NIC has released (ownership bit clear),
	 * stopping at the put pointer or at an empty descriptor.
	 */
	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
	       (get_info.offset != put_info.offset) &&
	       (txdlp->Host_Control)) {
		/* Check for TxD errors */
		if (txdlp->Control_1 & TXD_T_CODE) {
			unsigned long long err;
			err = txdlp->Control_1 & TXD_T_CODE;
			if (err & 0x1) {
				swstats->parity_err_cnt++;
			}

			/* update t_code statistics */
			err_mask = err >> 48;
			switch (err_mask) {
			case 2:
				swstats->tx_buf_abort_cnt++;
				break;

			case 3:
				swstats->tx_desc_abort_cnt++;
				break;

			case 7:
				swstats->tx_parity_err_cnt++;
				break;

			case 10:
				swstats->tx_link_loss_cnt++;
				break;

			case 15:
				swstats->tx_list_proc_err_cnt++;
				break;
			}
		}

		/* Unmap the TxD list's buffers and retrieve its skb. */
		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
		if (skb == NULL) {
			spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
			DBG_PRINT(ERR_DBG, "%s: NULL skb in Tx Free Intr\n",
				  __func__);
			return;
		}
		pkt_cnt++;

		/* Updating the statistics block */
		swstats->mem_freed += skb->truesize;
		dev_consume_skb_irq(skb);

		/* Advance the get pointer, wrapping past the extra slot. */
		get_info.offset++;
		if (get_info.offset == get_info.fifo_len + 1)
			get_info.offset = 0;
		txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
		fifo_data->tx_curr_get_info.offset = get_info.offset;
	}

	/* Descriptors were reclaimed; the Tx queue may be restartable. */
	s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);

	spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
}
3074
3075/**
3076 *  s2io_mdio_write - Function to write in to MDIO registers
3077 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3078 *  @addr     : address value
3079 *  @value    : data value
3080 *  @dev      : pointer to net_device structure
3081 *  Description:
3082 *  This function is used to write values to the MDIO registers
3083 *  NONE
3084 */
3085static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value,
3086			    struct net_device *dev)
3087{
3088	u64 val64;
3089	struct s2io_nic *sp = netdev_priv(dev);
3090	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3091
3092	/* address transaction */
3093	val64 = MDIO_MMD_INDX_ADDR(addr) |
3094		MDIO_MMD_DEV_ADDR(mmd_type) |
3095		MDIO_MMS_PRT_ADDR(0x0);
3096	writeq(val64, &bar0->mdio_control);
3097	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3098	writeq(val64, &bar0->mdio_control);
3099	udelay(100);
3100
3101	/* Data transaction */
3102	val64 = MDIO_MMD_INDX_ADDR(addr) |
3103		MDIO_MMD_DEV_ADDR(mmd_type) |
3104		MDIO_MMS_PRT_ADDR(0x0) |
3105		MDIO_MDIO_DATA(value) |
3106		MDIO_OP(MDIO_OP_WRITE_TRANS);
3107	writeq(val64, &bar0->mdio_control);
3108	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3109	writeq(val64, &bar0->mdio_control);
3110	udelay(100);
3111
3112	val64 = MDIO_MMD_INDX_ADDR(addr) |
3113		MDIO_MMD_DEV_ADDR(mmd_type) |
3114		MDIO_MMS_PRT_ADDR(0x0) |
3115		MDIO_OP(MDIO_OP_READ_TRANS);
3116	writeq(val64, &bar0->mdio_control);
3117	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3118	writeq(val64, &bar0->mdio_control);
3119	udelay(100);
3120}
3121
3122/**
3123 *  s2io_mdio_read - Function to write in to MDIO registers
3124 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3125 *  @addr     : address value
3126 *  @dev      : pointer to net_device structure
3127 *  Description:
3128 *  This function is used to read values to the MDIO registers
3129 *  NONE
3130 */
3131static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3132{
3133	u64 val64 = 0x0;
3134	u64 rval64 = 0x0;
3135	struct s2io_nic *sp = netdev_priv(dev);
3136	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3137
3138	/* address transaction */
3139	val64 = val64 | (MDIO_MMD_INDX_ADDR(addr)
3140			 | MDIO_MMD_DEV_ADDR(mmd_type)
3141			 | MDIO_MMS_PRT_ADDR(0x0));
3142	writeq(val64, &bar0->mdio_control);
3143	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3144	writeq(val64, &bar0->mdio_control);
3145	udelay(100);
3146
3147	/* Data transaction */
3148	val64 = MDIO_MMD_INDX_ADDR(addr) |
3149		MDIO_MMD_DEV_ADDR(mmd_type) |
3150		MDIO_MMS_PRT_ADDR(0x0) |
3151		MDIO_OP(MDIO_OP_READ_TRANS);
3152	writeq(val64, &bar0->mdio_control);
3153	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3154	writeq(val64, &bar0->mdio_control);
3155	udelay(100);
3156
3157	/* Read the value from regs */
3158	rval64 = readq(&bar0->mdio_control);
3159	rval64 = rval64 & 0xFFFF0000;
3160	rval64 = rval64 >> 16;
3161	return rval64;
3162}
3163
3164/**
3165 *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
3166 *  @counter      : counter value to be updated
3167 *  @regs_stat    : registers status
3168 *  @index        : index
3169 *  @flag         : flag to indicate the status
3170 *  @type         : counter type
3171 *  Description:
3172 *  This function is to check the status of the xpak counters value
3173 *  NONE
3174 */
3175
3176static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index,
3177				  u16 flag, u16 type)
3178{
3179	u64 mask = 0x3;
3180	u64 val64;
3181	int i;
3182	for (i = 0; i < index; i++)
3183		mask = mask << 0x2;
3184
3185	if (flag > 0) {
3186		*counter = *counter + 1;
3187		val64 = *regs_stat & mask;
3188		val64 = val64 >> (index * 0x2);
3189		val64 = val64 + 1;
3190		if (val64 == 3) {
3191			switch (type) {
3192			case 1:
3193				DBG_PRINT(ERR_DBG,
3194					  "Take Xframe NIC out of service.\n");
3195				DBG_PRINT(ERR_DBG,
3196"Excessive temperatures may result in premature transceiver failure.\n");
3197				break;
3198			case 2:
3199				DBG_PRINT(ERR_DBG,
3200					  "Take Xframe NIC out of service.\n");
3201				DBG_PRINT(ERR_DBG,
3202"Excessive bias currents may indicate imminent laser diode failure.\n");
3203				break;
3204			case 3:
3205				DBG_PRINT(ERR_DBG,
3206					  "Take Xframe NIC out of service.\n");
3207				DBG_PRINT(ERR_DBG,
3208"Excessive laser output power may saturate far-end receiver.\n");
3209				break;
3210			default:
3211				DBG_PRINT(ERR_DBG,
3212					  "Incorrect XPAK Alarm type\n");
3213			}
3214			val64 = 0x0;
3215		}
3216		val64 = val64 << (index * 0x2);
3217		*regs_stat = (*regs_stat & (~mask)) | (val64);
3218
3219	} else {
3220		*regs_stat = *regs_stat & (~mask);
3221	}
3222}
3223
3224/**
3225 *  s2io_updt_xpak_counter - Function to update the xpak counters
3226 *  @dev         : pointer to net_device struct
3227 *  Description:
3228 *  This function is to upate the status of the xpak counters value
3229 *  NONE
3230 */
3231static void s2io_updt_xpak_counter(struct net_device *dev)
3232{
3233	u16 flag  = 0x0;
3234	u16 type  = 0x0;
3235	u16 val16 = 0x0;
3236	u64 val64 = 0x0;
3237	u64 addr  = 0x0;
3238
3239	struct s2io_nic *sp = netdev_priv(dev);
3240	struct stat_block *stats = sp->mac_control.stats_info;
3241	struct xpakStat *xstats = &stats->xpak_stat;
3242
3243	/* Check the communication with the MDIO slave */
3244	addr = MDIO_CTRL1;
3245	val64 = 0x0;
3246	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3247	if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
3248		DBG_PRINT(ERR_DBG,
3249			  "ERR: MDIO slave access failed - Returned %llx\n",
3250			  (unsigned long long)val64);
3251		return;
3252	}
3253
3254	/* Check for the expected value of control reg 1 */
3255	if (val64 != MDIO_CTRL1_SPEED10G) {
3256		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - "
3257			  "Returned: %llx- Expected: 0x%x\n",
3258			  (unsigned long long)val64, MDIO_CTRL1_SPEED10G);
3259		return;
3260	}
3261
3262	/* Loading the DOM register to MDIO register */
3263	addr = 0xA100;
3264	s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
3265	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3266
3267	/* Reading the Alarm flags */
3268	addr = 0xA070;
3269	val64 = 0x0;
3270	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3271
3272	flag = CHECKBIT(val64, 0x7);
3273	type = 1;
3274	s2io_chk_xpak_counter(&xstats->alarm_transceiver_temp_high,
3275			      &xstats->xpak_regs_stat,
3276			      0x0, flag, type);
3277
3278	if (CHECKBIT(val64, 0x6))
3279		xstats->alarm_transceiver_temp_low++;
3280
3281	flag = CHECKBIT(val64, 0x3);
3282	type = 2;
3283	s2io_chk_xpak_counter(&xstats->alarm_laser_bias_current_high,
3284			      &xstats->xpak_regs_stat,
3285			      0x2, flag, type);
3286
3287	if (CHECKBIT(val64, 0x2))
3288		xstats->alarm_laser_bias_current_low++;
3289
3290	flag = CHECKBIT(val64, 0x1);
3291	type = 3;
3292	s2io_chk_xpak_counter(&xstats->alarm_laser_output_power_high,
3293			      &xstats->xpak_regs_stat,
3294			      0x4, flag, type);
3295
3296	if (CHECKBIT(val64, 0x0))
3297		xstats->alarm_laser_output_power_low++;
3298
3299	/* Reading the Warning flags */
3300	addr = 0xA074;
3301	val64 = 0x0;
3302	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
3303
3304	if (CHECKBIT(val64, 0x7))
3305		xstats->warn_transceiver_temp_high++;
3306
3307	if (CHECKBIT(val64, 0x6))
3308		xstats->warn_transceiver_temp_low++;
3309
3310	if (CHECKBIT(val64, 0x3))
3311		xstats->warn_laser_bias_current_high++;
3312
3313	if (CHECKBIT(val64, 0x2))
3314		xstats->warn_laser_bias_current_low++;
3315
3316	if (CHECKBIT(val64, 0x1))
3317		xstats->warn_laser_output_power_high++;
3318
3319	if (CHECKBIT(val64, 0x0))
3320		xstats->warn_laser_output_power_low++;
3321}
3322
3323/**
3324 *  wait_for_cmd_complete - waits for a command to complete.
3325 *  @addr: address
3326 *  @busy_bit: bit to check for busy
3327 *  @bit_state: state to check
3328 *  @may_sleep: parameter indicates if sleeping when waiting for
3329 *  command complete
3330 *  Description: Function that waits for a command to Write into RMAC
3331 *  ADDR DATA registers to be completed and returns either success or
3332 *  error depending on whether the command was complete or not.
3333 *  Return value:
3334 *   SUCCESS on success and FAILURE on failure.
3335 */
3336
3337static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3338				 int bit_state, bool may_sleep)
3339{
3340	int ret = FAILURE, cnt = 0, delay = 1;
3341	u64 val64;
3342
3343	if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3344		return FAILURE;
3345
3346	do {
3347		val64 = readq(addr);
3348		if (bit_state == S2IO_BIT_RESET) {
3349			if (!(val64 & busy_bit)) {
3350				ret = SUCCESS;
3351				break;
3352			}
3353		} else {
3354			if (val64 & busy_bit) {
3355				ret = SUCCESS;
3356				break;
3357			}
3358		}
3359
3360		if (!may_sleep)
3361			mdelay(delay);
3362		else
3363			msleep(delay);
3364
3365		if (++cnt >= 10)
3366			delay = 50;
3367	} while (cnt < 20);
3368	return ret;
3369}
3370/**
3371 * check_pci_device_id - Checks if the device id is supported
3372 * @id : device id
3373 * Description: Function to check if the pci device id is supported by driver.
3374 * Return value: Actual device id if supported else PCI_ANY_ID
3375 */
3376static u16 check_pci_device_id(u16 id)
3377{
3378	switch (id) {
3379	case PCI_DEVICE_ID_HERC_WIN:
3380	case PCI_DEVICE_ID_HERC_UNI:
3381		return XFRAME_II_DEVICE;
3382	case PCI_DEVICE_ID_S2IO_UNI:
3383	case PCI_DEVICE_ID_S2IO_WIN:
3384		return XFRAME_I_DEVICE;
3385	default:
3386		return PCI_ANY_ID;
3387	}
3388}
3389
3390/**
3391 *  s2io_reset - Resets the card.
3392 *  @sp : private member of the device structure.
3393 *  Description: Function to Reset the card. This function then also
3394 *  restores the previously saved PCI configuration space registers as
3395 *  the card reset also resets the configuration space.
3396 *  Return value:
3397 *  void.
3398 */
3399
3400static void s2io_reset(struct s2io_nic *sp)
3401{
3402	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3403	u64 val64;
3404	u16 subid, pci_cmd;
3405	int i;
3406	u16 val16;
3407	unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3408	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3409	struct stat_block *stats;
3410	struct swStat *swstats;
3411
3412	DBG_PRINT(INIT_DBG, "%s: Resetting XFrame card %s\n",
3413		  __func__, pci_name(sp->pdev));
3414
3415	/* Back up  the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
3416	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
3417
3418	val64 = SW_RESET_ALL;
3419	writeq(val64, &bar0->sw_reset);
3420	if (strstr(sp->product_name, "CX4"))
3421		msleep(750);
3422	msleep(250);
3423	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3424
3425		/* Restore the PCI state saved during initialization. */
3426		pci_restore_state(sp->pdev);
3427		pci_save_state(sp->pdev);
3428		pci_read_config_word(sp->pdev, 0x2, &val16);
3429		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3430			break;
3431		msleep(200);
3432	}
3433
3434	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID)
3435		DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__);
3436
3437	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3438
3439	s2io_init_pci(sp);
3440
3441	/* Set swapper to enable I/O register access */
3442	s2io_set_swapper(sp);
3443
3444	/* restore mac_addr entries */
3445	do_s2io_restore_unicast_mc(sp);
3446
3447	/* Restore the MSIX table entries from local variables */
3448	restore_xmsi_data(sp);
3449
3450	/* Clear certain PCI/PCI-X fields after reset */
3451	if (sp->device_type == XFRAME_II_DEVICE) {
3452		/* Clear "detected parity error" bit */
3453		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3454
3455		/* Clearing PCIX Ecc status register */
3456		pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3457
3458		/* Clearing PCI_STATUS error reflected here */
3459		writeq(s2BIT(62), &bar0->txpic_int_reg);
3460	}
3461
3462	/* Reset device statistics maintained by OS */
3463	memset(&sp->stats, 0, sizeof(struct net_device_stats));
3464
3465	stats = sp->mac_control.stats_info;
3466	swstats = &stats->sw_stat;
3467
3468	/* save link up/down time/cnt, reset/memory/watchdog cnt */
3469	up_cnt = swstats->link_up_cnt;
3470	down_cnt = swstats->link_down_cnt;
3471	up_time = swstats->link_up_time;
3472	down_time = swstats->link_down_time;
3473	reset_cnt = swstats->soft_reset_cnt;
3474	mem_alloc_cnt = swstats->mem_allocated;
3475	mem_free_cnt = swstats->mem_freed;
3476	watchdog_cnt = swstats->watchdog_timer_cnt;
3477
3478	memset(stats, 0, sizeof(struct stat_block));
3479
3480	/* restore link up/down time/cnt, reset/memory/watchdog cnt */
3481	swstats->link_up_cnt = up_cnt;
3482	swstats->link_down_cnt = down_cnt;
3483	swstats->link_up_time = up_time;
3484	swstats->link_down_time = down_time;
3485	swstats->soft_reset_cnt = reset_cnt;
3486	swstats->mem_allocated = mem_alloc_cnt;
3487	swstats->mem_freed = mem_free_cnt;
3488	swstats->watchdog_timer_cnt = watchdog_cnt;
3489
3490	/* SXE-002: Configure link and activity LED to turn it off */
3491	subid = sp->pdev->subsystem_device;
3492	if (((subid & 0xFF) >= 0x07) &&
3493	    (sp->device_type == XFRAME_I_DEVICE)) {
3494		val64 = readq(&bar0->gpio_control);
3495		val64 |= 0x0000800000000000ULL;
3496		writeq(val64, &bar0->gpio_control);
3497		val64 = 0x0411040400000000ULL;
3498		writeq(val64, (void __iomem *)bar0 + 0x2700);
3499	}
3500
3501	/*
3502	 * Clear spurious ECC interrupts that would have occurred on
3503	 * XFRAME II cards after reset.
3504	 */
3505	if (sp->device_type == XFRAME_II_DEVICE) {
3506		val64 = readq(&bar0->pcc_err_reg);
3507		writeq(val64, &bar0->pcc_err_reg);
3508	}
3509
3510	sp->device_enabled_once = false;
3511}
3512
3513/**
3514 *  s2io_set_swapper - to set the swapper controle on the card
3515 *  @sp : private member of the device structure,
3516 *  pointer to the s2io_nic structure.
3517 *  Description: Function to set the swapper control on the card
3518 *  correctly depending on the 'endianness' of the system.
3519 *  Return value:
3520 *  SUCCESS on success and FAILURE on failure.
3521 */
3522
3523static int s2io_set_swapper(struct s2io_nic *sp)
3524{
3525	struct net_device *dev = sp->dev;
3526	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3527	u64 val64, valt, valr;
3528
3529	/*
3530	 * Set proper endian settings and verify the same by reading
3531	 * the PIF Feed-back register.
3532	 */
3533
3534	val64 = readq(&bar0->pif_rd_swapper_fb);
3535	if (val64 != 0x0123456789ABCDEFULL) {
3536		int i = 0;
3537		static const u64 value[] = {
3538			0xC30000C3C30000C3ULL,	/* FE=1, SE=1 */
3539			0x8100008181000081ULL,	/* FE=1, SE=0 */
3540			0x4200004242000042ULL,	/* FE=0, SE=1 */
3541			0			/* FE=0, SE=0 */
3542		};
3543
3544		while (i < 4) {
3545			writeq(value[i], &bar0->swapper_ctrl);
3546			val64 = readq(&bar0->pif_rd_swapper_fb);
3547			if (val64 == 0x0123456789ABCDEFULL)
3548				break;
3549			i++;
3550		}
3551		if (i == 4) {
3552			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, "
3553				  "feedback read %llx\n",
3554				  dev->name, (unsigned long long)val64);
3555			return FAILURE;
3556		}
3557		valr = value[i];
3558	} else {
3559		valr = readq(&bar0->swapper_ctrl);
3560	}
3561
3562	valt = 0x0123456789ABCDEFULL;
3563	writeq(valt, &bar0->xmsi_address);
3564	val64 = readq(&bar0->xmsi_address);
3565
3566	if (val64 != valt) {
3567		int i = 0;
3568		static const u64 value[] = {
3569			0x00C3C30000C3C300ULL,	/* FE=1, SE=1 */
3570			0x0081810000818100ULL,	/* FE=1, SE=0 */
3571			0x0042420000424200ULL,	/* FE=0, SE=1 */
3572			0			/* FE=0, SE=0 */
3573		};
3574
3575		while (i < 4) {
3576			writeq((value[i] | valr), &bar0->swapper_ctrl);
3577			writeq(valt, &bar0->xmsi_address);
3578			val64 = readq(&bar0->xmsi_address);
3579			if (val64 == valt)
3580				break;
3581			i++;
3582		}
3583		if (i == 4) {
3584			unsigned long long x = val64;
3585			DBG_PRINT(ERR_DBG,
3586				  "Write failed, Xmsi_addr reads:0x%llx\n", x);
3587			return FAILURE;
3588		}
3589	}
3590	val64 = readq(&bar0->swapper_ctrl);
3591	val64 &= 0xFFFF000000000000ULL;
3592
3593#ifdef __BIG_ENDIAN
3594	/*
3595	 * The device by default set to a big endian format, so a
3596	 * big endian driver need not set anything.
3597	 */
3598	val64 |= (SWAPPER_CTRL_TXP_FE |
3599		  SWAPPER_CTRL_TXP_SE |
3600		  SWAPPER_CTRL_TXD_R_FE |
3601		  SWAPPER_CTRL_TXD_W_FE |
3602		  SWAPPER_CTRL_TXF_R_FE |
3603		  SWAPPER_CTRL_RXD_R_FE |
3604		  SWAPPER_CTRL_RXD_W_FE |
3605		  SWAPPER_CTRL_RXF_W_FE |
3606		  SWAPPER_CTRL_XMSI_FE |
3607		  SWAPPER_CTRL_STATS_FE |
3608		  SWAPPER_CTRL_STATS_SE);
3609	if (sp->config.intr_type == INTA)
3610		val64 |= SWAPPER_CTRL_XMSI_SE;
3611	writeq(val64, &bar0->swapper_ctrl);
3612#else
3613	/*
3614	 * Initially we enable all bits to make it accessible by the
3615	 * driver, then we selectively enable only those bits that
3616	 * we want to set.
3617	 */
3618	val64 |= (SWAPPER_CTRL_TXP_FE |
3619		  SWAPPER_CTRL_TXP_SE |
3620		  SWAPPER_CTRL_TXD_R_FE |
3621		  SWAPPER_CTRL_TXD_R_SE |
3622		  SWAPPER_CTRL_TXD_W_FE |
3623		  SWAPPER_CTRL_TXD_W_SE |
3624		  SWAPPER_CTRL_TXF_R_FE |
3625		  SWAPPER_CTRL_RXD_R_FE |
3626		  SWAPPER_CTRL_RXD_R_SE |
3627		  SWAPPER_CTRL_RXD_W_FE |
3628		  SWAPPER_CTRL_RXD_W_SE |
3629		  SWAPPER_CTRL_RXF_W_FE |
3630		  SWAPPER_CTRL_XMSI_FE |
3631		  SWAPPER_CTRL_STATS_FE |
3632		  SWAPPER_CTRL_STATS_SE);
3633	if (sp->config.intr_type == INTA)
3634		val64 |= SWAPPER_CTRL_XMSI_SE;
3635	writeq(val64, &bar0->swapper_ctrl);
3636#endif
3637	val64 = readq(&bar0->swapper_ctrl);
3638
3639	/*
3640	 * Verifying if endian settings are accurate by reading a
3641	 * feedback register.
3642	 */
3643	val64 = readq(&bar0->pif_rd_swapper_fb);
3644	if (val64 != 0x0123456789ABCDEFULL) {
3645		/* Endian settings are incorrect, calls for another dekko. */
3646		DBG_PRINT(ERR_DBG,
3647			  "%s: Endian settings are wrong, feedback read %llx\n",
3648			  dev->name, (unsigned long long)val64);
3649		return FAILURE;
3650	}
3651
3652	return SUCCESS;
3653}
3654
3655static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3656{
3657	struct XENA_dev_config __iomem *bar0 = nic->bar0;
3658	u64 val64;
3659	int ret = 0, cnt = 0;
3660
3661	do {
3662		val64 = readq(&bar0->xmsi_access);
3663		if (!(val64 & s2BIT(15)))
3664			break;
3665		mdelay(1);
3666		cnt++;
3667	} while (cnt < 5);
3668	if (cnt == 5) {
3669		DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3670		ret = 1;
3671	}
3672
3673	return ret;
3674}
3675
/* Re-program the adapter's XMSI address/data table from the copy kept in
 * nic->msix_info (captured earlier by store_xmsi_data), e.g. after a reset.
 * Only meaningful on Xframe II; Xframe I exits immediately.
 */
static void restore_xmsi_data(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64;
	int i, msix_index;

	if (nic->device_type == XFRAME_I_DEVICE)
		return;

	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
		/* Entry 0 is special; the rest map to 1, 9, 17, ...
		 * (same stride as s2io_enable_msi_x uses).
		 */
		msix_index = (i) ? ((i-1) * 8 + 1) : 0;
		writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
		writeq(nic->msix_info[i].data, &bar0->xmsi_data);
		/* s2BIT(15) kicks off the access; s2BIT(7) presumably
		 * selects a write (store_xmsi_data omits it) - TODO confirm.
		 */
		val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
		writeq(val64, &bar0->xmsi_access);
		if (wait_for_msix_trans(nic, msix_index))
			DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
				  __func__, msix_index);
	}
}
3696
/* Read the adapter's XMSI address/data table and cache it in
 * nic->msix_info so restore_xmsi_data() can replay it after a reset.
 * Only meaningful on Xframe II; Xframe I exits immediately.
 */
static void store_xmsi_data(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64, addr, data;
	int i, msix_index;

	if (nic->device_type == XFRAME_I_DEVICE)
		return;

	/* Store and display */
	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
		/* Same index layout as restore_xmsi_data: 0, 1, 9, 17, ... */
		msix_index = (i) ? ((i-1) * 8 + 1) : 0;
		/* s2BIT(15) starts the (read) access for this index */
		val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
		writeq(val64, &bar0->xmsi_access);
		if (wait_for_msix_trans(nic, msix_index)) {
			DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
				  __func__, msix_index);
			continue;
		}
		addr = readq(&bar0->xmsi_address);
		data = readq(&bar0->xmsi_data);
		/* Only cache slots the hardware actually populated */
		if (addr && data) {
			nic->msix_info[i].addr = addr;
			nic->msix_info[i].data = data;
		}
	}
}
3724
/* Allocate the MSI-X bookkeeping tables, program the Rx interrupt steering
 * matrix, and enable MSI-X on the device.  Entry 0 is reserved for alarms;
 * entries 1..n-1 serve the Rx rings.  Returns 0 on success or -ENOMEM on
 * allocation / vector-enable failure (tables are freed and NULLed on the
 * latter).
 */
static int s2io_enable_msi_x(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 rx_mat;
	u16 msi_control; /* Temp variable */
	int ret, i, j, msix_indx = 1;
	int size;
	struct stat_block *stats = nic->mac_control.stats_info;
	struct swStat *swstats = &stats->sw_stat;

	/* Vector table handed to pci_enable_msix_range() */
	size = nic->num_entries * sizeof(struct msix_entry);
	nic->entries = kzalloc(size, GFP_KERNEL);
	if (!nic->entries) {
		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
			  __func__);
		swstats->mem_alloc_fail_cnt++;
		return -ENOMEM;
	}
	swstats->mem_allocated += size;

	/* Driver-side per-vector state (type, arg, in_use) */
	size = nic->num_entries * sizeof(struct s2io_msix_entry);
	nic->s2io_entries = kzalloc(size, GFP_KERNEL);
	if (!nic->s2io_entries) {
		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
			  __func__);
		swstats->mem_alloc_fail_cnt++;
		kfree(nic->entries);
		swstats->mem_freed
			+= (nic->num_entries * sizeof(struct msix_entry));
		return -ENOMEM;
	}
	swstats->mem_allocated += size;

	/* Entry 0: alarm/fifo vector, argument is the fifo array */
	nic->entries[0].entry = 0;
	nic->s2io_entries[0].entry = 0;
	nic->s2io_entries[0].in_use = MSIX_FLG;
	nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
	nic->s2io_entries[0].arg = &nic->mac_control.fifos;

	/* Remaining entries use the sparse 1, 9, 17, ... numbering that
	 * store/restore_xmsi_data also assume.
	 */
	for (i = 1; i < nic->num_entries; i++) {
		nic->entries[i].entry = ((i - 1) * 8) + 1;
		nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
		nic->s2io_entries[i].arg = NULL;
		nic->s2io_entries[i].in_use = 0;
	}

	/* Steer each Rx ring's interrupt to its own MSI-X vector */
	rx_mat = readq(&bar0->rx_mat);
	for (j = 0; j < nic->config.rx_ring_num; j++) {
		rx_mat |= RX_MAT_SET(j, msix_indx);
		nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
		nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
		nic->s2io_entries[j+1].in_use = MSIX_FLG;
		msix_indx += 8;
	}
	writeq(rx_mat, &bar0->rx_mat);
	readq(&bar0->rx_mat);

	ret = pci_enable_msix_range(nic->pdev, nic->entries,
				    nic->num_entries, nic->num_entries);
	/* We fail init if error or we get less vectors than min required */
	if (ret < 0) {
		DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
		kfree(nic->entries);
		swstats->mem_freed += nic->num_entries *
			sizeof(struct msix_entry);
		kfree(nic->s2io_entries);
		swstats->mem_freed += nic->num_entries *
			sizeof(struct s2io_msix_entry);
		nic->entries = NULL;
		nic->s2io_entries = NULL;
		return -ENOMEM;
	}

	/*
	 * To enable MSI-X, MSI also needs to be enabled, due to a bug
	 * in the herc NIC. (Temp change, needs to be removed later)
	 */
	pci_read_config_word(nic->pdev, 0x42, &msi_control);
	msi_control |= 0x1; /* Enable MSI */
	pci_write_config_word(nic->pdev, 0x42, msi_control);

	return 0;
}
3808
3809/* Handle software interrupt used during MSI(X) test */
3810static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3811{
3812	struct s2io_nic *sp = dev_id;
3813
3814	sp->msi_detected = 1;
3815	wake_up(&sp->msi_wait);
3816
3817	return IRQ_HANDLED;
3818}
3819
/* Test interrupt path by forcing a software IRQ */
/* Temporarily installs s2io_test_intr on vector 1, programs the scheduled
 * interrupt timer to fire as MSI #1, and waits up to HZ/10 for the handler
 * to set msi_detected.  Returns 0 if the interrupt arrived, -EOPNOTSUPP if
 * it did not (caller should fall back to INTx), or the request_irq error.
 */
static int s2io_test_msi(struct s2io_nic *sp)
{
	struct pci_dev *pdev = sp->pdev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int err;
	u64 val64, saved64;

	err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
			  sp->name, sp);
	if (err) {
		DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
			  sp->dev->name, pci_name(pdev), pdev->irq);
		return err;
	}

	init_waitqueue_head(&sp->msi_wait);
	sp->msi_detected = 0;

	/* Save the control register so it can be restored after the test */
	saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
	val64 |= SCHED_INT_CTRL_ONE_SHOT;
	val64 |= SCHED_INT_CTRL_TIMER_EN;
	val64 |= SCHED_INT_CTRL_INT2MSI(1);
	writeq(val64, &bar0->scheduled_int_ctrl);

	wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);

	if (!sp->msi_detected) {
		/* MSI(X) test failed, go back to INTx mode */
		DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
			  "using MSI(X) during test\n",
			  sp->dev->name, pci_name(pdev));

		err = -EOPNOTSUPP;
	}

	free_irq(sp->entries[1].vector, sp);

	/* Undo the one-shot timer configuration used for the test */
	writeq(saved64, &bar0->scheduled_int_ctrl);

	return err;
}
3862
3863static void remove_msix_isr(struct s2io_nic *sp)
3864{
3865	int i;
3866	u16 msi_control;
3867
3868	for (i = 0; i < sp->num_entries; i++) {
3869		if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) {
3870			int vector = sp->entries[i].vector;
3871			void *arg = sp->s2io_entries[i].arg;
3872			free_irq(vector, arg);
3873		}
3874	}
3875
3876	kfree(sp->entries);
3877	kfree(sp->s2io_entries);
3878	sp->entries = NULL;
3879	sp->s2io_entries = NULL;
3880
3881	pci_read_config_word(sp->pdev, 0x42, &msi_control);
3882	msi_control &= 0xFFFE; /* Disable MSI */
3883	pci_write_config_word(sp->pdev, 0x42, msi_control);
3884
3885	pci_disable_msix(sp->pdev);
3886}
3887
/* Release the legacy INTA interrupt line registered for this device */
static void remove_inta_isr(struct s2io_nic *sp)
{
	free_irq(sp->pdev->irq, sp->dev);
}
3892
3893/* ********************************************************* *
3894 * Functions defined below concern the OS part of the driver *
3895 * ********************************************************* */
3896
3897/**
3898 *  s2io_open - open entry point of the driver
3899 *  @dev : pointer to the device structure.
3900 *  Description:
3901 *  This function is the open entry point of the driver. It mainly calls a
3902 *  function to allocate Rx buffers and inserts them into the buffer
3903 *  descriptors and then enables the Rx part of the NIC.
3904 *  Return value:
3905 *  0 on success and an appropriate (-)ve integer as defined in errno.h
3906 *   file on failure.
3907 */
3908
3909static int s2io_open(struct net_device *dev)
3910{
3911	struct s2io_nic *sp = netdev_priv(dev);
3912	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
3913	int err = 0;
3914
3915	/*
3916	 * Make sure you have link off by default every time
3917	 * Nic is initialized
3918	 */
3919	netif_carrier_off(dev);
3920	sp->last_link_state = 0;
3921
3922	/* Initialize H/W and enable interrupts */
3923	err = s2io_card_up(sp);
3924	if (err) {
3925		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3926			  dev->name);
3927		goto hw_init_failed;
3928	}
3929
3930	if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
3931		DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3932		s2io_card_down(sp);
3933		err = -ENODEV;
3934		goto hw_init_failed;
3935	}
3936	s2io_start_all_tx_queue(sp);
3937	return 0;
3938
3939hw_init_failed:
3940	if (sp->config.intr_type == MSI_X) {
3941		if (sp->entries) {
3942			kfree(sp->entries);
3943			swstats->mem_freed += sp->num_entries *
3944				sizeof(struct msix_entry);
3945		}
3946		if (sp->s2io_entries) {
3947			kfree(sp->s2io_entries);
3948			swstats->mem_freed += sp->num_entries *
3949				sizeof(struct s2io_msix_entry);
3950		}
3951	}
3952	return err;
3953}
3954
3955/**
3956 *  s2io_close -close entry point of the driver
3957 *  @dev : device pointer.
3958 *  Description:
3959 *  This is the stop entry point of the driver. It needs to undo exactly
3960 *  whatever was done by the open entry point,thus it's usually referred to
3961 *  as the close function.Among other things this function mainly stops the
3962 *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3963 *  Return value:
3964 *  0 on success and an appropriate (-)ve integer as defined in errno.h
3965 *  file on failure.
3966 */
3967
3968static int s2io_close(struct net_device *dev)
3969{
3970	struct s2io_nic *sp = netdev_priv(dev);
3971	struct config_param *config = &sp->config;
3972	u64 tmp64;
3973	int offset;
3974
3975	/* Return if the device is already closed               *
3976	 *  Can happen when s2io_card_up failed in change_mtu    *
3977	 */
3978	if (!is_s2io_card_up(sp))
3979		return 0;
3980
3981	s2io_stop_all_tx_queue(sp);
3982	/* delete all populated mac entries */
3983	for (offset = 1; offset < config->max_mc_addr; offset++) {
3984		tmp64 = do_s2io_read_unicast_mc(sp, offset);
3985		if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
3986			do_s2io_delete_unicast_mc(sp, tmp64);
3987	}
3988
3989	s2io_card_down(sp);
3990
3991	return 0;
3992}
3993
3994/**
3995 *  s2io_xmit - Tx entry point of te driver
3996 *  @skb : the socket buffer containing the Tx data.
3997 *  @dev : device pointer.
3998 *  Description :
3999 *  This function is the Tx entry point of the driver. S2IO NIC supports
4000 *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
4001 *  NOTE: when device can't queue the pkt,just the trans_start variable will
4002 *  not be upadted.
4003 *  Return value:
4004 *  0 on success & 1 on failure.
4005 */
4006
4007static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4008{
4009	struct s2io_nic *sp = netdev_priv(dev);
4010	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
4011	register u64 val64;
4012	struct TxD *txdp;
4013	struct TxFIFO_element __iomem *tx_fifo;
4014	unsigned long flags = 0;
4015	u16 vlan_tag = 0;
4016	struct fifo_info *fifo = NULL;
4017	int offload_type;
4018	int enable_per_list_interrupt = 0;
4019	struct config_param *config = &sp->config;
4020	struct mac_info *mac_control = &sp->mac_control;
4021	struct stat_block *stats = mac_control->stats_info;
4022	struct swStat *swstats = &stats->sw_stat;
4023
4024	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
4025
4026	if (unlikely(skb->len <= 0)) {
4027		DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name);
4028		dev_kfree_skb_any(skb);
4029		return NETDEV_TX_OK;
4030	}
4031
4032	if (!is_s2io_card_up(sp)) {
4033		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
4034			  dev->name);
4035		dev_kfree_skb_any(skb);
4036		return NETDEV_TX_OK;
4037	}
4038
4039	queue = 0;
4040	if (skb_vlan_tag_present(skb))
4041		vlan_tag = skb_vlan_tag_get(skb);
4042	if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
4043		if (skb->protocol == htons(ETH_P_IP)) {
4044			struct iphdr *ip;
4045			struct tcphdr *th;
4046			ip = ip_hdr(skb);
4047
4048			if (!ip_is_fragment(ip)) {
4049				th = (struct tcphdr *)(((unsigned char *)ip) +
4050						       ip->ihl*4);
4051
4052				if (ip->protocol == IPPROTO_TCP) {
4053					queue_len = sp->total_tcp_fifos;
4054					queue = (ntohs(th->source) +
4055						 ntohs(th->dest)) &
4056						sp->fifo_selector[queue_len - 1];
4057					if (queue >= queue_len)
4058						queue = queue_len - 1;
4059				} else if (ip->protocol == IPPROTO_UDP) {
4060					queue_len = sp->total_udp_fifos;
4061					queue = (ntohs(th->source) +
4062						 ntohs(th->dest)) &
4063						sp->fifo_selector[queue_len - 1];
4064					if (queue >= queue_len)
4065						queue = queue_len - 1;
4066					queue += sp->udp_fifo_idx;
4067					if (skb->len > 1024)
4068						enable_per_list_interrupt = 1;
4069				}
4070			}
4071		}
4072	} else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
4073		/* get fifo number based on skb->priority value */
4074		queue = config->fifo_mapping
4075			[skb->priority & (MAX_TX_FIFOS - 1)];
4076	fifo = &mac_control->fifos[queue];
4077
4078	spin_lock_irqsave(&fifo->tx_lock, flags);
4079
4080	if (sp->config.multiq) {
4081		if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
4082			spin_unlock_irqrestore(&fifo->tx_lock, flags);
4083			return NETDEV_TX_BUSY;
4084		}
4085	} else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
4086		if (netif_queue_stopped(dev)) {
4087			spin_unlock_irqrestore(&fifo->tx_lock, flags);
4088			return NETDEV_TX_BUSY;
4089		}
4090	}
4091
4092	put_off = (u16)fifo->tx_curr_put_info.offset;
4093	get_off = (u16)fifo->tx_curr_get_info.offset;
4094	txdp = fifo->list_info[put_off].list_virt_addr;
4095
4096	queue_len = fifo->tx_curr_put_info.fifo_len + 1;
4097	/* Avoid "put" pointer going beyond "get" pointer */
4098	if (txdp->Host_Control ||
4099	    ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4100		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4101		s2io_stop_tx_queue(sp, fifo->fifo_no);
4102		dev_kfree_skb_any(skb);
4103		spin_unlock_irqrestore(&fifo->tx_lock, flags);
4104		return NETDEV_TX_OK;
4105	}
4106
4107	offload_type = s2io_offload_type(skb);
4108	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
4109		txdp->Control_1 |= TXD_TCP_LSO_EN;
4110		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
4111	}
4112	if (skb->ip_summed == CHECKSUM_PARTIAL) {
4113		txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN |
4114				    TXD_TX_CKO_TCP_EN |
4115				    TXD_TX_CKO_UDP_EN);
4116	}
4117	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4118	txdp->Control_1 |= TXD_LIST_OWN_XENA;
4119	txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
4120	if (enable_per_list_interrupt)
4121		if (put_off & (queue_len >> 5))
4122			txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
4123	if (vlan_tag) {
4124		txdp->Control_2 |= TXD_VLAN_ENABLE;
4125		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4126	}
4127
4128	frg_len = skb_headlen(skb);
4129	txdp->Buffer_Pointer = dma_map_single(&sp->pdev->dev, skb->data,
4130					      frg_len, DMA_TO_DEVICE);
4131	if (dma_mapping_error(&sp->pdev->dev, txdp->Buffer_Pointer))
4132		goto pci_map_failed;
4133
4134	txdp->Host_Control = (unsigned long)skb;
4135	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4136
4137	frg_cnt = skb_shinfo(skb)->nr_frags;
4138	/* For fragmented SKB. */
4139	for (i = 0; i < frg_cnt; i++) {
4140		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4141		/* A '0' length fragment will be ignored */
4142		if (!skb_frag_size(frag))
4143			continue;
4144		txdp++;
4145		txdp->Buffer_Pointer = (u64)skb_frag_dma_map(&sp->pdev->dev,
4146							     frag, 0,
4147							     skb_frag_size(frag),
4148							     DMA_TO_DEVICE);
4149		txdp->Control_1 = TXD_BUFFER0_SIZE(skb_frag_size(frag));
4150	}
4151	txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4152
4153	tx_fifo = mac_control->tx_FIFO_start[queue];
4154	val64 = fifo->list_info[put_off].list_phy_addr;
4155	writeq(val64, &tx_fifo->TxDL_Pointer);
4156
4157	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4158		 TX_FIFO_LAST_LIST);
4159	if (offload_type)
4160		val64 |= TX_FIFO_SPECIAL_FUNC;
4161
4162	writeq(val64, &tx_fifo->List_Control);
4163
4164	put_off++;
4165	if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
4166		put_off = 0;
4167	fifo->tx_curr_put_info.offset = put_off;
4168
4169	/* Avoid "put" pointer going beyond "get" pointer */
4170	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4171		swstats->fifo_full_cnt++;
4172		DBG_PRINT(TX_DBG,
4173			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4174			  put_off, get_off);
4175		s2io_stop_tx_queue(sp, fifo->fifo_no);
4176	}
4177	swstats->mem_allocated += skb->truesize;
4178	spin_unlock_irqrestore(&fifo->tx_lock, flags);
4179
4180	if (sp->config.intr_type == MSI_X)
4181		tx_intr_handler(fifo);
4182
4183	return NETDEV_TX_OK;
4184
4185pci_map_failed:
4186	swstats->pci_map_fail_cnt++;
4187	s2io_stop_tx_queue(sp, fifo->fifo_no);
4188	swstats->mem_freed += skb->truesize;
4189	dev_kfree_skb_any(skb);
4190	spin_unlock_irqrestore(&fifo->tx_lock, flags);
4191	return NETDEV_TX_OK;
4192}
4193
4194static void
4195s2io_alarm_handle(struct timer_list *t)
4196{
4197	struct s2io_nic *sp = from_timer(sp, t, alarm_timer);
4198	struct net_device *dev = sp->dev;
4199
4200	s2io_handle_errors(dev);
4201	mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4202}
4203
4204static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4205{
4206	struct ring_info *ring = (struct ring_info *)dev_id;
4207	struct s2io_nic *sp = ring->nic;
4208	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4209
4210	if (unlikely(!is_s2io_card_up(sp)))
4211		return IRQ_HANDLED;
4212
4213	if (sp->config.napi) {
4214		u8 __iomem *addr = NULL;
4215		u8 val8 = 0;
4216
4217		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
4218		addr += (7 - ring->ring_no);
4219		val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
4220		writeb(val8, addr);
4221		val8 = readb(addr);
4222		napi_schedule(&ring->napi);
4223	} else {
4224		rx_intr_handler(ring, 0);
4225		s2io_chk_rx_buffers(sp, ring);
4226	}
4227
4228	return IRQ_HANDLED;
4229}
4230
/* MSI-X alarm/fifo interrupt: handles Tx-PIC and Tx-traffic causes for
 * all FIFOs on the shared alarm vector.  Returns IRQ_NONE when the cause
 * was not ours (or the card is down), IRQ_HANDLED otherwise.
 */
static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
{
	int i;
	struct fifo_info *fifos = (struct fifo_info *)dev_id;
	struct s2io_nic *sp = fifos->nic;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	struct config_param *config  = &sp->config;
	u64 reason;

	if (unlikely(!is_s2io_card_up(sp)))
		return IRQ_NONE;

	reason = readq(&bar0->general_int_status);
	/* All-ones read: register inaccessible (e.g. card gone) */
	if (unlikely(reason == S2IO_MINUS_ONE))
		/* Nothing much can be done. Get out */
		return IRQ_HANDLED;

	if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
		/* Mask everything while servicing, unmask at the end */
		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

		if (reason & GEN_INTR_TXPIC)
			s2io_txpic_intr_handle(sp);

		if (reason & GEN_INTR_TXTRAFFIC)
			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

		/* Reap completed Tx descriptors on every FIFO */
		for (i = 0; i < config->tx_fifo_num; i++)
			tx_intr_handler(&fifos[i]);

		writeq(sp->general_int_mask, &bar0->general_int_mask);
		/* Read flushes the mask write before returning */
		readl(&bar0->general_int_status);
		return IRQ_HANDLED;
	}
	/* The interrupt was not raised by us */
	return IRQ_NONE;
}
4267
/*
 * s2io_txpic_intr_handle - service PIC/GPIO interrupts (link transitions).
 * Reports link up/down to the stack, enables the adapter and drives the
 * LED on link-up, and flips the GPIO interrupt masks so only the opposite
 * transition stays unmasked.
 */
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;

	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
		    (val64 & GPIO_INT_REG_LINK_UP)) {
			/*
			 * This is unstable state so clear both up/down
			 * interrupt and adapter to re-evaluate the link state.
			 */
			val64 |= GPIO_INT_REG_LINK_DOWN;
			val64 |= GPIO_INT_REG_LINK_UP;
			writeq(val64, &bar0->gpio_int_reg);
			val64 = readq(&bar0->gpio_int_mask);
			/* unmask both transitions so the next one decides */
			val64 &= ~(GPIO_INT_MASK_LINK_UP |
				   GPIO_INT_MASK_LINK_DOWN);
			writeq(val64, &bar0->gpio_int_mask);
		} else if (val64 & GPIO_INT_REG_LINK_UP) {
			val64 = readq(&bar0->adapter_status);
			/* Enable Adapter */
			val64 = readq(&bar0->adapter_control);
			val64 |= ADAPTER_CNTL_EN;
			writeq(val64, &bar0->adapter_control);
			/* ... and light the adapter LED */
			val64 |= ADAPTER_LED_ON;
			writeq(val64, &bar0->adapter_control);
			if (!sp->device_enabled_once)
				sp->device_enabled_once = 1;

			s2io_link(sp, LINK_UP);
			/*
			 * unmask link down interrupt and mask link-up
			 * intr
			 */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
			val64 |= GPIO_INT_MASK_LINK_UP;
			writeq(val64, &bar0->gpio_int_mask);

		} else if (val64 & GPIO_INT_REG_LINK_DOWN) {
			val64 = readq(&bar0->adapter_status);
			s2io_link(sp, LINK_DOWN);
			/* Link is down so unmask link up interrupt */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_UP;
			val64 |= GPIO_INT_MASK_LINK_DOWN;
			writeq(val64, &bar0->gpio_int_mask);

			/* turn off LED */
			val64 = readq(&bar0->adapter_control);
			val64 = val64 & (~ADAPTER_LED_ON);
			writeq(val64, &bar0->adapter_control);
		}
	}
	/* Final read flushes the posted mask writes above. */
	val64 = readq(&bar0->gpio_int_mask);
}
4327
4328/**
4329 *  do_s2io_chk_alarm_bit - Check for alarm and incrment the counter
4330 *  @value: alarm bits
4331 *  @addr: address value
4332 *  @cnt: counter variable
4333 *  Description: Check for alarm and increment the counter
4334 *  Return Value:
4335 *  1 - if alarm bit set
4336 *  0 - if alarm bit is not set
4337 */
4338static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr,
4339				 unsigned long long *cnt)
4340{
4341	u64 val64;
4342	val64 = readq(addr);
4343	if (val64 & value) {
4344		writeq(val64, addr);
4345		(*cnt)++;
4346		return 1;
4347	}
4348	return 0;
4349
4350}
4351
4352/**
4353 *  s2io_handle_errors - Xframe error indication handler
4354 *  @dev_id: opaque handle to dev
4355 *  Description: Handle alarms such as loss of link, single or
4356 *  double ECC errors, critical and serious errors.
4357 *  Return Value:
4358 *  NONE
4359 */
4360static void s2io_handle_errors(void *dev_id)
4361{
4362	struct net_device *dev = (struct net_device *)dev_id;
4363	struct s2io_nic *sp = netdev_priv(dev);
4364	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4365	u64 temp64 = 0, val64 = 0;
4366	int i = 0;
4367
4368	struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4369	struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4370
4371	if (!is_s2io_card_up(sp))
4372		return;
4373
4374	if (pci_channel_offline(sp->pdev))
4375		return;
4376
4377	memset(&sw_stat->ring_full_cnt, 0,
4378	       sizeof(sw_stat->ring_full_cnt));
4379
4380	/* Handling the XPAK counters update */
4381	if (stats->xpak_timer_count < 72000) {
4382		/* waiting for an hour */
4383		stats->xpak_timer_count++;
4384	} else {
4385		s2io_updt_xpak_counter(dev);
4386		/* reset the count to zero */
4387		stats->xpak_timer_count = 0;
4388	}
4389
4390	/* Handling link status change error Intr */
4391	if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4392		val64 = readq(&bar0->mac_rmac_err_reg);
4393		writeq(val64, &bar0->mac_rmac_err_reg);
4394		if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4395			schedule_work(&sp->set_link_task);
4396	}
4397
4398	/* In case of a serious error, the device will be Reset. */
4399	if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4400				  &sw_stat->serious_err_cnt))
4401		goto reset;
4402
4403	/* Check for data parity error */
4404	if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4405				  &sw_stat->parity_err_cnt))
4406		goto reset;
4407
4408	/* Check for ring full counter */
4409	if (sp->device_type == XFRAME_II_DEVICE) {
4410		val64 = readq(&bar0->ring_bump_counter1);
4411		for (i = 0; i < 4; i++) {
4412			temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4413			temp64 >>= 64 - ((i+1)*16);
4414			sw_stat->ring_full_cnt[i] += temp64;
4415		}
4416
4417		val64 = readq(&bar0->ring_bump_counter2);
4418		for (i = 0; i < 4; i++) {
4419			temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4420			temp64 >>= 64 - ((i+1)*16);
4421			sw_stat->ring_full_cnt[i+4] += temp64;
4422		}
4423	}
4424
4425	val64 = readq(&bar0->txdma_int_status);
4426	/*check for pfc_err*/
4427	if (val64 & TXDMA_PFC_INT) {
4428		if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
4429					  PFC_MISC_0_ERR | PFC_MISC_1_ERR |
4430					  PFC_PCIX_ERR,
4431					  &bar0->pfc_err_reg,
4432					  &sw_stat->pfc_err_cnt))
4433			goto reset;
4434		do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR,
4435				      &bar0->pfc_err_reg,
4436				      &sw_stat->pfc_err_cnt);
4437	}
4438
4439	/*check for tda_err*/
4440	if (val64 & TXDMA_TDA_INT) {
4441		if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR |
4442					  TDA_SM0_ERR_ALARM |
4443					  TDA_SM1_ERR_ALARM,
4444					  &bar0->tda_err_reg,
4445					  &sw_stat->tda_err_cnt))
4446			goto reset;
4447		do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4448				      &bar0->tda_err_reg,
4449				      &sw_stat->tda_err_cnt);
4450	}
4451	/*check for pcc_err*/
4452	if (val64 & TXDMA_PCC_INT) {
4453		if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
4454					  PCC_N_SERR | PCC_6_COF_OV_ERR |
4455					  PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
4456					  PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR |
4457					  PCC_TXB_ECC_DB_ERR,
4458					  &bar0->pcc_err_reg,
4459					  &sw_stat->pcc_err_cnt))
4460			goto reset;
4461		do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4462				      &bar0->pcc_err_reg,
4463				      &sw_stat->pcc_err_cnt);
4464	}
4465
4466	/*check for tti_err*/
4467	if (val64 & TXDMA_TTI_INT) {
4468		if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM,
4469					  &bar0->tti_err_reg,
4470					  &sw_stat->tti_err_cnt))
4471			goto reset;
4472		do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4473				      &bar0->tti_err_reg,
4474				      &sw_stat->tti_err_cnt);
4475	}
4476
4477	/*check for lso_err*/
4478	if (val64 & TXDMA_LSO_INT) {
4479		if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT |
4480					  LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4481					  &bar0->lso_err_reg,
4482					  &sw_stat->lso_err_cnt))
4483			goto reset;
4484		do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4485				      &bar0->lso_err_reg,
4486				      &sw_stat->lso_err_cnt);
4487	}
4488
4489	/*check for tpa_err*/
4490	if (val64 & TXDMA_TPA_INT) {
4491		if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM,
4492					  &bar0->tpa_err_reg,
4493					  &sw_stat->tpa_err_cnt))
4494			goto reset;
4495		do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP,
4496				      &bar0->tpa_err_reg,
4497				      &sw_stat->tpa_err_cnt);
4498	}
4499
4500	/*check for sm_err*/
4501	if (val64 & TXDMA_SM_INT) {
4502		if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM,
4503					  &bar0->sm_err_reg,
4504					  &sw_stat->sm_err_cnt))
4505			goto reset;
4506	}
4507
4508	val64 = readq(&bar0->mac_int_status);
4509	if (val64 & MAC_INT_STATUS_TMAC_INT) {
4510		if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4511					  &bar0->mac_tmac_err_reg,
4512					  &sw_stat->mac_tmac_err_cnt))
4513			goto reset;
4514		do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
4515				      TMAC_DESC_ECC_SG_ERR |
4516				      TMAC_DESC_ECC_DB_ERR,
4517				      &bar0->mac_tmac_err_reg,
4518				      &sw_stat->mac_tmac_err_cnt);
4519	}
4520
4521	val64 = readq(&bar0->xgxs_int_status);
4522	if (val64 & XGXS_INT_STATUS_TXGXS) {
4523		if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4524					  &bar0->xgxs_txgxs_err_reg,
4525					  &sw_stat->xgxs_txgxs_err_cnt))
4526			goto reset;
4527		do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4528				      &bar0->xgxs_txgxs_err_reg,
4529				      &sw_stat->xgxs_txgxs_err_cnt);
4530	}
4531
4532	val64 = readq(&bar0->rxdma_int_status);
4533	if (val64 & RXDMA_INT_RC_INT_M) {
4534		if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR |
4535					  RC_FTC_ECC_DB_ERR |
4536					  RC_PRCn_SM_ERR_ALARM |
4537					  RC_FTC_SM_ERR_ALARM,
4538					  &bar0->rc_err_reg,
4539					  &sw_stat->rc_err_cnt))
4540			goto reset;
4541		do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR |
4542				      RC_FTC_ECC_SG_ERR |
4543				      RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4544				      &sw_stat->rc_err_cnt);
4545		if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn |
4546					  PRC_PCI_AB_WR_Rn |
4547					  PRC_PCI_AB_F_WR_Rn,
4548					  &bar0->prc_pcix_err_reg,
4549					  &sw_stat->prc_pcix_err_cnt))
4550			goto reset;
4551		do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn |
4552				      PRC_PCI_DP_WR_Rn |
4553				      PRC_PCI_DP_F_WR_Rn,
4554				      &bar0->prc_pcix_err_reg,
4555				      &sw_stat->prc_pcix_err_cnt);
4556	}
4557
4558	if (val64 & RXDMA_INT_RPA_INT_M) {
4559		if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4560					  &bar0->rpa_err_reg,
4561					  &sw_stat->rpa_err_cnt))
4562			goto reset;
4563		do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4564				      &bar0->rpa_err_reg,
4565				      &sw_stat->rpa_err_cnt);
4566	}
4567
4568	if (val64 & RXDMA_INT_RDA_INT_M) {
4569		if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR |
4570					  RDA_FRM_ECC_DB_N_AERR |
4571					  RDA_SM1_ERR_ALARM |
4572					  RDA_SM0_ERR_ALARM |
4573					  RDA_RXD_ECC_DB_SERR,
4574					  &bar0->rda_err_reg,
4575					  &sw_stat->rda_err_cnt))
4576			goto reset;
4577		do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR |
4578				      RDA_FRM_ECC_SG_ERR |
4579				      RDA_MISC_ERR |
4580				      RDA_PCIX_ERR,
4581				      &bar0->rda_err_reg,
4582				      &sw_stat->rda_err_cnt);
4583	}
4584
4585	if (val64 & RXDMA_INT_RTI_INT_M) {
4586		if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM,
4587					  &bar0->rti_err_reg,
4588					  &sw_stat->rti_err_cnt))
4589			goto reset;
4590		do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4591				      &bar0->rti_err_reg,
4592				      &sw_stat->rti_err_cnt);
4593	}
4594
4595	val64 = readq(&bar0->mac_int_status);
4596	if (val64 & MAC_INT_STATUS_RMAC_INT) {
4597		if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4598					  &bar0->mac_rmac_err_reg,
4599					  &sw_stat->mac_rmac_err_cnt))
4600			goto reset;
4601		do_s2io_chk_alarm_bit(RMAC_UNUSED_INT |
4602				      RMAC_SINGLE_ECC_ERR |
4603				      RMAC_DOUBLE_ECC_ERR,
4604				      &bar0->mac_rmac_err_reg,
4605				      &sw_stat->mac_rmac_err_cnt);
4606	}
4607
4608	val64 = readq(&bar0->xgxs_int_status);
4609	if (val64 & XGXS_INT_STATUS_RXGXS) {
4610		if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4611					  &bar0->xgxs_rxgxs_err_reg,
4612					  &sw_stat->xgxs_rxgxs_err_cnt))
4613			goto reset;
4614	}
4615
4616	val64 = readq(&bar0->mc_int_status);
4617	if (val64 & MC_INT_STATUS_MC_INT) {
4618		if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR,
4619					  &bar0->mc_err_reg,
4620					  &sw_stat->mc_err_cnt))
4621			goto reset;
4622
4623		/* Handling Ecc errors */
4624		if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4625			writeq(val64, &bar0->mc_err_reg);
4626			if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4627				sw_stat->double_ecc_errs++;
4628				if (sp->device_type != XFRAME_II_DEVICE) {
4629					/*
4630					 * Reset XframeI only if critical error
4631					 */
4632					if (val64 &
4633					    (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4634					     MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4635						goto reset;
4636				}
4637			} else
4638				sw_stat->single_ecc_errs++;
4639		}
4640	}
4641	return;
4642
4643reset:
4644	s2io_stop_all_tx_queue(sp);
4645	schedule_work(&sp->rst_timer_task);
4646	sw_stat->soft_reset_cnt++;
4647}
4648
4649/**
4650 *  s2io_isr - ISR handler of the device .
4651 *  @irq: the irq of the device.
4652 *  @dev_id: a void pointer to the dev structure of the NIC.
4653 *  Description:  This function is the ISR handler of the device. It
4654 *  identifies the reason for the interrupt and calls the relevant
4655 *  service routines. As a contongency measure, this ISR allocates the
4656 *  recv buffers, if their numbers are below the panic value which is
4657 *  presently set to 25% of the original number of rcv buffers allocated.
4658 *  Return value:
4659 *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
4660 *   IRQ_NONE: will be returned if interrupt is not from our device
4661 */
4662static irqreturn_t s2io_isr(int irq, void *dev_id)
4663{
4664	struct net_device *dev = (struct net_device *)dev_id;
4665	struct s2io_nic *sp = netdev_priv(dev);
4666	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4667	int i;
4668	u64 reason = 0;
4669	struct mac_info *mac_control;
4670	struct config_param *config;
4671
4672	/* Pretend we handled any irq's from a disconnected card */
4673	if (pci_channel_offline(sp->pdev))
4674		return IRQ_NONE;
4675
4676	if (!is_s2io_card_up(sp))
4677		return IRQ_NONE;
4678
4679	config = &sp->config;
4680	mac_control = &sp->mac_control;
4681
4682	/*
4683	 * Identify the cause for interrupt and call the appropriate
4684	 * interrupt handler. Causes for the interrupt could be;
4685	 * 1. Rx of packet.
4686	 * 2. Tx complete.
4687	 * 3. Link down.
4688	 */
4689	reason = readq(&bar0->general_int_status);
4690
4691	if (unlikely(reason == S2IO_MINUS_ONE))
4692		return IRQ_HANDLED;	/* Nothing much can be done. Get out */
4693
4694	if (reason &
4695	    (GEN_INTR_RXTRAFFIC | GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC)) {
4696		writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4697
4698		if (config->napi) {
4699			if (reason & GEN_INTR_RXTRAFFIC) {
4700				napi_schedule(&sp->napi);
4701				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4702				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4703				readl(&bar0->rx_traffic_int);
4704			}
4705		} else {
4706			/*
4707			 * rx_traffic_int reg is an R1 register, writing all 1's
4708			 * will ensure that the actual interrupt causing bit
4709			 * get's cleared and hence a read can be avoided.
4710			 */
4711			if (reason & GEN_INTR_RXTRAFFIC)
4712				writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4713
4714			for (i = 0; i < config->rx_ring_num; i++) {
4715				struct ring_info *ring = &mac_control->rings[i];
4716
4717				rx_intr_handler(ring, 0);
4718			}
4719		}
4720
4721		/*
4722		 * tx_traffic_int reg is an R1 register, writing all 1's
4723		 * will ensure that the actual interrupt causing bit get's
4724		 * cleared and hence a read can be avoided.
4725		 */
4726		if (reason & GEN_INTR_TXTRAFFIC)
4727			writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4728
4729		for (i = 0; i < config->tx_fifo_num; i++)
4730			tx_intr_handler(&mac_control->fifos[i]);
4731
4732		if (reason & GEN_INTR_TXPIC)
4733			s2io_txpic_intr_handle(sp);
4734
4735		/*
4736		 * Reallocate the buffers from the interrupt handler itself.
4737		 */
4738		if (!config->napi) {
4739			for (i = 0; i < config->rx_ring_num; i++) {
4740				struct ring_info *ring = &mac_control->rings[i];
4741
4742				s2io_chk_rx_buffers(sp, ring);
4743			}
4744		}
4745		writeq(sp->general_int_mask, &bar0->general_int_mask);
4746		readl(&bar0->general_int_status);
4747
4748		return IRQ_HANDLED;
4749
4750	} else if (!reason) {
4751		/* The interrupt was not raised by us */
4752		return IRQ_NONE;
4753	}
4754
4755	return IRQ_HANDLED;
4756}
4757
4758/*
4759 * s2io_updt_stats -
4760 */
4761static void s2io_updt_stats(struct s2io_nic *sp)
4762{
4763	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4764	u64 val64;
4765	int cnt = 0;
4766
4767	if (is_s2io_card_up(sp)) {
4768		/* Apprx 30us on a 133 MHz bus */
4769		val64 = SET_UPDT_CLICKS(10) |
4770			STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4771		writeq(val64, &bar0->stat_cfg);
4772		do {
4773			udelay(100);
4774			val64 = readq(&bar0->stat_cfg);
4775			if (!(val64 & s2BIT(0)))
4776				break;
4777			cnt++;
4778			if (cnt == 5)
4779				break; /* Updt failed */
4780		} while (1);
4781	}
4782}
4783
4784/**
4785 *  s2io_get_stats - Updates the device statistics structure.
4786 *  @dev : pointer to the device structure.
4787 *  Description:
4788 *  This function updates the device statistics structure in the s2io_nic
4789 *  structure and returns a pointer to the same.
4790 *  Return value:
4791 *  pointer to the updated net_device_stats structure.
4792 */
4793static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4794{
4795	struct s2io_nic *sp = netdev_priv(dev);
4796	struct mac_info *mac_control = &sp->mac_control;
4797	struct stat_block *stats = mac_control->stats_info;
4798	u64 delta;
4799
4800	/* Configure Stats for immediate updt */
4801	s2io_updt_stats(sp);
4802
4803	/* A device reset will cause the on-adapter statistics to be zero'ed.
4804	 * This can be done while running by changing the MTU.  To prevent the
4805	 * system from having the stats zero'ed, the driver keeps a copy of the
4806	 * last update to the system (which is also zero'ed on reset).  This
4807	 * enables the driver to accurately know the delta between the last
4808	 * update and the current update.
4809	 */
4810	delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
4811		le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets;
4812	sp->stats.rx_packets += delta;
4813	dev->stats.rx_packets += delta;
4814
4815	delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 |
4816		le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets;
4817	sp->stats.tx_packets += delta;
4818	dev->stats.tx_packets += delta;
4819
4820	delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
4821		le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes;
4822	sp->stats.rx_bytes += delta;
4823	dev->stats.rx_bytes += delta;
4824
4825	delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
4826		le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes;
4827	sp->stats.tx_bytes += delta;
4828	dev->stats.tx_bytes += delta;
4829
4830	delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors;
4831	sp->stats.rx_errors += delta;
4832	dev->stats.rx_errors += delta;
4833
4834	delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
4835		le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors;
4836	sp->stats.tx_errors += delta;
4837	dev->stats.tx_errors += delta;
4838
4839	delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped;
4840	sp->stats.rx_dropped += delta;
4841	dev->stats.rx_dropped += delta;
4842
4843	delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped;
4844	sp->stats.tx_dropped += delta;
4845	dev->stats.tx_dropped += delta;
4846
4847	/* The adapter MAC interprets pause frames as multicast packets, but
4848	 * does not pass them up.  This erroneously increases the multicast
4849	 * packet count and needs to be deducted when the multicast frame count
4850	 * is queried.
4851	 */
4852	delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
4853		le32_to_cpu(stats->rmac_vld_mcst_frms);
4854	delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms);
4855	delta -= sp->stats.multicast;
4856	sp->stats.multicast += delta;
4857	dev->stats.multicast += delta;
4858
4859	delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
4860		le32_to_cpu(stats->rmac_usized_frms)) +
4861		le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors;
4862	sp->stats.rx_length_errors += delta;
4863	dev->stats.rx_length_errors += delta;
4864
4865	delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors;
4866	sp->stats.rx_crc_errors += delta;
4867	dev->stats.rx_crc_errors += delta;
4868
4869	return &dev->stats;
4870}
4871
4872/**
4873 *  s2io_set_multicast - entry point for multicast address enable/disable.
4874 *  @dev : pointer to the device structure
4875 *  @may_sleep: parameter indicates if sleeping when waiting for command
4876 *  complete
4877 *  Description:
4878 *  This function is a driver entry point which gets called by the kernel
4879 *  whenever multicast addresses must be enabled/disabled. This also gets
4880 *  called to set/reset promiscuous mode. Depending on the deivce flag, we
4881 *  determine, if multicast address must be enabled or if promiscuous mode
4882 *  is to be disabled etc.
4883 *  Return value:
4884 *  void.
4885 */
4886static void s2io_set_multicast(struct net_device *dev, bool may_sleep)
4887{
4888	int i, j, prev_cnt;
4889	struct netdev_hw_addr *ha;
4890	struct s2io_nic *sp = netdev_priv(dev);
4891	struct XENA_dev_config __iomem *bar0 = sp->bar0;
4892	u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4893		0xfeffffffffffULL;
4894	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
4895	void __iomem *add;
4896	struct config_param *config = &sp->config;
4897
4898	if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4899		/*  Enable all Multicast addresses */
4900		writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4901		       &bar0->rmac_addr_data0_mem);
4902		writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4903		       &bar0->rmac_addr_data1_mem);
4904		val64 = RMAC_ADDR_CMD_MEM_WE |
4905			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4906			RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
4907		writeq(val64, &bar0->rmac_addr_cmd_mem);
4908		/* Wait till command completes */
4909		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4910				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4911				      S2IO_BIT_RESET, may_sleep);
4912
4913		sp->m_cast_flg = 1;
4914		sp->all_multi_pos = config->max_mc_addr - 1;
4915	} else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4916		/*  Disable all Multicast addresses */
4917		writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4918		       &bar0->rmac_addr_data0_mem);
4919		writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4920		       &bar0->rmac_addr_data1_mem);
4921		val64 = RMAC_ADDR_CMD_MEM_WE |
4922			RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4923			RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4924		writeq(val64, &bar0->rmac_addr_cmd_mem);
4925		/* Wait till command completes */
4926		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4927				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4928				      S2IO_BIT_RESET, may_sleep);
4929
4930		sp->m_cast_flg = 0;
4931		sp->all_multi_pos = 0;
4932	}
4933
4934	if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4935		/*  Put the NIC into promiscuous mode */
4936		add = &bar0->mac_cfg;
4937		val64 = readq(&bar0->mac_cfg);
4938		val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4939
4940		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4941		writel((u32)val64, add);
4942		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4943		writel((u32) (val64 >> 32), (add + 4));
4944
4945		if (vlan_tag_strip != 1) {
4946			val64 = readq(&bar0->rx_pa_cfg);
4947			val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4948			writeq(val64, &bar0->rx_pa_cfg);
4949			sp->vlan_strip_flag = 0;
4950		}
4951
4952		val64 = readq(&bar0->mac_cfg);
4953		sp->promisc_flg = 1;
4954		DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
4955			  dev->name);
4956	} else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4957		/*  Remove the NIC from promiscuous mode */
4958		add = &bar0->mac_cfg;
4959		val64 = readq(&bar0->mac_cfg);
4960		val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4961
4962		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4963		writel((u32)val64, add);
4964		writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4965		writel((u32) (val64 >> 32), (add + 4));
4966
4967		if (vlan_tag_strip != 0) {
4968			val64 = readq(&bar0->rx_pa_cfg);
4969			val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
4970			writeq(val64, &bar0->rx_pa_cfg);
4971			sp->vlan_strip_flag = 1;
4972		}
4973
4974		val64 = readq(&bar0->mac_cfg);
4975		sp->promisc_flg = 0;
4976		DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n", dev->name);
4977	}
4978
4979	/*  Update individual M_CAST address list */
4980	if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
4981		if (netdev_mc_count(dev) >
4982		    (config->max_mc_addr - config->max_mac_addr)) {
4983			DBG_PRINT(ERR_DBG,
4984				  "%s: No more Rx filters can be added - "
4985				  "please enable ALL_MULTI instead\n",
4986				  dev->name);
4987			return;
4988		}
4989
4990		prev_cnt = sp->mc_addr_count;
4991		sp->mc_addr_count = netdev_mc_count(dev);
4992
4993		/* Clear out the previous list of Mc in the H/W. */
4994		for (i = 0; i < prev_cnt; i++) {
4995			writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4996			       &bar0->rmac_addr_data0_mem);
4997			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4998			       &bar0->rmac_addr_data1_mem);
4999			val64 = RMAC_ADDR_CMD_MEM_WE |
5000				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5001				RMAC_ADDR_CMD_MEM_OFFSET
5002				(config->mc_start_offset + i);
5003			writeq(val64, &bar0->rmac_addr_cmd_mem);
5004
5005			/* Wait for command completes */
5006			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5007						  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5008						  S2IO_BIT_RESET, may_sleep)) {
5009				DBG_PRINT(ERR_DBG,
5010					  "%s: Adding Multicasts failed\n",
5011					  dev->name);
5012				return;
5013			}
5014		}
5015
5016		/* Create the new Rx filter list and update the same in H/W. */
5017		i = 0;
5018		netdev_for_each_mc_addr(ha, dev) {
5019			mac_addr = 0;
5020			for (j = 0; j < ETH_ALEN; j++) {
5021				mac_addr |= ha->addr[j];
5022				mac_addr <<= 8;
5023			}
5024			mac_addr >>= 8;
5025			writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
5026			       &bar0->rmac_addr_data0_mem);
5027			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5028			       &bar0->rmac_addr_data1_mem);
5029			val64 = RMAC_ADDR_CMD_MEM_WE |
5030				RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5031				RMAC_ADDR_CMD_MEM_OFFSET
5032				(i + config->mc_start_offset);
5033			writeq(val64, &bar0->rmac_addr_cmd_mem);
5034
5035			/* Wait for command completes */
5036			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5037						  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5038						  S2IO_BIT_RESET, may_sleep)) {
5039				DBG_PRINT(ERR_DBG,
5040					  "%s: Adding Multicasts failed\n",
5041					  dev->name);
5042				return;
5043			}
5044			i++;
5045		}
5046	}
5047}
5048
/* NDO wrapper for s2io_set_multicast; passes may_sleep == false so
 * command completion is polled without sleeping (NOTE(review): the
 * ndo_set_rx_mode callback may run in atomic context).
 */
static void s2io_ndo_set_multicast(struct net_device *dev)
{
	s2io_set_multicast(dev, false);
}
5054
5055/* read from CAM unicast & multicast addresses and store it in
5056 * def_mac_addr structure
5057 */
5058static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5059{
5060	int offset;
5061	u64 mac_addr = 0x0;
5062	struct config_param *config = &sp->config;
5063
5064	/* store unicast & multicast mac addresses */
5065	for (offset = 0; offset < config->max_mc_addr; offset++) {
5066		mac_addr = do_s2io_read_unicast_mc(sp, offset);
5067		/* if read fails disable the entry */
5068		if (mac_addr == FAILURE)
5069			mac_addr = S2IO_DISABLE_MAC_ENTRY;
5070		do_s2io_copy_mac_addr(sp, offset, mac_addr);
5071	}
5072}
5073
5074/* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5075static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5076{
5077	int offset;
5078	struct config_param *config = &sp->config;
5079	/* restore unicast mac address */
5080	for (offset = 0; offset < config->max_mac_addr; offset++)
5081		do_s2io_prog_unicast(sp->dev,
5082				     sp->def_mac_addr[offset].mac_addr);
5083
5084	/* restore multicast mac address */
5085	for (offset = config->mc_start_offset;
5086	     offset < config->max_mc_addr; offset++)
5087		do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5088}
5089
5090/* add a multicast MAC address to CAM */
5091static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5092{
5093	int i;
5094	u64 mac_addr = 0;
5095	struct config_param *config = &sp->config;
5096
5097	for (i = 0; i < ETH_ALEN; i++) {
5098		mac_addr <<= 8;
5099		mac_addr |= addr[i];
5100	}
5101	if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5102		return SUCCESS;
5103
5104	/* check if the multicast mac already preset in CAM */
5105	for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5106		u64 tmp64;
5107		tmp64 = do_s2io_read_unicast_mc(sp, i);
5108		if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5109			break;
5110
5111		if (tmp64 == mac_addr)
5112			return SUCCESS;
5113	}
5114	if (i == config->max_mc_addr) {
5115		DBG_PRINT(ERR_DBG,
5116			  "CAM full no space left for multicast MAC\n");
5117		return FAILURE;
5118	}
5119	/* Update the internal structure with this new mac address */
5120	do_s2io_copy_mac_addr(sp, i, mac_addr);
5121
5122	return do_s2io_add_mac(sp, mac_addr, i);
5123}
5124
5125/* add MAC address to CAM */
5126static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
5127{
5128	u64 val64;
5129	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5130
5131	writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
5132	       &bar0->rmac_addr_data0_mem);
5133
5134	val64 =	RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5135		RMAC_ADDR_CMD_MEM_OFFSET(off);
5136	writeq(val64, &bar0->rmac_addr_cmd_mem);
5137
5138	/* Wait till command completes */
5139	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5140				  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5141				  S2IO_BIT_RESET, true)) {
5142		DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
5143		return FAILURE;
5144	}
5145	return SUCCESS;
5146}
5147/* deletes a specified unicast/multicast mac entry from CAM */
5148static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5149{
5150	int offset;
5151	u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5152	struct config_param *config = &sp->config;
5153
5154	for (offset = 1;
5155	     offset < config->max_mc_addr; offset++) {
5156		tmp64 = do_s2io_read_unicast_mc(sp, offset);
5157		if (tmp64 == addr) {
5158			/* disable the entry by writing  0xffffffffffffULL */
5159			if (do_s2io_add_mac(sp, dis_addr, offset) ==  FAILURE)
5160				return FAILURE;
5161			/* store the new mac list from CAM */
5162			do_s2io_store_unicast_mc(sp);
5163			return SUCCESS;
5164		}
5165	}
5166	DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5167		  (unsigned long long)addr);
5168	return FAILURE;
5169}
5170
5171/* read mac entries from CAM */
5172static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5173{
5174	u64 tmp64, val64;
5175	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5176
5177	/* read mac addr */
5178	val64 =	RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5179		RMAC_ADDR_CMD_MEM_OFFSET(offset);
5180	writeq(val64, &bar0->rmac_addr_cmd_mem);
5181
5182	/* Wait till command completes */
5183	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5184				  RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5185				  S2IO_BIT_RESET, true)) {
5186		DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
5187		return FAILURE;
5188	}
5189	tmp64 = readq(&bar0->rmac_addr_data0_mem);
5190
5191	return tmp64 >> 16;
5192}
5193
5194/*
5195 * s2io_set_mac_addr - driver entry point
5196 */
5197
5198static int s2io_set_mac_addr(struct net_device *dev, void *p)
5199{
5200	struct sockaddr *addr = p;
5201
5202	if (!is_valid_ether_addr(addr->sa_data))
5203		return -EADDRNOTAVAIL;
5204
5205	eth_hw_addr_set(dev, addr->sa_data);
5206
5207	/* store the MAC address in CAM */
5208	return do_s2io_prog_unicast(dev, dev->dev_addr);
5209}
5210/**
5211 *  do_s2io_prog_unicast - Programs the Xframe mac address
5212 *  @dev : pointer to the device structure.
5213 *  @addr: a uchar pointer to the new mac address which is to be set.
5214 *  Description : This procedure will program the Xframe to receive
5215 *  frames with new Mac Address
5216 *  Return value: SUCCESS on success and an appropriate (-)ve integer
5217 *  as defined in errno.h file on failure.
5218 */
5219
static int do_s2io_prog_unicast(struct net_device *dev, const u8 *addr)
{
	struct s2io_nic *sp = netdev_priv(dev);
	register u64 mac_addr = 0, perm_addr = 0;
	int i;
	u64 tmp64;
	struct config_param *config = &sp->config;

	/*
	 * Set the new MAC address as the new unicast filter and reflect this
	 * change on the device address registered with the OS. It will be
	 * at offset 0.
	 */
	/* Pack both the requested and the permanent address into u64s
	 * (MSB first) so they can be compared as integers below.
	 */
	for (i = 0; i < ETH_ALEN; i++) {
		mac_addr <<= 8;
		mac_addr |= addr[i];
		perm_addr <<= 8;
		perm_addr |= sp->def_mac_addr[0].mac_addr[i];
	}

	/* check if the dev_addr is different than perm_addr */
	if (mac_addr == perm_addr)
		return SUCCESS;

	/* check if the mac already preset in CAM */
	for (i = 1; i < config->max_mac_addr; i++) {
		tmp64 = do_s2io_read_unicast_mc(sp, i);
		if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
			break;

		if (tmp64 == mac_addr) {
			DBG_PRINT(INFO_DBG,
				  "MAC addr:0x%llx already present in CAM\n",
				  (unsigned long long)mac_addr);
			return SUCCESS;
		}
	}
	/* i walked off the end without finding a free or matching slot. */
	if (i == config->max_mac_addr) {
		DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
		return FAILURE;
	}
	/* Update the internal structure with this new mac address */
	do_s2io_copy_mac_addr(sp, i, mac_addr);

	/* Program the first free CAM slot found above. */
	return do_s2io_add_mac(sp, mac_addr, i);
}
5266
5267/**
5268 * s2io_ethtool_set_link_ksettings - Sets different link parameters.
5269 * @dev : pointer to netdev
5270 * @cmd: pointer to the structure with parameters given by ethtool to set
5271 * link information.
5272 * Description:
5273 * The function sets different link parameters provided by the user onto
5274 * the NIC.
5275 * Return value:
5276 * 0 on success.
5277 */
5278
5279static int
5280s2io_ethtool_set_link_ksettings(struct net_device *dev,
5281				const struct ethtool_link_ksettings *cmd)
5282{
5283	struct s2io_nic *sp = netdev_priv(dev);
5284	if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
5285	    (cmd->base.speed != SPEED_10000) ||
5286	    (cmd->base.duplex != DUPLEX_FULL))
5287		return -EINVAL;
5288	else {
5289		s2io_close(sp->dev);
5290		s2io_open(sp->dev);
5291	}
5292
5293	return 0;
5294}
5295
5296/**
5297 * s2io_ethtool_get_link_ksettings - Return link specific information.
5298 * @dev: pointer to netdev
5299 * @cmd : pointer to the structure with parameters given by ethtool
5300 * to return link information.
5301 * Description:
5302 * Returns link specific information like speed, duplex etc.. to ethtool.
5303 * Return value :
5304 * return 0 on success.
5305 */
5306
5307static int
5308s2io_ethtool_get_link_ksettings(struct net_device *dev,
5309				struct ethtool_link_ksettings *cmd)
5310{
5311	struct s2io_nic *sp = netdev_priv(dev);
5312
5313	ethtool_link_ksettings_zero_link_mode(cmd, supported);
5314	ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
5315	ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
5316
5317	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
5318	ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full);
5319	ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
5320
5321	cmd->base.port = PORT_FIBRE;
5322
5323	if (netif_carrier_ok(sp->dev)) {
5324		cmd->base.speed = SPEED_10000;
5325		cmd->base.duplex = DUPLEX_FULL;
5326	} else {
5327		cmd->base.speed = SPEED_UNKNOWN;
5328		cmd->base.duplex = DUPLEX_UNKNOWN;
5329	}
5330
5331	cmd->base.autoneg = AUTONEG_DISABLE;
5332	return 0;
5333}
5334
5335/**
5336 * s2io_ethtool_gdrvinfo - Returns driver specific information.
5337 * @dev: pointer to netdev
5338 * @info : pointer to the structure with parameters given by ethtool to
5339 * return driver information.
5340 * Description:
 * Returns driver specific information like name, version etc.. to ethtool.
5342 * Return value:
5343 *  void
5344 */
5345
static void s2io_ethtool_gdrvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *info)
{
	struct s2io_nic *sp = netdev_priv(dev);

	/* Report driver name/version and the NIC's PCI bus address. */
	strscpy(info->driver, s2io_driver_name, sizeof(info->driver));
	strscpy(info->version, s2io_driver_version, sizeof(info->version));
	strscpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
}
5355
5356/**
 *  s2io_ethtool_gregs - dumps the entire space of Xframe into the buffer.
5358 *  @dev: pointer to netdev
5359 *  @regs : pointer to the structure with parameters given by ethtool for
5360 *          dumping the registers.
5361 *  @space: The input argument into which all the registers are dumped.
5362 *  Description:
5363 *  Dumps the entire register space of xFrame NIC into the user given
5364 *  buffer area.
5365 * Return value :
5366 * void .
5367 */
5368
5369static void s2io_ethtool_gregs(struct net_device *dev,
5370			       struct ethtool_regs *regs, void *space)
5371{
5372	int i;
5373	u64 reg;
5374	u8 *reg_space = (u8 *)space;
5375	struct s2io_nic *sp = netdev_priv(dev);
5376
5377	regs->len = XENA_REG_SPACE;
5378	regs->version = sp->pdev->subsystem_device;
5379
5380	for (i = 0; i < regs->len; i += 8) {
5381		reg = readq(sp->bar0 + i);
5382		memcpy((reg_space + i), &reg, 8);
5383	}
5384}
5385
5386/*
5387 *  s2io_set_led - control NIC led
5388 */
5389static void s2io_set_led(struct s2io_nic *sp, bool on)
5390{
5391	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5392	u16 subid = sp->pdev->subsystem_device;
5393	u64 val64;
5394
5395	if ((sp->device_type == XFRAME_II_DEVICE) ||
5396	    ((subid & 0xFF) >= 0x07)) {
5397		val64 = readq(&bar0->gpio_control);
5398		if (on)
5399			val64 |= GPIO_CTRL_GPIO_0;
5400		else
5401			val64 &= ~GPIO_CTRL_GPIO_0;
5402
5403		writeq(val64, &bar0->gpio_control);
5404	} else {
5405		val64 = readq(&bar0->adapter_control);
5406		if (on)
5407			val64 |= ADAPTER_LED_ON;
5408		else
5409			val64 &= ~ADAPTER_LED_ON;
5410
5411		writeq(val64, &bar0->adapter_control);
5412	}
5413
5414}
5415
5416/**
5417 * s2io_ethtool_set_led - To physically identify the nic on the system.
5418 * @dev : network device
5419 * @state: led setting
5420 *
5421 * Description: Used to physically identify the NIC on the system.
5422 * The Link LED will blink for a time specified by the user for
5423 * identification.
5424 * NOTE: The Link has to be Up to be able to blink the LED. Hence
5425 * identification is possible only if it's link is up.
5426 */
5427
static int s2io_ethtool_set_led(struct net_device *dev,
				enum ethtool_phys_id_state state)
{
	struct s2io_nic *sp = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u16 subid = sp->pdev->subsystem_device;

	/* Early Xframe I boards can only blink the LED while the adapter
	 * control block (and hence the link) is enabled.
	 */
	if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) {
		u64 val64 = readq(&bar0->adapter_control);
		if (!(val64 & ADAPTER_CNTL_EN)) {
			pr_err("Adapter Link down, cannot blink LED\n");
			return -EAGAIN;
		}
	}

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		/* Save the current LED/GPIO state so it can be restored
		 * when identification ends.
		 */
		sp->adapt_ctrl_org = readq(&bar0->gpio_control);
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		s2io_set_led(sp, true);
		break;

	case ETHTOOL_ID_OFF:
		s2io_set_led(sp, false);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore the saved state on boards whose link LED would
		 * otherwise be left in the wrong state.
		 */
		if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid))
			writeq(sp->adapt_ctrl_org, &bar0->gpio_control);
	}

	return 0;
}
5463
5464static void
5465s2io_ethtool_gringparam(struct net_device *dev,
5466			struct ethtool_ringparam *ering,
5467			struct kernel_ethtool_ringparam *kernel_ering,
5468			struct netlink_ext_ack *extack)
5469{
5470	struct s2io_nic *sp = netdev_priv(dev);
5471	int i, tx_desc_count = 0, rx_desc_count = 0;
5472
5473	if (sp->rxd_mode == RXD_MODE_1) {
5474		ering->rx_max_pending = MAX_RX_DESC_1;
5475		ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5476	} else {
5477		ering->rx_max_pending = MAX_RX_DESC_2;
5478		ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5479	}
5480
5481	ering->tx_max_pending = MAX_TX_DESC;
5482
5483	for (i = 0; i < sp->config.rx_ring_num; i++)
5484		rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5485	ering->rx_pending = rx_desc_count;
5486	ering->rx_jumbo_pending = rx_desc_count;
5487
5488	for (i = 0; i < sp->config.tx_fifo_num; i++)
5489		tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5490	ering->tx_pending = tx_desc_count;
5491	DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
5492}
5493
5494/**
5495 * s2io_ethtool_getpause_data -Pause frame generation and reception.
5496 * @dev: pointer to netdev
5497 * @ep : pointer to the structure with pause parameters given by ethtool.
5498 * Description:
5499 * Returns the Pause frame generation and reception capability of the NIC.
5500 * Return value:
5501 *  void
5502 */
5503static void s2io_ethtool_getpause_data(struct net_device *dev,
5504				       struct ethtool_pauseparam *ep)
5505{
5506	u64 val64;
5507	struct s2io_nic *sp = netdev_priv(dev);
5508	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5509
5510	val64 = readq(&bar0->rmac_pause_cfg);
5511	if (val64 & RMAC_PAUSE_GEN_ENABLE)
5512		ep->tx_pause = true;
5513	if (val64 & RMAC_PAUSE_RX_ENABLE)
5514		ep->rx_pause = true;
5515	ep->autoneg = false;
5516}
5517
5518/**
5519 * s2io_ethtool_setpause_data -  set/reset pause frame generation.
5520 * @dev: pointer to netdev
5521 * @ep : pointer to the structure with pause parameters given by ethtool.
5522 * Description:
5523 * It can be used to set or reset Pause frame generation or reception
5524 * support of the NIC.
5525 * Return value:
5526 * int, returns 0 on Success
5527 */
5528
5529static int s2io_ethtool_setpause_data(struct net_device *dev,
5530				      struct ethtool_pauseparam *ep)
5531{
5532	u64 val64;
5533	struct s2io_nic *sp = netdev_priv(dev);
5534	struct XENA_dev_config __iomem *bar0 = sp->bar0;
5535
5536	val64 = readq(&bar0->rmac_pause_cfg);
5537	if (ep->tx_pause)
5538		val64 |= RMAC_PAUSE_GEN_ENABLE;
5539	else
5540		val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5541	if (ep->rx_pause)
5542		val64 |= RMAC_PAUSE_RX_ENABLE;
5543	else
5544		val64 &= ~RMAC_PAUSE_RX_ENABLE;
5545	writeq(val64, &bar0->rmac_pause_cfg);
5546	return 0;
5547}
5548
5549#define S2IO_DEV_ID		5
5550/**
5551 * read_eeprom - reads 4 bytes of data from user given offset.
5552 * @sp : private member of the device structure, which is a pointer to the
5553 *      s2io_nic structure.
5554 * @off : offset at which the data must be written
5555 * @data : Its an output parameter where the data read at the given
5556 *	offset is stored.
5557 * Description:
5558 * Will read 4 bytes of data from the user given offset and return the
5559 * read data.
5560 * NOTE: Will allow to read only part of the EEPROM visible through the
5561 *   I2C bus.
5562 * Return value:
5563 *  -1 on failure and 0 on success.
5564 */
static int read_eeprom(struct s2io_nic *sp, int off, u64 *data)
{
	int ret = -1;
	u32 exit_cnt = 0;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Xframe I: read through the I2C interface. */
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
			I2C_CONTROL_ADDR(off) |
			I2C_CONTROL_BYTE_CNT(0x3) |
			I2C_CONTROL_READ |
			I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		/* Poll for completion: up to 5 tries, 50 ms apart. */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				*data = I2C_CONTROL_GET_DATA(val64);
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Xframe II: read through the SPI interface instead. */
		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(0x3) |
			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		/* Setup first, then raise the request bit in a second write. */
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				ret = 1;	/* NOTE(review): 1 (not -1) on NACK */
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				*data = readq(&bar0->spi_data);
				*data &= 0xffffff;	/* only 3 bytes valid */
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
5616
5617/**
5618 *  write_eeprom - actually writes the relevant part of the data value.
5619 *  @sp : private member of the device structure, which is a pointer to the
5620 *       s2io_nic structure.
5621 *  @off : offset at which the data must be written
5622 *  @data : The data that is to be written
5623 *  @cnt : Number of bytes of the data that are actually to be written into
5624 *  the Eeprom. (max of 3)
5625 * Description:
5626 *  Actually writes the relevant part of the data value into the Eeprom
5627 *  through the I2C bus.
5628 * Return value:
5629 *  0 on success, -1 on failure.
5630 */
5631
static int write_eeprom(struct s2io_nic *sp, int off, u64 data, int cnt)
{
	int exit_cnt = 0, ret = -1;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Xframe I: write through the I2C interface. */
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) |
			I2C_CONTROL_ADDR(off) |
			I2C_CONTROL_BYTE_CNT(cnt) |
			I2C_CONTROL_SET_DATA((u32)data) |
			I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		/* Poll for completion: up to 5 tries, 50 ms apart. */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				if (!(val64 & I2C_CONTROL_NACK))
					ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	if (sp->device_type == XFRAME_II_DEVICE) {
		/* A byte count of 8 is encoded as 0 in the SPI field. */
		int write_cnt = (cnt == 8) ? 0 : cnt;
		writeq(SPI_DATA_WRITE(data, (cnt << 3)), &bar0->spi_data);

		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(write_cnt) |
			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		/* Setup first, then raise the request bit in a second write. */
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				ret = 1;	/* NOTE(review): 1 (not -1) on NACK */
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
/*
 * s2io_vpd_read - fetch product name and serial number from the VPD area
 * Reads the 256-byte Vital Product Data block via PCI config space and
 * fills nic->product_name and nic->serial_num; both keep safe defaults
 * on any failure.
 */
static void s2io_vpd_read(struct s2io_nic *nic)
{
	u8 *vpd_data;
	u8 data;
	int i = 0, cnt, len, fail = 0;
	int vpd_addr = 0x80;
	struct swStat *swstats = &nic->mac_control.stats_info->sw_stat;

	/* The VPD capability lives at a different config offset per device. */
	if (nic->device_type == XFRAME_II_DEVICE) {
		strcpy(nic->product_name, "Xframe II 10GbE network adapter");
		vpd_addr = 0x80;
	} else {
		strcpy(nic->product_name, "Xframe I 10GbE network adapter");
		vpd_addr = 0x50;
	}
	strcpy(nic->serial_num, "NOT AVAILABLE");

	vpd_data = kmalloc(256, GFP_KERNEL);
	if (!vpd_data) {
		swstats->mem_alloc_fail_cnt++;
		return;
	}
	swstats->mem_allocated += 256;

	/* Read the VPD area 32 bits at a time: write the address, then
	 * poll the flag byte (vpd_addr + 3) until it reads back 0x80.
	 */
	for (i = 0; i < 256; i += 4) {
		pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
		pci_read_config_byte(nic->pdev,  (vpd_addr + 2), &data);
		pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
		for (cnt = 0; cnt < 5; cnt++) {
			msleep(2);
			pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
			if (data == 0x80)
				break;
		}
		if (cnt >= 5) {
			DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
			fail = 1;
			break;
		}
		pci_read_config_dword(nic->pdev,  (vpd_addr + 4),
				      (u32 *)&vpd_data[i]);
	}

	if (!fail) {
		/* read serial number of adapter */
		for (cnt = 0; cnt < 252; cnt++) {
			/* Look for the "SN" VPD keyword. */
			if ((vpd_data[cnt] == 'S') &&
			    (vpd_data[cnt+1] == 'N')) {
				len = vpd_data[cnt+2];
				if (len < min(VPD_STRING_LEN, 256-cnt-2)) {
					memcpy(nic->serial_num,
					       &vpd_data[cnt + 3],
					       len);
					memset(nic->serial_num+len,
					       0,
					       VPD_STRING_LEN-len);
					break;
				}
			}
		}
	}

	/* vpd_data[1] holds the length of the product-name string. */
	if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
		len = vpd_data[1];
		memcpy(nic->product_name, &vpd_data[3], len);
		nic->product_name[len] = 0;
	}
	kfree(vpd_data);
	swstats->mem_freed += 256;
}
5753
5754/**
5755 *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
5756 *  @dev: pointer to netdev
5757 *  @eeprom : pointer to the user level structure provided by ethtool,
5758 *  containing all relevant information.
5759 *  @data_buf : user defined value to be written into Eeprom.
5760 *  Description: Reads the values stored in the Eeprom at given offset
5761 *  for a given length. Stores these values int the input argument data
5762 *  buffer 'data_buf' and returns these to the caller (ethtool.)
5763 *  Return value:
5764 *  int  0 on success
5765 */
5766
5767static int s2io_ethtool_geeprom(struct net_device *dev,
5768				struct ethtool_eeprom *eeprom, u8 * data_buf)
5769{
5770	u32 i, valid;
5771	u64 data;
5772	struct s2io_nic *sp = netdev_priv(dev);
5773
5774	eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5775
5776	if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5777		eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5778
5779	for (i = 0; i < eeprom->len; i += 4) {
5780		if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5781			DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5782			return -EFAULT;
5783		}
5784		valid = INV(data);
5785		memcpy((data_buf + i), &valid, 4);
5786	}
5787	return 0;
5788}
5789
5790/**
5791 *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5792 *  @dev: pointer to netdev
5793 *  @eeprom : pointer to the user level structure provided by ethtool,
5794 *  containing all relevant information.
5795 *  @data_buf : user defined value to be written into Eeprom.
5796 *  Description:
5797 *  Tries to write the user provided value in the Eeprom, at the offset
5798 *  given by the user.
5799 *  Return value:
5800 *  0 on success, -EFAULT on failure.
5801 */
5802
static int s2io_ethtool_seeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom,
				u8 *data_buf)
{
	int len = eeprom->len, cnt = 0;
	u64 valid = 0, data;
	struct s2io_nic *sp = netdev_priv(dev);

	/* The magic must encode this adapter's PCI vendor/device IDs. */
	if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
		DBG_PRINT(ERR_DBG,
			  "ETHTOOL_WRITE_EEPROM Err: "
			  "Magic value is wrong, it is 0x%x should be 0x%x\n",
			  (sp->pdev->vendor | (sp->pdev->device << 16)),
			  eeprom->magic);
		return -EFAULT;
	}

	/* Write one byte per iteration. */
	while (len) {
		data = (u32)data_buf[cnt] & 0x000000FF;
		/* Non-zero bytes are shifted into the top byte of the word
		 * before being handed to write_eeprom().
		 * NOTE(review): zero bytes are passed through unshifted;
		 * confirm this asymmetry matches the controller's
		 * expectations before changing it.
		 */
		if (data)
			valid = (u32)(data << 24);
		else
			valid = data;

		if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
			DBG_PRINT(ERR_DBG,
				  "ETHTOOL_WRITE_EEPROM Err: "
				  "Cannot write into the specified offset\n");
			return -EFAULT;
		}
		cnt++;
		len--;
	}

	return 0;
}
5839
5840/**
5841 * s2io_register_test - reads and writes into all clock domains.
5842 * @sp : private member of the device structure, which is a pointer to the
5843 * s2io_nic structure.
5844 * @data : variable that returns the result of each of the test conducted b
5845 * by the driver.
5846 * Description:
5847 * Read and write into all clock domains. The NIC has 3 clock domains,
5848 * see that registers in all the three regions are accessible.
5849 * Return value:
5850 * 0 on success.
5851 */
5852
static int s2io_register_test(struct s2io_nic *sp, uint64_t *data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0, exp_val;
	int fail = 0;

	/* Read-only probes: each register below has a known fixed value,
	 * one per clock domain.
	 */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x123456789abcdefULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 1);
	}

	val64 = readq(&bar0->rmac_pause_cfg);
	if (val64 != 0xc000ffff00000000ULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 2);
	}

	/* rx_queue_cfg resets differently on Xframe I vs. II. */
	val64 = readq(&bar0->rx_queue_cfg);
	if (sp->device_type == XFRAME_II_DEVICE)
		exp_val = 0x0404040404040404ULL;
	else
		exp_val = 0x0808080808080808ULL;
	if (val64 != exp_val) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 3);
	}

	val64 = readq(&bar0->xgxs_efifo_cfg);
	if (val64 != 0x000000001923141EULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level %d fails\n", 4);
	}

	/* Write/read-back probes using xmsi_data as scratch space. */
	val64 = 0x5A5A5A5A5A5A5A5AULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
		fail = 1;
		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 1);
	}

	/* Repeat with the inverted pattern to toggle every bit. */
	val64 = 0xA5A5A5A5A5A5A5A5ULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
		fail = 1;
		DBG_PRINT(ERR_DBG, "Write Test level %d fails\n", 2);
	}

	*data = fail;
	return fail;
}
5906
5907/**
5908 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5909 * @sp : private member of the device structure, which is a pointer to the
5910 * s2io_nic structure.
5911 * @data:variable that returns the result of each of the test conducted by
5912 * the driver.
5913 * Description:
5914 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5915 * register.
5916 * Return value:
5917 * 0 on success.
5918 */
5919
static int s2io_eeprom_test(struct s2io_nic *sp, uint64_t *data)
{
	int fail = 0;
	u64 ret_data, org_4F0, org_7F0;
	u8 saved_4F0 = 0, saved_7F0 = 0;
	struct net_device *dev = sp->dev;

	/* Test Write Error at offset 0 */
	/* Note that SPI interface allows write access to all areas
	 * of EEPROM. Hence doing all negative testing only for Xframe I.
	 */
	if (sp->device_type == XFRAME_I_DEVICE)
		if (!write_eeprom(sp, 0, 0, 3))
			fail = 1;	/* a write here must be rejected */

	/* Save current values at offsets 0x4F0 and 0x7F0 */
	if (!read_eeprom(sp, 0x4F0, &org_4F0))
		saved_4F0 = 1;
	if (!read_eeprom(sp, 0x7F0, &org_7F0))
		saved_7F0 = 1;

	/* Test Write at offset 4f0 */
	if (write_eeprom(sp, 0x4F0, 0x012345, 3))
		fail = 1;
	if (read_eeprom(sp, 0x4F0, &ret_data))
		fail = 1;

	/* Verify the read-back matches what was written. */
	if (ret_data != 0x012345) {
		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
			  "Data written %llx Data read %llx\n",
			  dev->name, (unsigned long long)0x12345,
			  (unsigned long long)ret_data);
		fail = 1;
	}

	/* Reset the EEPROM data go FFFF */
	write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);

	/* Test Write Request Error at offset 0x7c */
	if (sp->device_type == XFRAME_I_DEVICE)
		if (!write_eeprom(sp, 0x07C, 0, 3))
			fail = 1;

	/* Test Write Request at offset 0x7f0 */
	if (write_eeprom(sp, 0x7F0, 0x012345, 3))
		fail = 1;
	if (read_eeprom(sp, 0x7F0, &ret_data))
		fail = 1;

	if (ret_data != 0x012345) {
		DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
			  "Data written %llx Data read %llx\n",
			  dev->name, (unsigned long long)0x12345,
			  (unsigned long long)ret_data);
		fail = 1;
	}

	/* Reset the EEPROM data go FFFF */
	write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);

	/* More negative tests -- writes to protected offsets must fail. */
	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Test Write Error at offset 0x80 */
		if (!write_eeprom(sp, 0x080, 0, 3))
			fail = 1;

		/* Test Write Error at offset 0xfc */
		if (!write_eeprom(sp, 0x0FC, 0, 3))
			fail = 1;

		/* Test Write Error at offset 0x100 */
		if (!write_eeprom(sp, 0x100, 0, 3))
			fail = 1;

		/* Test Write Error at offset 4ec */
		if (!write_eeprom(sp, 0x4EC, 0, 3))
			fail = 1;
	}

	/* Restore values at offsets 0x4F0 and 0x7F0 */
	if (saved_4F0)
		write_eeprom(sp, 0x4F0, org_4F0, 3);
	if (saved_7F0)
		write_eeprom(sp, 0x7F0, org_7F0, 3);

	*data = fail;
	return fail;
}
6007
6008/**
6009 * s2io_bist_test - invokes the MemBist test of the card .
6010 * @sp : private member of the device structure, which is a pointer to the
6011 * s2io_nic structure.
6012 * @data:variable that returns the result of each of the test conducted by
6013 * the driver.
6014 * Description:
6015 * This invokes the MemBist test of the card. We give around
 * 2 secs time for the Test to complete. If it's still not complete
 * within this period, we consider that the test failed.
6018 * Return value:
6019 * 0 on success and -1 on failure.
6020 */
6021
6022static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
6023{
6024	u8 bist = 0;
6025	int cnt = 0, ret = -1;
6026
6027	pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6028	bist |= PCI_BIST_START;
6029	pci_write_config_word(sp->pdev, PCI_BIST, bist);
6030
6031	while (cnt < 20) {
6032		pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6033		if (!(bist & PCI_BIST_START)) {
6034			*data = (bist & PCI_BIST_CODE_MASK);
6035			ret = 0;
6036			break;
6037		}
6038		msleep(100);
6039		cnt++;
6040	}
6041
6042	return ret;
6043}
6044
6045/**
6046 * s2io_link_test - verifies the link state of the nic
6047 * @sp: private member of the device structure, which is a pointer to the
6048 * s2io_nic structure.
6049 * @data: variable that returns the result of each of the test conducted by
6050 * the driver.
6051 * Description:
6052 * The function verifies the link state of the NIC and updates the input
6053 * argument 'data' appropriately.
6054 * Return value:
6055 * 0 on success.
6056 */
6057
6058static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
6059{
6060	struct XENA_dev_config __iomem *bar0 = sp->bar0;
6061	u64 val64;
6062
6063	val64 = readq(&bar0->adapter_status);
6064	if (!(LINK_IS_UP(val64)))
6065		*data = 1;
6066	else
6067		*data = 0;
6068
6069	return *data;
6070}
6071
6072/**
6073 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6074 * @sp: private member of the device structure, which is a pointer to the
6075 * s2io_nic structure.
6076 * @data: variable that returns the result of each of the test
6077 * conducted by the driver.
6078 * Description:
6079 *  This is one of the offline test that tests the read and write
6080 *  access to the RldRam chip on the NIC.
6081 * Return value:
6082 *  0 on success.
6083 */
6084
static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	int cnt, iteration = 0, test_fail = 0;

	/* ECC must be off while the RLDRAM is exercised in test mode. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	val64 = readq(&bar0->mc_rldram_test_ctrl);
	val64 |= MC_RLDRAM_TEST_MODE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	val64 |= MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	/* Two passes: the original patterns, then their bit-inverse
	 * (upper 48 bits), so every cell is driven both ways.
	 */
	while (iteration < 2) {
		val64 = 0x55555555aaaa0000ULL;
		if (iteration == 1)
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d0);

		val64 = 0xaaaa5a5555550000ULL;
		if (iteration == 1)
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d1);

		val64 = 0x55aaaaaaaa5a0000ULL;
		if (iteration == 1)
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		writeq(val64, &bar0->mc_rldram_test_d2);

		val64 = (u64) (0x0000003ffffe0100ULL);
		writeq(val64, &bar0->mc_rldram_test_add);

		/* Trigger the write pass and poll for completion. */
		val64 = MC_RLDRAM_TEST_MODE |
			MC_RLDRAM_TEST_WRITE |
			MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(200);
		}

		if (cnt == 5)
			break;	/* timed out -- leave test_fail as set */

		/* Trigger the read-back pass and poll for completion. */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(500);
		}

		if (cnt == 5)
			break;

		val64 = readq(&bar0->mc_rldram_test_ctrl);
		if (!(val64 & MC_RLDRAM_TEST_PASS))
			test_fail = 1;

		iteration++;
	}

	*data = test_fail;

	/* Bring the adapter out of test mode */
	SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);

	return test_fail;
}
6167
6168/**
 *  s2io_ethtool_test - conducts 6 tests to determine the health of card.
6170 *  @dev: pointer to netdev
6171 *  @ethtest : pointer to a ethtool command specific structure that will be
6172 *  returned to the user.
6173 *  @data : variable that returns the result of each of the test
6174 * conducted by the driver.
6175 * Description:
6176 *  This function conducts 6 tests ( 4 offline and 2 online) to determine
6177 *  the health of the card.
6178 * Return value:
6179 *  void
6180 */
6181
static void s2io_ethtool_test(struct net_device *dev,
			      struct ethtool_test *ethtest,
			      uint64_t *data)
{
	struct s2io_nic *sp = netdev_priv(dev);
	int orig_state = netif_running(sp->dev);

	if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline Tests. */
		/* The interface must be down for the offline tests; restore
		 * its original state afterwards.
		 */
		if (orig_state)
			s2io_close(sp->dev);

		if (s2io_register_test(sp, &data[0]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		s2io_reset(sp);

		if (s2io_rldram_test(sp, &data[3]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		s2io_reset(sp);

		if (s2io_eeprom_test(sp, &data[1]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		if (s2io_bist_test(sp, &data[4]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		if (orig_state)
			s2io_open(sp->dev);

		/* data[2] is the (online-only) link test slot. */
		data[2] = 0;
	} else {
		/* Online Tests. */
		if (!orig_state) {
			DBG_PRINT(ERR_DBG, "%s: is not up, cannot run test\n",
				  dev->name);
			/* NOTE(review): these -1 markers (except data[2])
			 * are overwritten by the zeroes below, and the link
			 * test still runs -- confirm whether an early
			 * return was intended here.
			 */
			data[0] = -1;
			data[1] = -1;
			data[2] = -1;
			data[3] = -1;
			data[4] = -1;
		}

		if (s2io_link_test(sp, &data[2]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		/* The offline-only slots report 0 in online mode. */
		data[0] = 0;
		data[1] = 0;
		data[3] = 0;
		data[4] = 0;
	}
}
6235
/*
 * s2io_get_ethtool_stats - fill the ethtool statistics array.
 * @dev: device whose stats are requested.
 * @estats: ethtool stats request header (unused here).
 * @tmp_stats: output array; entries are filled strictly in the order of
 *	the ethtool string tables (ethtool_xena_stats_keys, then the
 *	enhanced keys on Xframe-II, then ethtool_driver_stats_keys), so
 *	the order of the statements below must not be changed.
 *
 * Hardware counters are little-endian in the stat block; many 32-bit
 * counters have a separate overflow word that forms the upper 32 bits.
 */
static void s2io_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *estats,
				   u64 *tmp_stats)
{
	int i = 0, k;
	struct s2io_nic *sp = netdev_priv(dev);
	struct stat_block *stats = sp->mac_control.stats_info;
	struct swStat *swstats = &stats->sw_stat;
	struct xpakStat *xstats = &stats->xpak_stat;

	/* Ask the adapter to refresh the DMA'd stat block first. */
	s2io_updt_stats(sp);
	/* TMAC (transmit MAC) counters. */
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_frms_oflow) << 32  |
		le32_to_cpu(stats->tmac_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
		le32_to_cpu(stats->tmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_mcst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_bcst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_bcst_frms);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_pause_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stats->tmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_ucst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_nucst_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
		le32_to_cpu(stats->tmac_any_err_frms);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_vld_ip_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_vld_ip_oflow) << 32 |
		le32_to_cpu(stats->tmac_vld_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stats->tmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_icmp_oflow) << 32 |
		le32_to_cpu(stats->tmac_icmp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->tmac_rst_tcp_oflow) << 32 |
		le32_to_cpu(stats->tmac_rst_tcp);
	tmp_stats[i++] = le64_to_cpu(stats->tmac_tcp);
	tmp_stats[i++] = (u64)le32_to_cpu(stats->tmac_udp_oflow) << 32 |
		le32_to_cpu(stats->tmac_udp);
	/* RMAC (receive MAC) counters. */
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_vld_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
		le32_to_cpu(stats->rmac_data_octets);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_fcs_err_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_drop_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_vld_mcst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_vld_bcst_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_vld_bcst_frms);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_in_rng_len_err_frms);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_out_rng_len_err_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_long_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_pause_ctrl_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_unsup_ctrl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_ttl_octets_oflow) << 32 |
		le32_to_cpu(stats->rmac_ttl_octets);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_accepted_ucst_frms_oflow) << 32
		| le32_to_cpu(stats->rmac_accepted_ucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_accepted_nucst_frms_oflow)
		<< 32 | le32_to_cpu(stats->rmac_accepted_nucst_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_discarded_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_discarded_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_drop_events_oflow)
		<< 32 | le32_to_cpu(stats->rmac_drop_events);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_less_fb_octets);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_usized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_osized_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_osized_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_frag_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_frag_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_jabber_frms_oflow) << 32 |
		le32_to_cpu(stats->rmac_jabber_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_64_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_65_127_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_128_255_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_256_511_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_512_1023_frms);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_1024_1518_frms);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_ip_oflow) << 32 |
		le32_to_cpu(stats->rmac_ip);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_ip_octets);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_hdr_err_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_drop_ip_oflow) << 32 |
		le32_to_cpu(stats->rmac_drop_ip);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_icmp_oflow) << 32 |
		le32_to_cpu(stats->rmac_icmp);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_tcp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_udp_oflow) << 32 |
		le32_to_cpu(stats->rmac_udp);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_err_drp_udp_oflow) << 32 |
		le32_to_cpu(stats->rmac_err_drp_udp);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_err_sym);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q0);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q1);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q2);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q3);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q4);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q5);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q6);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_frms_q7);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q0);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q1);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q2);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q3);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q4);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q5);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q6);
	tmp_stats[i++] = le16_to_cpu(stats->rmac_full_q7);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_pause_cnt_oflow) << 32 |
		le32_to_cpu(stats->rmac_pause_cnt);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_data_err_cnt);
	tmp_stats[i++] = le64_to_cpu(stats->rmac_xgmii_ctrl_err_cnt);
	tmp_stats[i++] =
		(u64)le32_to_cpu(stats->rmac_accepted_ip_oflow) << 32 |
		le32_to_cpu(stats->rmac_accepted_ip);
	tmp_stats[i++] = le32_to_cpu(stats->rmac_err_tcp);
	/* PCI/PCI-X bus transaction counters. */
	tmp_stats[i++] = le32_to_cpu(stats->rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_rd_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_rd_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->new_wr_req_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_rtry_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->wr_disc_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rd_rtry_wr_ack_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txp_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rxd_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rxd_wr_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->txf_rd_cnt);
	tmp_stats[i++] = le32_to_cpu(stats->rxf_wr_cnt);

	/* Enhanced statistics exist only for Hercules */
	if (sp->device_type == XFRAME_II_DEVICE) {
		tmp_stats[i++] =
			le64_to_cpu(stats->rmac_ttl_1519_4095_frms);
		tmp_stats[i++] =
			le64_to_cpu(stats->rmac_ttl_4096_8191_frms);
		tmp_stats[i++] =
			le64_to_cpu(stats->rmac_ttl_8192_max_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_ttl_gt_max_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_osized_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_jabber_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_gt_max_alt_frms);
		tmp_stats[i++] = le64_to_cpu(stats->rmac_vlan_frms);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_len_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_fcs_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_pf_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_da_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_red_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_rts_discard);
		tmp_stats[i++] = le32_to_cpu(stats->rmac_ingm_full_discard);
		tmp_stats[i++] = le32_to_cpu(stats->link_fault_cnt);
	}

	/* Driver software statistics (host byte order, no conversion). */
	tmp_stats[i++] = 0;
	tmp_stats[i++] = swstats->single_ecc_errs;
	tmp_stats[i++] = swstats->double_ecc_errs;
	tmp_stats[i++] = swstats->parity_err_cnt;
	tmp_stats[i++] = swstats->serious_err_cnt;
	tmp_stats[i++] = swstats->soft_reset_cnt;
	tmp_stats[i++] = swstats->fifo_full_cnt;
	for (k = 0; k < MAX_RX_RINGS; k++)
		tmp_stats[i++] = swstats->ring_full_cnt[k];
	tmp_stats[i++] = xstats->alarm_transceiver_temp_high;
	tmp_stats[i++] = xstats->alarm_transceiver_temp_low;
	tmp_stats[i++] = xstats->alarm_laser_bias_current_high;
	tmp_stats[i++] = xstats->alarm_laser_bias_current_low;
	tmp_stats[i++] = xstats->alarm_laser_output_power_high;
	tmp_stats[i++] = xstats->alarm_laser_output_power_low;
	tmp_stats[i++] = xstats->warn_transceiver_temp_high;
	tmp_stats[i++] = xstats->warn_transceiver_temp_low;
	tmp_stats[i++] = xstats->warn_laser_bias_current_high;
	tmp_stats[i++] = xstats->warn_laser_bias_current_low;
	tmp_stats[i++] = xstats->warn_laser_output_power_high;
	tmp_stats[i++] = xstats->warn_laser_output_power_low;
	tmp_stats[i++] = swstats->clubbed_frms_cnt;
	tmp_stats[i++] = swstats->sending_both;
	tmp_stats[i++] = swstats->outof_sequence_pkts;
	tmp_stats[i++] = swstats->flush_max_pkts;
	if (swstats->num_aggregations) {
		u64 tmp = swstats->sum_avg_pkts_aggregated;
		int count = 0;
		/*
		 * Since 64-bit divide does not work on all platforms,
		 * do repeated subtraction.
		 */
		while (tmp >= swstats->num_aggregations) {
			tmp -= swstats->num_aggregations;
			count++;
		}
		tmp_stats[i++] = count;
	} else
		tmp_stats[i++] = 0;
	tmp_stats[i++] = swstats->mem_alloc_fail_cnt;
	tmp_stats[i++] = swstats->pci_map_fail_cnt;
	tmp_stats[i++] = swstats->watchdog_timer_cnt;
	tmp_stats[i++] = swstats->mem_allocated;
	tmp_stats[i++] = swstats->mem_freed;
	tmp_stats[i++] = swstats->link_up_cnt;
	tmp_stats[i++] = swstats->link_down_cnt;
	tmp_stats[i++] = swstats->link_up_time;
	tmp_stats[i++] = swstats->link_down_time;

	tmp_stats[i++] = swstats->tx_buf_abort_cnt;
	tmp_stats[i++] = swstats->tx_desc_abort_cnt;
	tmp_stats[i++] = swstats->tx_parity_err_cnt;
	tmp_stats[i++] = swstats->tx_link_loss_cnt;
	tmp_stats[i++] = swstats->tx_list_proc_err_cnt;

	tmp_stats[i++] = swstats->rx_parity_err_cnt;
	tmp_stats[i++] = swstats->rx_abort_cnt;
	tmp_stats[i++] = swstats->rx_parity_abort_cnt;
	tmp_stats[i++] = swstats->rx_rda_fail_cnt;
	tmp_stats[i++] = swstats->rx_unkn_prot_cnt;
	tmp_stats[i++] = swstats->rx_fcs_err_cnt;
	tmp_stats[i++] = swstats->rx_buf_size_err_cnt;
	tmp_stats[i++] = swstats->rx_rxd_corrupt_cnt;
	tmp_stats[i++] = swstats->rx_unkn_err_cnt;
	tmp_stats[i++] = swstats->tda_err_cnt;
	tmp_stats[i++] = swstats->pfc_err_cnt;
	tmp_stats[i++] = swstats->pcc_err_cnt;
	tmp_stats[i++] = swstats->tti_err_cnt;
	tmp_stats[i++] = swstats->tpa_err_cnt;
	tmp_stats[i++] = swstats->sm_err_cnt;
	tmp_stats[i++] = swstats->lso_err_cnt;
	tmp_stats[i++] = swstats->mac_tmac_err_cnt;
	tmp_stats[i++] = swstats->mac_rmac_err_cnt;
	tmp_stats[i++] = swstats->xgxs_txgxs_err_cnt;
	tmp_stats[i++] = swstats->xgxs_rxgxs_err_cnt;
	tmp_stats[i++] = swstats->rc_err_cnt;
	tmp_stats[i++] = swstats->prc_pcix_err_cnt;
	tmp_stats[i++] = swstats->rpa_err_cnt;
	tmp_stats[i++] = swstats->rda_err_cnt;
	tmp_stats[i++] = swstats->rti_err_cnt;
	tmp_stats[i++] = swstats->mc_err_cnt;
}
6512
/* Size in bytes of the register dump returned by s2io_ethtool_gregs(). */
static int s2io_ethtool_get_regs_len(struct net_device *dev)
{
	return XENA_REG_SPACE;
}
6517
6518
/* Size in bytes of the on-board EEPROM exposed through ethtool. */
static int s2io_get_eeprom_len(struct net_device *dev)
{
	return XENA_EEPROM_SPACE;
}
6523
6524static int s2io_get_sset_count(struct net_device *dev, int sset)
6525{
6526	struct s2io_nic *sp = netdev_priv(dev);
6527
6528	switch (sset) {
6529	case ETH_SS_TEST:
6530		return S2IO_TEST_LEN;
6531	case ETH_SS_STATS:
6532		switch (sp->device_type) {
6533		case XFRAME_I_DEVICE:
6534			return XFRAME_I_STAT_LEN;
6535		case XFRAME_II_DEVICE:
6536			return XFRAME_II_STAT_LEN;
6537		default:
6538			return 0;
6539		}
6540	default:
6541		return -EOPNOTSUPP;
6542	}
6543}
6544
6545static void s2io_ethtool_get_strings(struct net_device *dev,
6546				     u32 stringset, u8 *data)
6547{
6548	int stat_size = 0;
6549	struct s2io_nic *sp = netdev_priv(dev);
6550
6551	switch (stringset) {
6552	case ETH_SS_TEST:
6553		memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6554		break;
6555	case ETH_SS_STATS:
6556		stat_size = sizeof(ethtool_xena_stats_keys);
6557		memcpy(data, &ethtool_xena_stats_keys, stat_size);
6558		if (sp->device_type == XFRAME_II_DEVICE) {
6559			memcpy(data + stat_size,
6560			       &ethtool_enhanced_stats_keys,
6561			       sizeof(ethtool_enhanced_stats_keys));
6562			stat_size += sizeof(ethtool_enhanced_stats_keys);
6563		}
6564
6565		memcpy(data + stat_size, &ethtool_driver_stats_keys,
6566		       sizeof(ethtool_driver_stats_keys));
6567	}
6568}
6569
6570static int s2io_set_features(struct net_device *dev, netdev_features_t features)
6571{
6572	struct s2io_nic *sp = netdev_priv(dev);
6573	netdev_features_t changed = (features ^ dev->features) & NETIF_F_LRO;
6574
6575	if (changed && netif_running(dev)) {
6576		int rc;
6577
6578		s2io_stop_all_tx_queue(sp);
6579		s2io_card_down(sp);
6580		dev->features = features;
6581		rc = s2io_card_up(sp);
6582		if (rc)
6583			s2io_reset(sp);
6584		else
6585			s2io_start_all_tx_queue(sp);
6586
6587		return rc ? rc : 1;
6588	}
6589
6590	return 0;
6591}
6592
/* ethtool entry points for the s2io driver. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo = s2io_ethtool_gdrvinfo,
	.get_regs_len = s2io_ethtool_get_regs_len,
	.get_regs = s2io_ethtool_gregs,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = s2io_get_eeprom_len,
	.get_eeprom = s2io_ethtool_geeprom,
	.set_eeprom = s2io_ethtool_seeprom,
	.get_ringparam = s2io_ethtool_gringparam,
	.get_pauseparam = s2io_ethtool_getpause_data,
	.set_pauseparam = s2io_ethtool_setpause_data,
	.self_test = s2io_ethtool_test,
	.get_strings = s2io_ethtool_get_strings,
	.set_phys_id = s2io_ethtool_set_led,
	.get_ethtool_stats = s2io_get_ethtool_stats,
	.get_sset_count = s2io_get_sset_count,
	.get_link_ksettings = s2io_ethtool_get_link_ksettings,
	.set_link_ksettings = s2io_ethtool_set_link_ksettings,
};
6612
6613/**
6614 *  s2io_ioctl - Entry point for the Ioctl
6615 *  @dev :  Device pointer.
6616 *  @rq :  An IOCTL specefic structure, that can contain a pointer to
6617 *  a proprietary structure used to pass information to the driver.
6618 *  @cmd :  This is used to distinguish between the different commands that
6619 *  can be passed to the IOCTL functions.
6620 *  Description:
6621 *  Currently there are no special functionality supported in IOCTL, hence
6622 *  function always return EOPNOTSUPPORTED
6623 */
6624
6625static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6626{
6627	return -EOPNOTSUPP;
6628}
6629
6630/**
6631 *  s2io_change_mtu - entry point to change MTU size for the device.
6632 *   @dev : device pointer.
6633 *   @new_mtu : the new MTU size for the device.
6634 *   Description: A driver entry point to change MTU size for the device.
6635 *   Before changing the MTU the device must be stopped.
6636 *  Return value:
6637 *   0 on success and an appropriate (-)ve integer as defined in errno.h
6638 *   file on failure.
6639 */
6640
6641static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6642{
6643	struct s2io_nic *sp = netdev_priv(dev);
6644	int ret = 0;
6645
6646	dev->mtu = new_mtu;
6647	if (netif_running(dev)) {
6648		s2io_stop_all_tx_queue(sp);
6649		s2io_card_down(sp);
6650		ret = s2io_card_up(sp);
6651		if (ret) {
6652			DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6653				  __func__);
6654			return ret;
6655		}
6656		s2io_wake_all_tx_queue(sp);
6657	} else { /* Device is down */
6658		struct XENA_dev_config __iomem *bar0 = sp->bar0;
6659		u64 val64 = new_mtu;
6660
6661		writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6662	}
6663
6664	return ret;
6665}
6666
6667/**
6668 * s2io_set_link - Set the LInk status
6669 * @work: work struct containing a pointer to device private structure
6670 * Description: Sets the link status for the adapter
6671 */
6672
6673static void s2io_set_link(struct work_struct *work)
6674{
6675	struct s2io_nic *nic = container_of(work, struct s2io_nic,
6676					    set_link_task);
6677	struct net_device *dev = nic->dev;
6678	struct XENA_dev_config __iomem *bar0 = nic->bar0;
6679	register u64 val64;
6680	u16 subid;
6681
6682	rtnl_lock();
6683
6684	if (!netif_running(dev))
6685		goto out_unlock;
6686
6687	if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
6688		/* The card is being reset, no point doing anything */
6689		goto out_unlock;
6690	}
6691
6692	subid = nic->pdev->subsystem_device;
6693	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6694		/*
6695		 * Allow a small delay for the NICs self initiated
6696		 * cleanup to complete.
6697		 */
6698		msleep(100);
6699	}
6700
6701	val64 = readq(&bar0->adapter_status);
6702	if (LINK_IS_UP(val64)) {
6703		if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6704			if (verify_xena_quiescence(nic)) {
6705				val64 = readq(&bar0->adapter_control);
6706				val64 |= ADAPTER_CNTL_EN;
6707				writeq(val64, &bar0->adapter_control);
6708				if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6709					    nic->device_type, subid)) {
6710					val64 = readq(&bar0->gpio_control);
6711					val64 |= GPIO_CTRL_GPIO_0;
6712					writeq(val64, &bar0->gpio_control);
6713					val64 = readq(&bar0->gpio_control);
6714				} else {
6715					val64 |= ADAPTER_LED_ON;
6716					writeq(val64, &bar0->adapter_control);
6717				}
6718				nic->device_enabled_once = true;
6719			} else {
6720				DBG_PRINT(ERR_DBG,
6721					  "%s: Error: device is not Quiescent\n",
6722					  dev->name);
6723				s2io_stop_all_tx_queue(nic);
6724			}
6725		}
6726		val64 = readq(&bar0->adapter_control);
6727		val64 |= ADAPTER_LED_ON;
6728		writeq(val64, &bar0->adapter_control);
6729		s2io_link(nic, LINK_UP);
6730	} else {
6731		if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6732						      subid)) {
6733			val64 = readq(&bar0->gpio_control);
6734			val64 &= ~GPIO_CTRL_GPIO_0;
6735			writeq(val64, &bar0->gpio_control);
6736			val64 = readq(&bar0->gpio_control);
6737		}
6738		/* turn off LED */
6739		val64 = readq(&bar0->adapter_control);
6740		val64 = val64 & (~ADAPTER_LED_ON);
6741		writeq(val64, &bar0->adapter_control);
6742		s2io_link(nic, LINK_DOWN);
6743	}
6744	clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
6745
6746out_unlock:
6747	rtnl_unlock();
6748}
6749
/*
 * set_rxd_buffer_pointer - (re)attach buffers to an Rx descriptor.
 * @sp: device private structure.
 * @rxdp: descriptor to fill.
 * @ba: buffer-address bookkeeping for 2-buffer mode (may be stale for
 *	RXD_MODE_1 descriptors; only dereferenced in RXD_MODE_3B).
 * @skb: in/out; a previously allocated skb to reuse, or NULL to allocate.
 * @temp0, @temp1, @temp2: in/out cached DMA addresses, reused for
 *	subsequent descriptors whose Host_Control is NULL so the same
 *	mapping backs several descriptors (frames will not be processed).
 * @size: buffer allocation size for the skb.
 *
 * Used while quiescing the card (see rxd_owner_bit_reset()) to hand
 * valid buffers back to the hardware. Returns 0 on success, -ENOMEM on
 * allocation or DMA-mapping failure (partial mappings are unwound).
 */
static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
				  struct buffAdd *ba,
				  struct sk_buff **skb, u64 *temp0, u64 *temp1,
				  u64 *temp2, int size)
{
	struct net_device *dev = sp->dev;
	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

	if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
		struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
		/* allocate skb */
		if (*skb) {
			DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
			/*
			 * As Rx frame are not going to be processed,
			 * using same mapped address for the Rxd
			 * buffer pointer
			 */
			rxdp1->Buffer0_ptr = *temp0;
		} else {
			*skb = netdev_alloc_skb(dev, size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG,
					  "%s: Out of memory to allocate %s\n",
					  dev->name, "1 buf mode SKBs");
				stats->mem_alloc_fail_cnt++;
				return -ENOMEM ;
			}
			stats->mem_allocated += (*skb)->truesize;
			/* storing the mapped addr in a temp variable
			 * such it will be used for next rxd whose
			 * Host Control is NULL
			 */
			rxdp1->Buffer0_ptr = *temp0 =
				dma_map_single(&sp->pdev->dev, (*skb)->data,
					       size - NET_IP_ALIGN,
					       DMA_FROM_DEVICE);
			if (dma_mapping_error(&sp->pdev->dev, rxdp1->Buffer0_ptr))
				goto memalloc_failed;
			rxdp->Host_Control = (unsigned long) (*skb);
		}
	} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
		struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
		/* Two buffer Mode */
		if (*skb) {
			/* Reuse the cached mappings for all three buffers. */
			rxdp3->Buffer2_ptr = *temp2;
			rxdp3->Buffer0_ptr = *temp0;
			rxdp3->Buffer1_ptr = *temp1;
		} else {
			*skb = netdev_alloc_skb(dev, size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG,
					  "%s: Out of memory to allocate %s\n",
					  dev->name,
					  "2 buf mode SKBs");
				stats->mem_alloc_fail_cnt++;
				return -ENOMEM;
			}
			stats->mem_allocated += (*skb)->truesize;
			/* Map the skb data as Buffer2 (payload area). */
			rxdp3->Buffer2_ptr = *temp2 =
				dma_map_single(&sp->pdev->dev, (*skb)->data,
					       dev->mtu + 4, DMA_FROM_DEVICE);
			if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer2_ptr))
				goto memalloc_failed;
			rxdp3->Buffer0_ptr = *temp0 =
				dma_map_single(&sp->pdev->dev, ba->ba_0,
					       BUF0_LEN, DMA_FROM_DEVICE);
			/* On failure, unwind every mapping made so far. */
			if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer0_ptr)) {
				dma_unmap_single(&sp->pdev->dev,
						 (dma_addr_t)rxdp3->Buffer2_ptr,
						 dev->mtu + 4,
						 DMA_FROM_DEVICE);
				goto memalloc_failed;
			}
			rxdp->Host_Control = (unsigned long) (*skb);

			/* Buffer-1 will be dummy buffer not used */
			rxdp3->Buffer1_ptr = *temp1 =
				dma_map_single(&sp->pdev->dev, ba->ba_1,
					       BUF1_LEN, DMA_FROM_DEVICE);
			if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer1_ptr)) {
				dma_unmap_single(&sp->pdev->dev,
						 (dma_addr_t)rxdp3->Buffer0_ptr,
						 BUF0_LEN, DMA_FROM_DEVICE);
				dma_unmap_single(&sp->pdev->dev,
						 (dma_addr_t)rxdp3->Buffer2_ptr,
						 dev->mtu + 4,
						 DMA_FROM_DEVICE);
				goto memalloc_failed;
			}
		}
	}
	return 0;

memalloc_failed:
	/* DMA mapping failed: account for and free the freshly
	 * allocated skb.
	 */
	stats->pci_map_fail_cnt++;
	stats->mem_freed += (*skb)->truesize;
	dev_kfree_skb(*skb);
	return -ENOMEM;
}
6850
6851static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6852				int size)
6853{
6854	struct net_device *dev = sp->dev;
6855	if (sp->rxd_mode == RXD_MODE_1) {
6856		rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
6857	} else if (sp->rxd_mode == RXD_MODE_3B) {
6858		rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6859		rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6860		rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu + 4);
6861	}
6862}
6863
/*
 * rxd_owner_bit_reset - hand all Rx descriptors back to the hardware.
 * @sp: device private structure.
 *
 * Walks every descriptor of every Rx ring, attaches a (possibly shared)
 * buffer via set_rxd_buffer_pointer(), programs the buffer sizes and
 * flips the ownership bit to the NIC. Used while quiescing the card so
 * the receive engine does not stall on empty rings.
 *
 * NOTE(review): returns 0 even when set_rxd_buffer_pointer() fails with
 * -ENOMEM (the walk is simply abandoned) — callers treat this as
 * best-effort; confirm before changing.
 */
static  int rxd_owner_bit_reset(struct s2io_nic *sp)
{
	int i, j, k, blk_cnt = 0, size;
	struct config_param *config = &sp->config;
	struct mac_info *mac_control = &sp->mac_control;
	struct net_device *dev = sp->dev;
	struct RxD_t *rxdp = NULL;
	struct sk_buff *skb = NULL;
	struct buffAdd *ba = NULL;
	u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;

	/* Calculate the size based on ring mode */
	size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
		HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
	if (sp->rxd_mode == RXD_MODE_1)
		size += NET_IP_ALIGN;
	else if (sp->rxd_mode == RXD_MODE_3B)
		size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;

	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		blk_cnt = rx_cfg->num_rxd / (rxd_count[sp->rxd_mode] + 1);

		for (j = 0; j < blk_cnt; j++) {
			for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
				rxdp = ring->rx_blocks[j].rxds[k].virt_addr;
				if (sp->rxd_mode == RXD_MODE_3B)
					ba = &ring->ba[j][k];
				if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
							   &temp0_64,
							   &temp1_64,
							   &temp2_64,
							   size) == -ENOMEM) {
					return 0;
				}

				set_rxd_buffer_size(sp, rxdp, size);
				/* Descriptor contents must be visible to the
				 * device before the ownership bit flips.
				 */
				dma_wmb();
				/* flip the Ownership bit to Hardware */
				rxdp->Control_1 |= RXD_OWN_XENA;
			}
		}
	}
	return 0;

}
6912
/*
 * s2io_add_isr - enable and register the adapter's interrupt handlers.
 * @sp: device private structure.
 *
 * Tries MSI-X first when configured (one vector per Rx ring plus an
 * alarm/Tx vector); on any enable or request_irq failure it falls back
 * to legacy INTA. Returns 0 on success, -1 if even the INTA
 * registration fails.
 */
static int s2io_add_isr(struct s2io_nic *sp)
{
	int ret = 0;
	struct net_device *dev = sp->dev;
	int err = 0;

	if (sp->config.intr_type == MSI_X)
		ret = s2io_enable_msi_x(sp);
	if (ret) {
		/* MSI-X could not be enabled: fall back to INTA. */
		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
		sp->config.intr_type = INTA;
	}

	/*
	 * Store the values of the MSIX table in
	 * the struct s2io_nic structure
	 */
	store_xmsi_data(sp);

	/* After proper initialization of H/W, register ISR */
	if (sp->config.intr_type == MSI_X) {
		int i, msix_rx_cnt = 0;

		for (i = 0; i < sp->num_entries; i++) {
			if (sp->s2io_entries[i].in_use == MSIX_FLG) {
				if (sp->s2io_entries[i].type ==
				    MSIX_RING_TYPE) {
					snprintf(sp->desc[i],
						sizeof(sp->desc[i]),
						"%s:MSI-X-%d-RX",
						dev->name, i);
					err = request_irq(sp->entries[i].vector,
							  s2io_msix_ring_handle,
							  0,
							  sp->desc[i],
							  sp->s2io_entries[i].arg);
				} else if (sp->s2io_entries[i].type ==
					   MSIX_ALARM_TYPE) {
					snprintf(sp->desc[i],
						sizeof(sp->desc[i]),
						"%s:MSI-X-%d-TX",
						dev->name, i);
					err = request_irq(sp->entries[i].vector,
							  s2io_msix_fifo_handle,
							  0,
							  sp->desc[i],
							  sp->s2io_entries[i].arg);

				}
				/* if either data or addr is zero print it. */
				if (!(sp->msix_info[i].addr &&
				      sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG,
						  "%s @Addr:0x%llx Data:0x%llx\n",
						  sp->desc[i],
						  (unsigned long long)
						  sp->msix_info[i].addr,
						  (unsigned long long)
						  ntohl(sp->msix_info[i].data));
				} else
					msix_rx_cnt++;
				if (err) {
					/* Any vector failure aborts MSI-X
					 * entirely and reverts to INTA.
					 */
					remove_msix_isr(sp);

					DBG_PRINT(ERR_DBG,
						  "%s:MSI-X-%d registration "
						  "failed\n", dev->name, i);

					DBG_PRINT(ERR_DBG,
						  "%s: Defaulting to INTA\n",
						  dev->name);
					sp->config.intr_type = INTA;
					break;
				}
				sp->s2io_entries[i].in_use =
					MSIX_REGISTERED_SUCCESS;
			}
		}
		if (!err) {
			/* --msix_rx_cnt: presumably excludes the alarm
			 * vector from the Rx count — TODO confirm.
			 */
			pr_info("MSI-X-RX %d entries enabled\n", --msix_rx_cnt);
			DBG_PRINT(INFO_DBG,
				  "MSI-X-TX entries enabled through alarm vector\n");
		}
	}
	if (sp->config.intr_type == INTA) {
		err = request_irq(sp->pdev->irq, s2io_isr, IRQF_SHARED,
				  sp->name, dev);
		if (err) {
			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
				  dev->name);
			return -1;
		}
	}
	return 0;
}
7008
7009static void s2io_rem_isr(struct s2io_nic *sp)
7010{
7011	if (sp->config.intr_type == MSI_X)
7012		remove_msix_isr(sp);
7013	else
7014		remove_inta_isr(sp);
7015}
7016
7017static void do_s2io_card_down(struct s2io_nic *sp, int do_io)
7018{
7019	int cnt = 0;
7020	struct XENA_dev_config __iomem *bar0 = sp->bar0;
7021	register u64 val64 = 0;
7022	struct config_param *config;
7023	config = &sp->config;
7024
7025	if (!is_s2io_card_up(sp))
7026		return;
7027
7028	del_timer_sync(&sp->alarm_timer);
7029	/* If s2io_set_link task is executing, wait till it completes. */
7030	while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state)))
7031		msleep(50);
7032	clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
7033
7034	/* Disable napi */
7035	if (sp->config.napi) {
7036		int off = 0;
7037		if (config->intr_type ==  MSI_X) {
7038			for (; off < sp->config.rx_ring_num; off++)
7039				napi_disable(&sp->mac_control.rings[off].napi);
7040		}
7041		else
7042			napi_disable(&sp->napi);
7043	}
7044
7045	/* disable Tx and Rx traffic on the NIC */
7046	if (do_io)
7047		stop_nic(sp);
7048
7049	s2io_rem_isr(sp);
7050
7051	/* stop the tx queue, indicate link down */
7052	s2io_link(sp, LINK_DOWN);
7053
7054	/* Check if the device is Quiescent and then Reset the NIC */
7055	while (do_io) {
7056		/* As per the HW requirement we need to replenish the
7057		 * receive buffer to avoid the ring bump. Since there is
7058		 * no intention of processing the Rx frame at this pointwe are
7059		 * just setting the ownership bit of rxd in Each Rx
7060		 * ring to HW and set the appropriate buffer size
7061		 * based on the ring mode
7062		 */
7063		rxd_owner_bit_reset(sp);
7064
7065		val64 = readq(&bar0->adapter_status);
7066		if (verify_xena_quiescence(sp)) {
7067			if (verify_pcc_quiescent(sp, sp->device_enabled_once))
7068				break;
7069		}
7070
7071		msleep(50);
7072		cnt++;
7073		if (cnt == 10) {
7074			DBG_PRINT(ERR_DBG, "Device not Quiescent - "
7075				  "adapter status reads 0x%llx\n",
7076				  (unsigned long long)val64);
7077			break;
7078		}
7079	}
7080	if (do_io)
7081		s2io_reset(sp);
7082
7083	/* Free all Tx buffers */
7084	free_tx_buffers(sp);
7085
7086	/* Free all Rx buffers */
7087	free_rx_buffers(sp);
7088
7089	clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
7090}
7091
/* Bring the card down with register I/O permitted (the common case). */
static void s2io_card_down(struct s2io_nic *sp)
{
	do_s2io_card_down(sp, 1);
}
7096
/*
 * s2io_card_up - bring the adapter up.
 * @sp: device private structure.
 *
 * Initializes the hardware, fills the Rx rings, enables NAPI, restores
 * the receive mode, starts the NIC, registers interrupt handlers, arms
 * the alarm timer and enables interrupts. Returns 0 on success or a
 * negative errno; on failure all partially acquired resources are
 * released via the error labels at the bottom.
 */
static int s2io_card_up(struct s2io_nic *sp)
{
	int i, ret = 0;
	struct config_param *config;
	struct mac_info *mac_control;
	struct net_device *dev = sp->dev;
	u16 interruptible;

	/* Initialize the H/W I/O registers */
	ret = init_nic(sp);
	if (ret != 0) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		/* On -EIO the card is not safe to touch, so skip the reset. */
		if (ret != -EIO)
			s2io_reset(sp);
		return ret;
	}

	/*
	 * Initializing the Rx buffers. For now we are considering only 1
	 * Rx ring and initializing buffers into 30 Rx blocks
	 */
	config = &sp->config;
	mac_control = &sp->mac_control;

	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		ring->mtu = dev->mtu;
		ring->lro = !!(dev->features & NETIF_F_LRO);
		ret = fill_rx_buffers(sp, ring, 1);
		if (ret) {
			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
				  dev->name);
			ret = -ENOMEM;
			goto err_fill_buff;
		}
		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
			  ring->rx_bufs_left);
	}

	/* Initialise napi */
	if (config->napi) {
		if (config->intr_type ==  MSI_X) {
			/* One NAPI context per Rx ring under MSI-X. */
			for (i = 0; i < sp->config.rx_ring_num; i++)
				napi_enable(&sp->mac_control.rings[i].napi);
		} else {
			napi_enable(&sp->napi);
		}
	}

	/* Maintain the state prior to the open */
	if (sp->promisc_flg)
		sp->promisc_flg = 0;
	if (sp->m_cast_flg) {
		sp->m_cast_flg = 0;
		sp->all_multi_pos = 0;
	}

	/* Setting its receive mode */
	s2io_set_multicast(dev, true);

	if (dev->features & NETIF_F_LRO) {
		/* Initialize max aggregatable pkts per session based on MTU */
		sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
		/* Check if we can use (if specified) user provided value */
		if (lro_max_pkts < sp->lro_max_aggr_per_sess)
			sp->lro_max_aggr_per_sess = lro_max_pkts;
	}

	/* Enable Rx Traffic and interrupts on the NIC */
	if (start_nic(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
		ret = -ENODEV;
		goto err_out;
	}

	/* Add interrupt service routine */
	if (s2io_add_isr(sp) != 0) {
		if (sp->config.intr_type == MSI_X)
			s2io_rem_isr(sp);
		ret = -ENODEV;
		goto err_out;
	}

	/* Periodic alarm handler; first fire in half a second. */
	timer_setup(&sp->alarm_timer, s2io_alarm_handle, 0);
	mod_timer(&sp->alarm_timer, jiffies + HZ / 2);

	set_bit(__S2IO_STATE_CARD_UP, &sp->state);

	/*  Enable select interrupts */
	en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
	if (sp->config.intr_type != INTA) {
		interruptible = TX_TRAFFIC_INTR | TX_PIC_INTR;
		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	} else {
		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
		interruptible |= TX_PIC_INTR;
		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	}

	return 0;

err_out:
	/* Undo NAPI enable before resetting and freeing Rx buffers. */
	if (config->napi) {
		if (config->intr_type == MSI_X) {
			for (i = 0; i < sp->config.rx_ring_num; i++)
				napi_disable(&sp->mac_control.rings[i].napi);
		} else {
			napi_disable(&sp->napi);
		}
	}
err_fill_buff:
	s2io_reset(sp);
	free_rx_buffers(sp);
	return ret;
}
7214
7215/**
7216 * s2io_restart_nic - Resets the NIC.
7217 * @work : work struct containing a pointer to the device private structure
7218 * Description:
7219 * This function is scheduled to be run by the s2io_tx_watchdog
7220 * function after 0.5 secs to reset the NIC. The idea is to reduce
7221 * the run time of the watch dog routine which is run holding a
7222 * spin lock.
7223 */
7224
static void s2io_restart_nic(struct work_struct *work)
{
	struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
	struct net_device *dev = sp->dev;

	/* Serialize with device open/close via the RTNL lock. */
	rtnl_lock();

	/* The interface may have been brought down between the watchdog
	 * scheduling this work and it actually running.
	 */
	if (!netif_running(dev))
		goto out_unlock;

	/* Full down/up cycle to recover the hung adapter. */
	s2io_card_down(sp);
	if (s2io_card_up(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", dev->name);
	}
	s2io_wake_all_tx_queue(sp);
	DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", dev->name);
out_unlock:
	rtnl_unlock();
}
7244
7245/**
7246 *  s2io_tx_watchdog - Watchdog for transmit side.
7247 *  @dev : Pointer to net device structure
7248 *  @txqueue: index of the hanging queue
7249 *  Description:
7250 *  This function is triggered if the Tx Queue is stopped
7251 *  for a pre-defined amount of time when the Interface is still up.
7252 *  If the Interface is jammed in such a situation, the hardware is
7253 *  reset (by s2io_close) and restarted again (by s2io_open) to
7254 *  overcome any problem that might have been caused in the hardware.
7255 *  Return value:
7256 *  void
7257 */
7258
7259static void s2io_tx_watchdog(struct net_device *dev, unsigned int txqueue)
7260{
7261	struct s2io_nic *sp = netdev_priv(dev);
7262	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7263
7264	if (netif_carrier_ok(dev)) {
7265		swstats->watchdog_timer_cnt++;
7266		schedule_work(&sp->rst_timer_task);
7267		swstats->soft_reset_cnt++;
7268	}
7269}
7270
7271/**
7272 *   rx_osm_handler - To perform some OS related operations on SKB.
7273 *   @ring_data : the ring from which this RxD was extracted.
7274 *   @rxdp: descriptor
7275 *   Description:
 *   This function is called by the Rx interrupt service routine to perform
7277 *   some OS related operations on the SKB before passing it to the upper
7278 *   layers. It mainly checks if the checksum is OK, if so adds it to the
7279 *   SKBs cksum variable, increments the Rx packet count and passes the SKB
7280 *   to the upper layer. If the checksum is wrong, it increments the Rx
7281 *   packet error count, frees the SKB and returns error.
7282 *   Return value:
7283 *   SUCCESS on success and -1 on failure.
7284 */
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
{
	struct s2io_nic *sp = ring_data->nic;
	struct net_device *dev = ring_data->dev;
	/* The skb posted for this descriptor was stashed in Host_Control. */
	struct sk_buff *skb = (struct sk_buff *)
		((unsigned long)rxdp->Host_Control);
	int ring_no = ring_data->ring_no;
	u16 l3_csum, l4_csum;
	unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
	struct lro *lro;
	u8 err_mask;
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

	skb->dev = dev;

	if (err) {
		/* Check for parity error */
		if (err & 0x1)
			swstats->parity_err_cnt++;

		/* The transfer code occupies bits 48 and up of Control_1. */
		err_mask = err >> 48;
		switch (err_mask) {
		case 1:
			swstats->rx_parity_err_cnt++;
			break;

		case 2:
			swstats->rx_abort_cnt++;
			break;

		case 3:
			swstats->rx_parity_abort_cnt++;
			break;

		case 4:
			swstats->rx_rda_fail_cnt++;
			break;

		case 5:
			swstats->rx_unkn_prot_cnt++;
			break;

		case 6:
			swstats->rx_fcs_err_cnt++;
			break;

		case 7:
			swstats->rx_buf_size_err_cnt++;
			break;

		case 8:
			swstats->rx_rxd_corrupt_cnt++;
			break;

		case 15:
			swstats->rx_unkn_err_cnt++;
			break;
		}
		/*
		 * Drop the packet if bad transfer code. Exception being
		 * 0x5, which could be due to unsupported IPv6 extension header.
		 * In this case, we let stack handle the packet.
		 * Note that in this case, since checksum will be incorrect,
		 * stack will validate the same.
		 */
		if (err_mask != 0x5) {
			DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
				  dev->name, err_mask);
			dev->stats.rx_crc_errors++;
			swstats->mem_freed
				+= skb->truesize;
			dev_kfree_skb(skb);
			ring_data->rx_bufs_left -= 1;
			rxdp->Host_Control = 0;
			return 0;
		}
	}

	rxdp->Host_Control = 0;
	if (sp->rxd_mode == RXD_MODE_1) {
		/* Single-buffer mode: the whole frame is in buffer 0. */
		int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);

		skb_put(skb, len);
	} else if (sp->rxd_mode == RXD_MODE_3B) {
		/*
		 * 2-buffer mode: copy the buffer-0 (header) bytes into the
		 * skb; the buffer-2 payload was presumably DMAed straight
		 * into the skb data area (see the buffer-posting path), so
		 * skb_put() only needs to extend the length over it.
		 */
		int get_block = ring_data->rx_curr_get_info.block_index;
		int get_off = ring_data->rx_curr_get_info.offset;
		int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
		int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);

		struct buffAdd *ba = &ring_data->ba[get_block][get_off];
		skb_put_data(skb, ba->ba_0, buf0_len);
		skb_put(skb, buf2_len);
	}

	/* Consider checksum/LRO only for TCP/UDP, non-fragmented (when LRO
	 * is on) frames, and only if the stack asked for Rx checksumming.
	 */
	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
	    ((!ring_data->lro) ||
	     (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG))) &&
	    (dev->features & NETIF_F_RXCSUM)) {
		l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
			/*
			 * NIC verifies if the Checksum of the received
			 * frame is Ok or not and accordingly returns
			 * a flag in the RxD.
			 */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			if (ring_data->lro) {
				u32 tcp_len = 0;
				u8 *tcp;
				int ret = 0;

				/* Classify the segment against the LRO
				 * session table; ret selects the action.
				 */
				ret = s2io_club_tcp_session(ring_data,
							    skb->data, &tcp,
							    &tcp_len, &lro,
							    rxdp, sp);
				switch (ret) {
				case 3: /* Begin anew */
					lro->parent = skb;
					goto aggregate;
				case 1: /* Aggregate */
					lro_append_pkt(sp, lro, skb, tcp_len);
					goto aggregate;
				case 4: /* Flush session */
					lro_append_pkt(sp, lro, skb, tcp_len);
					queue_rx_frame(lro->parent,
						       lro->vlan_tag);
					clear_lro_session(lro);
					swstats->flush_max_pkts++;
					goto aggregate;
				case 2: /* Flush both */
					lro->parent->data_len = lro->frags_len;
					swstats->sending_both++;
					queue_rx_frame(lro->parent,
						       lro->vlan_tag);
					clear_lro_session(lro);
					goto send_up;
				case 0: /* sessions exceeded */
				case -1: /* non-TCP or not L2 aggregatable */
				case 5: /*
					 * First pkt in session not
					 * L3/L4 aggregatable
					 */
					break;
				default:
					DBG_PRINT(ERR_DBG,
						  "%s: Samadhana!!\n",
						  __func__);
					BUG();
				}
			}
		} else {
			/*
			 * Packet with erroneous checksum, let the
			 * upper layers deal with it.
			 */
			skb_checksum_none_assert(skb);
		}
	} else
		skb_checksum_none_assert(skb);

	swstats->mem_freed += skb->truesize;
send_up:
	/* Hand the (possibly aggregated) frame to the stack. */
	skb_record_rx_queue(skb, ring_no);
	queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
aggregate:
	/* One posted Rx buffer has been consumed either way. */
	sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
	return SUCCESS;
}
7454
7455/**
7456 *  s2io_link - stops/starts the Tx queue.
7457 *  @sp : private member of the device structure, which is a pointer to the
7458 *  s2io_nic structure.
 *  @link : indicates whether link is UP/DOWN.
7460 *  Description:
7461 *  This function stops/starts the Tx queue depending on whether the link
7462 *  status of the NIC is down or up. This is called by the Alarm
7463 *  interrupt handler whenever a link change interrupt comes up.
7464 *  Return value:
7465 *  void.
7466 */
7467
7468static void s2io_link(struct s2io_nic *sp, int link)
7469{
7470	struct net_device *dev = sp->dev;
7471	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
7472
7473	if (link != sp->last_link_state) {
7474		init_tti(sp, link, false);
7475		if (link == LINK_DOWN) {
7476			DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7477			s2io_stop_all_tx_queue(sp);
7478			netif_carrier_off(dev);
7479			if (swstats->link_up_cnt)
7480				swstats->link_up_time =
7481					jiffies - sp->start_time;
7482			swstats->link_down_cnt++;
7483		} else {
7484			DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7485			if (swstats->link_down_cnt)
7486				swstats->link_down_time =
7487					jiffies - sp->start_time;
7488			swstats->link_up_cnt++;
7489			netif_carrier_on(dev);
7490			s2io_wake_all_tx_queue(sp);
7491		}
7492	}
7493	sp->last_link_state = link;
7494	sp->start_time = jiffies;
7495}
7496
7497/**
7498 *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7499 *  @sp : private member of the device structure, which is a pointer to the
7500 *  s2io_nic structure.
7501 *  Description:
7502 *  This function initializes a few of the PCI and PCI-X configuration registers
7503 *  with recommended values.
7504 *  Return value:
7505 *  void
7506 */
7507
7508static void s2io_init_pci(struct s2io_nic *sp)
7509{
7510	u16 pci_cmd = 0, pcix_cmd = 0;
7511
7512	/* Enable Data Parity Error Recovery in PCI-X command register. */
7513	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7514			     &(pcix_cmd));
7515	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7516			      (pcix_cmd | 1));
7517	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7518			     &(pcix_cmd));
7519
7520	/* Set the PErr Response bit in PCI command register. */
7521	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7522	pci_write_config_word(sp->pdev, PCI_COMMAND,
7523			      (pci_cmd | PCI_COMMAND_PARITY));
7524	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7525}
7526
7527static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7528			    u8 *dev_multiq)
7529{
7530	int i;
7531
7532	if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
7533		DBG_PRINT(ERR_DBG, "Requested number of tx fifos "
7534			  "(%d) not supported\n", tx_fifo_num);
7535
7536		if (tx_fifo_num < 1)
7537			tx_fifo_num = 1;
7538		else
7539			tx_fifo_num = MAX_TX_FIFOS;
7540
7541		DBG_PRINT(ERR_DBG, "Default to %d tx fifos\n", tx_fifo_num);
7542	}
7543
7544	if (multiq)
7545		*dev_multiq = multiq;
7546
7547	if (tx_steering_type && (1 == tx_fifo_num)) {
7548		if (tx_steering_type != TX_DEFAULT_STEERING)
7549			DBG_PRINT(ERR_DBG,
7550				  "Tx steering is not supported with "
7551				  "one fifo. Disabling Tx steering.\n");
7552		tx_steering_type = NO_STEERING;
7553	}
7554
7555	if ((tx_steering_type < NO_STEERING) ||
7556	    (tx_steering_type > TX_DEFAULT_STEERING)) {
7557		DBG_PRINT(ERR_DBG,
7558			  "Requested transmit steering not supported\n");
7559		DBG_PRINT(ERR_DBG, "Disabling transmit steering\n");
7560		tx_steering_type = NO_STEERING;
7561	}
7562
7563	if (rx_ring_num > MAX_RX_RINGS) {
7564		DBG_PRINT(ERR_DBG,
7565			  "Requested number of rx rings not supported\n");
7566		DBG_PRINT(ERR_DBG, "Default to %d rx rings\n",
7567			  MAX_RX_RINGS);
7568		rx_ring_num = MAX_RX_RINGS;
7569	}
7570
7571	if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7572		DBG_PRINT(ERR_DBG, "Wrong intr_type requested. "
7573			  "Defaulting to INTA\n");
7574		*dev_intr_type = INTA;
7575	}
7576
7577	if ((*dev_intr_type == MSI_X) &&
7578	    ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7579	     (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7580		DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. "
7581			  "Defaulting to INTA\n");
7582		*dev_intr_type = INTA;
7583	}
7584
7585	if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7586		DBG_PRINT(ERR_DBG, "Requested ring mode not supported\n");
7587		DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
7588		rx_ring_mode = 1;
7589	}
7590
7591	for (i = 0; i < MAX_RX_RINGS; i++)
7592		if (rx_ring_sz[i] > MAX_RX_BLOCKS_PER_RING) {
7593			DBG_PRINT(ERR_DBG, "Requested rx ring size not "
7594				  "supported\nDefaulting to %d\n",
7595				  MAX_RX_BLOCKS_PER_RING);
7596			rx_ring_sz[i] = MAX_RX_BLOCKS_PER_RING;
7597		}
7598
7599	return SUCCESS;
7600}
7601
7602/**
7603 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS or Traffic class respectively.
7604 * @nic: device private variable
7605 * @ds_codepoint: data
7606 * @ring: ring index
7607 * Description: The function configures the receive steering to
7608 * desired receive ring.
7609 * Return Value:  SUCCESS on success and
7610 * '-1' on failure (endian settings incorrect).
7611 */
static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;

	/* DS codepoints are 6 bits wide (IPv4 TOS / IPv6 traffic class). */
	if (ds_codepoint > 63)
		return FAILURE;

	/* Stage the target ring number in the data register... */
	val64 = RTS_DS_MEM_DATA(ring);
	writeq(val64, &bar0->rts_ds_mem_data);

	/* ...then issue the write-enable command for this codepoint. */
	val64 = RTS_DS_MEM_CTRL_WE |
		RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
		RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);

	writeq(val64, &bar0->rts_ds_mem_ctrl);

	/* Poll until the adapter reports the command is complete. */
	return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
				     RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
				     S2IO_BIT_RESET, true);
}
7633
/* Entry points through which the network stack drives this device. */
static const struct net_device_ops s2io_netdev_ops = {
	.ndo_open	        = s2io_open,
	.ndo_stop	        = s2io_close,
	.ndo_get_stats	        = s2io_get_stats,
	.ndo_start_xmit    	= s2io_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= s2io_ndo_set_multicast,
	.ndo_eth_ioctl		= s2io_ioctl,
	.ndo_set_mac_address    = s2io_set_mac_addr,
	.ndo_change_mtu	   	= s2io_change_mtu,
	.ndo_set_features	= s2io_set_features,
	.ndo_tx_timeout	   	= s2io_tx_watchdog,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = s2io_netpoll,
#endif
};
7650
7651/**
7652 *  s2io_init_nic - Initialization of the adapter .
7653 *  @pdev : structure containing the PCI related information of the device.
7654 *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7655 *  Description:
 *  The function initializes an adapter identified by the pci_dev structure.
 *  All OS related initialization including memory and device structure and
 *  initialization of the device private variable is done. Also the swapper
7659 *  control register is initialized to enable read and write into the I/O
7660 *  registers of the device.
7661 *  Return value:
7662 *  returns 0 on success and negative on failure.
7663 */
7664
static int
s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
{
	struct s2io_nic *sp;
	struct net_device *dev;
	int i, j, ret;
	u32 mac_up, mac_down;
	u64 val64 = 0, tmp64 = 0;
	struct XENA_dev_config __iomem *bar0 = NULL;
	u16 subid;
	struct config_param *config;
	struct mac_info *mac_control;
	int mode;
	u8 dev_intr_type = intr_type;
	u8 dev_multiq = 0;

	/* Sanitize module parameters before touching the hardware. */
	ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
	if (ret)
		return ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		DBG_PRINT(ERR_DBG,
			  "%s: pci_enable_device failed\n", __func__);
		return ret;
	}

	/* The adapter requires 64-bit DMA addressing. */
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__);
	} else {
		pci_disable_device(pdev);
		return -ENOMEM;
	}
	ret = pci_request_regions(pdev, s2io_driver_name);
	if (ret) {
		DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x\n",
			  __func__, ret);
		pci_disable_device(pdev);
		return -ENODEV;
	}
	/* With multiqueue, allocate one netdev Tx queue per FIFO. */
	if (dev_multiq)
		dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
	else
		dev = alloc_etherdev(sizeof(struct s2io_nic));
	if (dev == NULL) {
		pci_disable_device(pdev);
		pci_release_regions(pdev);
		return -ENODEV;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	/*  Private member variable initialized to s2io NIC structure */
	sp = netdev_priv(dev);
	sp->dev = dev;
	sp->pdev = pdev;
	sp->device_enabled_once = false;
	/* rx_ring_mode was already clamped to 1 or 2 by s2io_verify_parm() */
	if (rx_ring_mode == 1)
		sp->rxd_mode = RXD_MODE_1;
	if (rx_ring_mode == 2)
		sp->rxd_mode = RXD_MODE_3B;

	sp->config.intr_type = dev_intr_type;

	if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
	    (pdev->device == PCI_DEVICE_ID_HERC_UNI))
		sp->device_type = XFRAME_II_DEVICE;
	else
		sp->device_type = XFRAME_I_DEVICE;


	/* Initialize some PCI/PCI-X fields of the NIC. */
	s2io_init_pci(sp);

	/*
	 * Setting the device configuration parameters.
	 * Most of these parameters can be specified by the user during
	 * module insertion as they are module loadable parameters. If
	 * these parameters are not specified during load time, they
	 * are initialized with default values.
	 */
	config = &sp->config;
	mac_control = &sp->mac_control;

	config->napi = napi;
	config->tx_steering_type = tx_steering_type;

	/* Tx side parameters. */
	if (config->tx_steering_type == TX_PRIORITY_STEERING)
		config->tx_fifo_num = MAX_TX_FIFOS;
	else
		config->tx_fifo_num = tx_fifo_num;

	/* Initialize the fifos used for tx steering */
	/* Partition the FIFOs among TCP, UDP and "other" traffic. */
	if (config->tx_fifo_num < 5) {
		if (config->tx_fifo_num  == 1)
			sp->total_tcp_fifos = 1;
		else
			sp->total_tcp_fifos = config->tx_fifo_num - 1;
		sp->udp_fifo_idx = config->tx_fifo_num - 1;
		sp->total_udp_fifos = 1;
		sp->other_fifo_idx = sp->total_tcp_fifos - 1;
	} else {
		sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
				       FIFO_OTHER_MAX_NUM);
		sp->udp_fifo_idx = sp->total_tcp_fifos;
		sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
		sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
	}

	config->multiq = dev_multiq;
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		tx_cfg->fifo_len = tx_fifo_len[i];
		tx_cfg->fifo_priority = i;
	}

	/* mapping the QoS priority to the configured fifos */
	for (i = 0; i < MAX_TX_FIFOS; i++)
		config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];

	/* map the hashing selector table to the configured fifos */
	for (i = 0; i < config->tx_fifo_num; i++)
		sp->fifo_selector[i] = fifo_selector[i];


	config->tx_intr_type = TXD_INT_TYPE_UTILZ;
	for (i = 0; i < config->tx_fifo_num; i++) {
		struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];

		tx_cfg->f_no_snoop = (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
		/* Small FIFOs get per-list Tx interrupts instead. */
		if (tx_cfg->fifo_len < 65) {
			config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
			break;
		}
	}
	/* + 2 because one Txd for skb->data and one Txd for UFO */
	config->max_txds = MAX_SKB_FRAGS + 2;

	/* Rx side parameters. */
	config->rx_ring_num = rx_ring_num;
	for (i = 0; i < config->rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
		struct ring_info *ring = &mac_control->rings[i];

		rx_cfg->num_rxd = rx_ring_sz[i] * (rxd_count[sp->rxd_mode] + 1);
		rx_cfg->ring_priority = i;
		ring->rx_bufs_left = 0;
		ring->rxd_mode = sp->rxd_mode;
		ring->rxd_count = rxd_count[sp->rxd_mode];
		ring->pdev = sp->pdev;
		ring->dev = sp->dev;
	}

	for (i = 0; i < rx_ring_num; i++) {
		struct rx_ring_config *rx_cfg = &config->rx_cfg[i];

		rx_cfg->ring_org = RING_ORG_BUFF1;
		rx_cfg->f_no_snoop = (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
	}

	/*  Setting Mac Control parameters */
	mac_control->rmac_pause_time = rmac_pause_time;
	mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
	mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;


	/*  initialize the shared memory used by the NIC and the host */
	if (init_shared_mem(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", dev->name);
		ret = -ENOMEM;
		goto mem_alloc_failed;
	}

	sp->bar0 = pci_ioremap_bar(pdev, 0);
	if (!sp->bar0) {
		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
			  dev->name);
		ret = -ENOMEM;
		goto bar0_remap_failed;
	}

	sp->bar1 = pci_ioremap_bar(pdev, 2);
	if (!sp->bar1) {
		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
			  dev->name);
		ret = -ENOMEM;
		goto bar1_remap_failed;
	}

	/* Initializing the BAR1 address as the start of the FIFO pointer. */
	for (j = 0; j < MAX_TX_FIFOS; j++) {
		mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000);
	}

	/*  Driver entry points */
	dev->netdev_ops = &s2io_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_RXCSUM | NETIF_F_LRO;
	dev->features |= dev->hw_features |
		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
		NETIF_F_HIGHDMA;
	dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
	INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
	INIT_WORK(&sp->set_link_task, s2io_set_link);

	pci_save_state(sp->pdev);

	/* Setting swapper control on the NIC, for proper reset operation */
	if (s2io_set_swapper(sp)) {
		DBG_PRINT(ERR_DBG, "%s: swapper settings are wrong\n",
			  dev->name);
		ret = -EAGAIN;
		goto set_swap_failed;
	}

	/* Verify if the Herc works on the slot its placed into */
	if (sp->device_type & XFRAME_II_DEVICE) {
		mode = s2io_verify_pci_mode(sp);
		if (mode < 0) {
			DBG_PRINT(ERR_DBG, "%s: Unsupported PCI bus mode\n",
				  __func__);
			ret = -EBADSLT;
			goto set_swap_failed;
		}
	}

	/* Probe MSI-X support; fall back to INTA on any failure. */
	if (sp->config.intr_type == MSI_X) {
		sp->num_entries = config->rx_ring_num + 1;
		ret = s2io_enable_msi_x(sp);

		if (!ret) {
			ret = s2io_test_msi(sp);
			/* rollback MSI-X, will re-enable during add_isr() */
			remove_msix_isr(sp);
		}
		if (ret) {

			DBG_PRINT(ERR_DBG,
				  "MSI-X requested but failed to enable\n");
			sp->config.intr_type = INTA;
		}
	}

	/* MSI-X: one NAPI context per Rx ring; INTA: a single one. */
	if (config->intr_type ==  MSI_X) {
		for (i = 0; i < config->rx_ring_num ; i++) {
			struct ring_info *ring = &mac_control->rings[i];

			netif_napi_add(dev, &ring->napi, s2io_poll_msix);
		}
	} else {
		netif_napi_add(dev, &sp->napi, s2io_poll_inta);
	}

	/* Not needed for Herc */
	if (sp->device_type & XFRAME_I_DEVICE) {
		/*
		 * Fix for all "FFs" MAC address problems observed on
		 * Alpha platforms
		 */
		fix_mac_address(sp);
		s2io_reset(sp);
	}

	/*
	 * MAC address initialization.
	 * For now only one mac address will be read and used.
	 */
	bar0 = sp->bar0;
	val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
	writeq(val64, &bar0->rmac_addr_cmd_mem);
	wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
			      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
			      S2IO_BIT_RESET, true);
	tmp64 = readq(&bar0->rmac_addr_data0_mem);
	mac_down = (u32)tmp64;
	mac_up = (u32) (tmp64 >> 32);

	/* Unpack the 6 MAC-address bytes from the two 32-bit halves. */
	sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
	sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
	sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
	sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
	sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
	sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);

	/*  Set the factory defined MAC address initially   */
	dev->addr_len = ETH_ALEN;
	eth_hw_addr_set(dev, sp->def_mac_addr[0].mac_addr);

	/* initialize number of multicast & unicast MAC entries variables */
	if (sp->device_type == XFRAME_I_DEVICE) {
		config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
		config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
		config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
	} else if (sp->device_type == XFRAME_II_DEVICE) {
		config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
		config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
		config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
	}

	/* MTU range: 46 - 9600 */
	dev->min_mtu = MIN_MTU;
	dev->max_mtu = S2IO_JUMBO_SIZE;

	/* store mac addresses from CAM to s2io_nic structure */
	do_s2io_store_unicast_mc(sp);

	/* Configure MSIX vector for number of rings configured plus one */
	if ((sp->device_type == XFRAME_II_DEVICE) &&
	    (config->intr_type == MSI_X))
		sp->num_entries = config->rx_ring_num + 1;

	/* Store the values of the MSIX table in the s2io_nic structure */
	store_xmsi_data(sp);
	/* reset Nic and bring it to known state */
	s2io_reset(sp);

	/*
	 * Initialize link state flags
	 * and the card state parameter
	 */
	sp->state = 0;

	/* Initialize spinlocks */
	for (i = 0; i < sp->config.tx_fifo_num; i++) {
		struct fifo_info *fifo = &mac_control->fifos[i];

		spin_lock_init(&fifo->tx_lock);
	}

	/*
	 * SXE-002: Configure link and activity LED to init state
	 * on driver load.
	 */
	subid = sp->pdev->subsystem_device;
	if ((subid & 0xFF) >= 0x07) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
		val64 = readq(&bar0->gpio_control);
	}

	sp->rx_csum = 1;	/* Rx chksum verify enabled by default */

	if (register_netdev(dev)) {
		DBG_PRINT(ERR_DBG, "Device registration failed\n");
		ret = -ENODEV;
		goto register_failed;
	}
	s2io_vpd_read(sp);
	DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2010 Exar Corp.\n");
	DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n", dev->name,
		  sp->product_name, pdev->revision);
	DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
		  s2io_driver_version);
	DBG_PRINT(ERR_DBG, "%s: MAC Address: %pM\n", dev->name, dev->dev_addr);
	DBG_PRINT(ERR_DBG, "Serial number: %s\n", sp->serial_num);
	if (sp->device_type & XFRAME_II_DEVICE) {
		mode = s2io_print_pci_mode(sp);
		if (mode < 0) {
			ret = -EBADSLT;
			unregister_netdev(dev);
			goto set_swap_failed;
		}
	}
	switch (sp->rxd_mode) {
	case RXD_MODE_1:
		DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
			  dev->name);
		break;
	case RXD_MODE_3B:
		DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
			  dev->name);
		break;
	}

	switch (sp->config.napi) {
	case 0:
		DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
		break;
	case 1:
		DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
		break;
	}

	DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
		  sp->config.tx_fifo_num);

	DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
		  sp->config.rx_ring_num);

	switch (sp->config.intr_type) {
	case INTA:
		DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
		break;
	case MSI_X:
		DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
		break;
	}
	if (sp->config.multiq) {
		for (i = 0; i < sp->config.tx_fifo_num; i++) {
			struct fifo_info *fifo = &mac_control->fifos[i];

			fifo->multiq = config->multiq;
		}
		DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
			  dev->name);
	} else
		DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
			  dev->name);

	switch (sp->config.tx_steering_type) {
	case NO_STEERING:
		DBG_PRINT(ERR_DBG, "%s: No steering enabled for transmit\n",
			  dev->name);
		break;
	case TX_PRIORITY_STEERING:
		DBG_PRINT(ERR_DBG,
			  "%s: Priority steering enabled for transmit\n",
			  dev->name);
		break;
	case TX_DEFAULT_STEERING:
		DBG_PRINT(ERR_DBG,
			  "%s: Default steering enabled for transmit\n",
			  dev->name);
	}

	DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
		  dev->name);
	/* Initialize device name */
	snprintf(sp->name, sizeof(sp->name), "%s Neterion %s", dev->name,
		 sp->product_name);

	if (vlan_tag_strip)
		sp->vlan_strip_flag = 1;
	else
		sp->vlan_strip_flag = 0;

	/*
	 * Make Link state as off at this point, when the Link change
	 * interrupt comes the state will be automatically changed to
	 * the right state.
	 */
	netif_carrier_off(dev);

	return 0;

	/* Error unwind: labels fall through from most- to least-recently
	 * acquired resource.
	 */
register_failed:
set_swap_failed:
	iounmap(sp->bar1);
bar1_remap_failed:
	iounmap(sp->bar0);
bar0_remap_failed:
mem_alloc_failed:
	free_shared_mem(sp);
	pci_disable_device(pdev);
	pci_release_regions(pdev);
	free_netdev(dev);

	return ret;
}
8134
8135/**
8136 * s2io_rem_nic - Free the PCI device
8137 * @pdev: structure containing the PCI related information of the device.
8138 * Description: This function is called by the Pci subsystem to release a
8139 * PCI device and free up all resource held up by the device. This could
8140 * be in response to a Hot plug event or when the driver is to be removed
8141 * from memory.
8142 */
8143
static void s2io_rem_nic(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct s2io_nic *sp;

	if (dev == NULL) {
		DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
		return;
	}

	sp = netdev_priv(dev);

	/* Stop any deferred work before tearing the device down. */
	cancel_work_sync(&sp->rst_timer_task);
	cancel_work_sync(&sp->set_link_task);

	unregister_netdev(dev);

	/* Release resources acquired in s2io_init_nic(). */
	free_shared_mem(sp);
	iounmap(sp->bar0);
	iounmap(sp->bar1);
	pci_release_regions(pdev);
	free_netdev(dev);
	pci_disable_device(pdev);
}
8168
8169module_pci_driver(s2io_driver);
8170
8171static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8172				struct tcphdr **tcp, struct RxD_t *rxdp,
8173				struct s2io_nic *sp)
8174{
8175	int ip_off;
8176	u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8177
8178	if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8179		DBG_PRINT(INIT_DBG,
8180			  "%s: Non-TCP frames not supported for LRO\n",
8181			  __func__);
8182		return -1;
8183	}
8184
8185	/* Checking for DIX type or DIX type with VLAN */
8186	if ((l2_type == 0) || (l2_type == 4)) {
8187		ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8188		/*
8189		 * If vlan stripping is disabled and the frame is VLAN tagged,
8190		 * shift the offset by the VLAN header size bytes.
8191		 */
8192		if ((!sp->vlan_strip_flag) &&
8193		    (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
8194			ip_off += HEADER_VLAN_SIZE;
8195	} else {
8196		/* LLC, SNAP etc are considered non-mergeable */
8197		return -1;
8198	}
8199
8200	*ip = (struct iphdr *)(buffer + ip_off);
8201	ip_len = (u8)((*ip)->ihl);
8202	ip_len <<= 2;
8203	*tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8204
8205	return 0;
8206}
8207
8208static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8209				  struct tcphdr *tcp)
8210{
8211	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8212	if ((lro->iph->saddr != ip->saddr) ||
8213	    (lro->iph->daddr != ip->daddr) ||
8214	    (lro->tcph->source != tcp->source) ||
8215	    (lro->tcph->dest != tcp->dest))
8216		return -1;
8217	return 0;
8218}
8219
8220static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8221{
8222	return ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2);
8223}
8224
8225static void initiate_new_session(struct lro *lro, u8 *l2h,
8226				 struct iphdr *ip, struct tcphdr *tcp,
8227				 u32 tcp_pyld_len, u16 vlan_tag)
8228{
8229	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8230	lro->l2h = l2h;
8231	lro->iph = ip;
8232	lro->tcph = tcp;
8233	lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
8234	lro->tcp_ack = tcp->ack_seq;
8235	lro->sg_num = 1;
8236	lro->total_len = ntohs(ip->tot_len);
8237	lro->frags_len = 0;
8238	lro->vlan_tag = vlan_tag;
8239	/*
8240	 * Check if we saw TCP timestamp.
8241	 * Other consistency checks have already been done.
8242	 */
8243	if (tcp->doff == 8) {
8244		__be32 *ptr;
8245		ptr = (__be32 *)(tcp+1);
8246		lro->saw_ts = 1;
8247		lro->cur_tsval = ntohl(*(ptr+1));
8248		lro->cur_tsecr = *(ptr+2);
8249	}
8250	lro->in_use = 1;
8251}
8252
/*
 * update_L3L4_header - finalize IP/TCP headers of an aggregated packet
 * @sp: device private structure (for LRO statistics)
 * @lro: session whose first packet's headers are patched in place
 *
 * Called just before an LRO session is flushed up the stack: rewrites
 * the parent packet's IP total length (with incremental checksum fixup)
 * plus the TCP ack/window (and tsecr when timestamps are in use) so the
 * headers describe the whole aggregate, then updates the aggregation
 * statistics.
 */
static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
{
	struct iphdr *ip = lro->iph;
	struct tcphdr *tcp = lro->tcph;
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);

	/* Update L3 header: patch the checksum incrementally for the new
	 * tot_len before overwriting the field itself.
	 */
	csum_replace2(&ip->check, ip->tot_len, htons(lro->total_len));
	ip->tot_len = htons(lro->total_len);

	/* Update L4 header with the latest ack and window seen. */
	tcp->ack_seq = lro->tcp_ack;
	tcp->window = lro->window;

	/* Update tsecr field if this session has timestamps enabled */
	if (lro->saw_ts) {
		/* tsecr is the third 32-bit word of the option area. */
		__be32 *ptr = (__be32 *)(tcp + 1);
		*(ptr+2) = lro->cur_tsecr;
	}

	/* Update counters required for calculation of
	 * average no. of packets aggregated.
	 */
	swstats->sum_avg_pkts_aggregated += lro->sg_num;
	swstats->num_aggregations++;
}
8281
8282static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
8283			     struct tcphdr *tcp, u32 l4_pyld)
8284{
8285	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
8286	lro->total_len += l4_pyld;
8287	lro->frags_len += l4_pyld;
8288	lro->tcp_next_seq += l4_pyld;
8289	lro->sg_num++;
8290
8291	/* Update ack seq no. and window ad(from this pkt) in LRO object */
8292	lro->tcp_ack = tcp->ack_seq;
8293	lro->window = tcp->window;
8294
8295	if (lro->saw_ts) {
8296		__be32 *ptr;
8297		/* Update tsecr and tsval from this packet */
8298		ptr = (__be32 *)(tcp+1);
8299		lro->cur_tsval = ntohl(*(ptr+1));
8300		lro->cur_tsecr = *(ptr + 2);
8301	}
8302}
8303
/*
 * verify_l3_l4_lro_capable - decide whether a TCP segment may be merged
 * @l_lro: existing session to check timestamp monotonicity against, or
 *	   NULL when probing a packet for a brand-new session
 * @ip: IP header of the received segment
 * @tcp: TCP header of the received segment
 * @tcp_pyld_len: length of the TCP payload in bytes
 *
 * Returns 0 when the segment is aggregatable; -1 when it must be sent
 * up on its own (and, for an existing session, flush it).
 */
static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
				    struct tcphdr *tcp, u32 tcp_pyld_len)
{
	u8 *ptr;

	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);

	if (!tcp_pyld_len) {
		/* Runt frame or a pure ack */
		return -1;
	}

	if (ip->ihl != 5) /* IP has options */
		return -1;

	/* If we see CE codepoint in IP header, packet is not mergeable */
	if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
		return -1;

	/* If we see ECE or CWR flags in TCP header, packet is not mergeable */
	if (tcp->urg || tcp->psh || tcp->rst ||
	    tcp->syn || tcp->fin ||
	    tcp->ece || tcp->cwr || !tcp->ack) {
		/*
		 * Currently recognize only the ack control word and
		 * any other control field being set would result in
		 * flushing the LRO session
		 */
		return -1;
	}

	/*
	 * Allow only one TCP timestamp option. Don't aggregate if
	 * any other options are detected.
	 * doff 5 = no options; doff 8 = exactly 12 option bytes.
	 */
	if (tcp->doff != 5 && tcp->doff != 8)
		return -1;

	if (tcp->doff == 8) {
		/* Skip NOP padding; the option area is bounded (12 bytes),
		 * but note the loop itself carries no explicit bound.
		 */
		ptr = (u8 *)(tcp + 1);
		while (*ptr == TCPOPT_NOP)
			ptr++;
		if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
			return -1;

		/* Ensure timestamp value increases monotonically */
		if (l_lro)
			if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
				return -1;

		/* timestamp echo reply should be non-zero */
		if (*((__be32 *)(ptr+6)) == 0)
			return -1;
	}

	return 0;
}
8361
/*
 * s2io_club_tcp_session - LRO classification entry point for a received frame
 * @ring_data: Rx ring owning the per-ring LRO session table (lro0_n)
 * @buffer: start of the received frame
 * @tcp: out parameter, set to the frame's TCP header
 * @tcp_len: out parameter, set to the frame's TCP payload length
 * @lro: out parameter, set to the matched/created session (or NULL)
 * @rxdp: Rx descriptor for the frame
 * @sp: device private structure
 *
 * Return codes consumed by the Rx path:
 *   -1 - frame is not LRO capable at L2 (from check_L2_lro_capable)
 *    0 - all sessions already in use; *lro is NULL, send packet up
 *    1 - aggregated into an existing session
 *    2 - flush: session matched but packet is out of sequence or not
 *	  mergeable (headers of the flushed session get finalized here)
 *    3 - new session initiated for this packet
 *    4 - aggregated and the session hit lro_max_aggr_per_sess; flush it
 *    5 - packet not L3/L4 aggregatable; send it up as-is
 */
static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
				 u8 **tcp, u32 *tcp_len, struct lro **lro,
				 struct RxD_t *rxdp, struct s2io_nic *sp)
{
	struct iphdr *ip;
	struct tcphdr *tcph;
	int ret = 0, i;
	u16 vlan_tag = 0;
	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;

	ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
				   rxdp, sp);
	if (ret)
		return ret;

	DBG_PRINT(INFO_DBG, "IP Saddr: %x Daddr: %x\n", ip->saddr, ip->daddr);

	vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
	tcph = (struct tcphdr *)*tcp;
	*tcp_len = get_l4_pyld_length(ip, tcph);
	/* First pass: look for an in-use session matching this 4-tuple. */
	for (i = 0; i < MAX_LRO_SESSIONS; i++) {
		struct lro *l_lro = &ring_data->lro0_n[i];
		if (l_lro->in_use) {
			if (check_for_socket_match(l_lro, ip, tcph))
				continue;
			/* Sock pair matched */
			*lro = l_lro;

			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
				DBG_PRINT(INFO_DBG, "%s: Out of sequence. "
					  "expected 0x%x, actual 0x%x\n",
					  __func__,
					  (*lro)->tcp_next_seq,
					  ntohl(tcph->seq));

				swstats->outof_sequence_pkts++;
				ret = 2;
				break;
			}

			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,
						      *tcp_len))
				ret = 1; /* Aggregate */
			else
				ret = 2; /* Flush both */
			break;
		}
	}

	if (ret == 0) {
		/* Before searching for available LRO objects,
		 * check if the pkt is L3/L4 aggregatable. If not
		 * don't create new LRO session. Just send this
		 * packet up.
		 */
		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len))
			return 5;

		/* Second pass: claim a free session slot, if any. */
		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
			struct lro *l_lro = &ring_data->lro0_n[i];
			if (!(l_lro->in_use)) {
				*lro = l_lro;
				ret = 3; /* Begin anew */
				break;
			}
		}
	}

	if (ret == 0) { /* sessions exceeded */
		DBG_PRINT(INFO_DBG, "%s: All LRO sessions already in use\n",
			  __func__);
		*lro = NULL;
		return ret;
	}

	switch (ret) {
	case 3:
		initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
				     vlan_tag);
		break;
	case 2:
		/* Flushing: finalize the aggregate's headers now. */
		update_L3L4_header(sp, *lro);
		break;
	case 1:
		aggregate_new_rx(*lro, ip, tcph, *tcp_len);
		if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
			update_L3L4_header(sp, *lro);
			ret = 4; /* Flush the LRO */
		}
		break;
	default:
		DBG_PRINT(ERR_DBG, "%s: Don't know, can't say!!\n", __func__);
		break;
	}

	return ret;
}
8459
8460static void clear_lro_session(struct lro *lro)
8461{
8462	static u16 lro_struct_size = sizeof(struct lro);
8463
8464	memset(lro, 0, lro_struct_size);
8465}
8466
8467static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
8468{
8469	struct net_device *dev = skb->dev;
8470	struct s2io_nic *sp = netdev_priv(dev);
8471
8472	skb->protocol = eth_type_trans(skb, dev);
8473	if (vlan_tag && sp->vlan_strip_flag)
8474		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
8475	if (sp->config.napi)
8476		netif_receive_skb(skb);
8477	else
8478		netif_rx(skb);
8479}
8480
8481static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8482			   struct sk_buff *skb, u32 tcp_len)
8483{
8484	struct sk_buff *first = lro->parent;
8485	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
8486
8487	first->len += tcp_len;
8488	first->data_len = lro->frags_len;
8489	skb_pull(skb, (skb->len - tcp_len));
8490	if (skb_shinfo(first)->frag_list)
8491		lro->last_frag->next = skb;
8492	else
8493		skb_shinfo(first)->frag_list = skb;
8494	first->truesize += skb->truesize;
8495	lro->last_frag = skb;
8496	swstats->clubbed_frms_cnt++;
8497}
8498
8499/**
8500 * s2io_io_error_detected - called when PCI error is detected
8501 * @pdev: Pointer to PCI device
8502 * @state: The current pci connection state
8503 *
8504 * This function is called after a PCI bus error affecting
8505 * this device has been detected.
8506 */
8507static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8508					       pci_channel_state_t state)
8509{
8510	struct net_device *netdev = pci_get_drvdata(pdev);
8511	struct s2io_nic *sp = netdev_priv(netdev);
8512
8513	netif_device_detach(netdev);
8514
8515	if (state == pci_channel_io_perm_failure)
8516		return PCI_ERS_RESULT_DISCONNECT;
8517
8518	if (netif_running(netdev)) {
8519		/* Bring down the card, while avoiding PCI I/O */
8520		do_s2io_card_down(sp, 0);
8521	}
8522	pci_disable_device(pdev);
8523
8524	return PCI_ERS_RESULT_NEED_RESET;
8525}
8526
8527/**
8528 * s2io_io_slot_reset - called after the pci bus has been reset.
8529 * @pdev: Pointer to PCI device
8530 *
8531 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
8533 * followed by fixups by BIOS, and has its config space
8534 * set up identically to what it was at cold boot.
8535 */
8536static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8537{
8538	struct net_device *netdev = pci_get_drvdata(pdev);
8539	struct s2io_nic *sp = netdev_priv(netdev);
8540
8541	if (pci_enable_device(pdev)) {
8542		pr_err("Cannot re-enable PCI device after reset.\n");
8543		return PCI_ERS_RESULT_DISCONNECT;
8544	}
8545
8546	pci_set_master(pdev);
8547	s2io_reset(sp);
8548
8549	return PCI_ERS_RESULT_RECOVERED;
8550}
8551
8552/**
8553 * s2io_io_resume - called when traffic can start flowing again.
8554 * @pdev: Pointer to PCI device
8555 *
8556 * This callback is called when the error recovery driver tells
8557 * us that its OK to resume normal operation.
8558 */
8559static void s2io_io_resume(struct pci_dev *pdev)
8560{
8561	struct net_device *netdev = pci_get_drvdata(pdev);
8562	struct s2io_nic *sp = netdev_priv(netdev);
8563
8564	if (netif_running(netdev)) {
8565		if (s2io_card_up(sp)) {
8566			pr_err("Can't bring device back up after reset.\n");
8567			return;
8568		}
8569
8570		if (do_s2io_prog_unicast(netdev, netdev->dev_addr) == FAILURE) {
8571			s2io_card_down(sp);
8572			pr_err("Can't restore mac addr after reset.\n");
8573			return;
8574		}
8575	}
8576
8577	netif_device_attach(netdev);
8578	netif_tx_wake_all_queues(netdev);
8579}