Linux Audio

Check our new training course

Loading...
Note: File does not exist in v6.8.
   1/****************************************************************************
   2 * Driver for Solarflare Solarstorm network controllers and boards
   3 * Copyright 2005-2006 Fen Systems Ltd.
   4 * Copyright 2006-2010 Solarflare Communications Inc.
   5 *
   6 * This program is free software; you can redistribute it and/or modify it
   7 * under the terms of the GNU General Public License version 2 as published
   8 * by the Free Software Foundation, incorporated herein by reference.
   9 */
  10
  11#include <linux/bitops.h>
  12#include <linux/delay.h>
  13#include <linux/pci.h>
  14#include <linux/module.h>
  15#include <linux/seq_file.h>
  16#include <linux/i2c.h>
  17#include <linux/mii.h>
  18#include <linux/slab.h>
  19#include "net_driver.h"
  20#include "bitfield.h"
  21#include "efx.h"
  22#include "spi.h"
  23#include "nic.h"
  24#include "regs.h"
  25#include "io.h"
  26#include "phy.h"
  27#include "workarounds.h"
  28
  29/* Hardware control for SFC4000 (aka Falcon). */
  30
static const unsigned int
/* "Large" EEPROM device: Atmel AT25640 or similar
 * 8 KB, 16-bit address, 32 B write block
 * (sizes are log2-encoded: 13 -> 8192 B, 5 -> 32 B) */
large_eeprom_type = ((13 << SPI_DEV_TYPE_SIZE_LBN)
		     | (2 << SPI_DEV_TYPE_ADDR_LEN_LBN)
		     | (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN)),
/* Default flash device: Atmel AT25F1024
 * 128 KB, 24-bit address, 32 KB erase block, 256 B write block
 * (17 -> 128 KB, 0x52 = sector-erase opcode, 15 -> 32 KB, 8 -> 256 B) */
default_flash_type = ((17 << SPI_DEV_TYPE_SIZE_LBN)
		      | (3 << SPI_DEV_TYPE_ADDR_LEN_LBN)
		      | (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN)
		      | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN)
		      | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN));
  44
  45/**************************************************************************
  46 *
  47 * I2C bus - this is a bit-bashing interface using GPIO pins
  48 * Note that it uses the output enables to tristate the outputs
  49 * SDA is the data pin and SCL is the clock
  50 *
  51 **************************************************************************
  52 */
  53static void falcon_setsda(void *data, int state)
  54{
  55	struct efx_nic *efx = (struct efx_nic *)data;
  56	efx_oword_t reg;
  57
  58	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
  59	EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN, !state);
  60	efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
  61}
  62
  63static void falcon_setscl(void *data, int state)
  64{
  65	struct efx_nic *efx = (struct efx_nic *)data;
  66	efx_oword_t reg;
  67
  68	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
  69	EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO0_OEN, !state);
  70	efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
  71}
  72
  73static int falcon_getsda(void *data)
  74{
  75	struct efx_nic *efx = (struct efx_nic *)data;
  76	efx_oword_t reg;
  77
  78	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
  79	return EFX_OWORD_FIELD(reg, FRF_AB_GPIO3_IN);
  80}
  81
  82static int falcon_getscl(void *data)
  83{
  84	struct efx_nic *efx = (struct efx_nic *)data;
  85	efx_oword_t reg;
  86
  87	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
  88	return EFX_OWORD_FIELD(reg, FRF_AB_GPIO0_IN);
  89}
  90
/* Bit-banged I2C operations bound to the GPIO accessors above; passed
 * to the kernel's i2c-algo-bit core. */
static const struct i2c_algo_bit_data falcon_i2c_bit_operations = {
	.setsda		= falcon_setsda,
	.setscl		= falcon_setscl,
	.getsda		= falcon_getsda,
	.getscl		= falcon_getscl,
	.udelay		= 5,	/* bit-bang delay in microseconds */
	/* Wait up to 50 ms for slave to let us pull SCL high */
	.timeout	= DIV_ROUND_UP(HZ, 20),
};
 100
 101static void falcon_push_irq_moderation(struct efx_channel *channel)
 102{
 103	efx_dword_t timer_cmd;
 104	struct efx_nic *efx = channel->efx;
 105
 106	/* Set timer register */
 107	if (channel->irq_moderation) {
 108		EFX_POPULATE_DWORD_2(timer_cmd,
 109				     FRF_AB_TC_TIMER_MODE,
 110				     FFE_BB_TIMER_MODE_INT_HLDOFF,
 111				     FRF_AB_TC_TIMER_VAL,
 112				     channel->irq_moderation - 1);
 113	} else {
 114		EFX_POPULATE_DWORD_2(timer_cmd,
 115				     FRF_AB_TC_TIMER_MODE,
 116				     FFE_BB_TIMER_MODE_DIS,
 117				     FRF_AB_TC_TIMER_VAL, 0);
 118	}
 119	BUILD_BUG_ON(FR_AA_TIMER_COMMAND_KER != FR_BZ_TIMER_COMMAND_P0);
 120	efx_writed_page_locked(efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
 121			       channel->channel);
 122}
 123
 124static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx);
 125
/* Quiesce the datapath before a queue flush: isolate the MAC, then
 * give the FIFOs time to empty. */
static void falcon_prepare_flush(struct efx_nic *efx)
{
	falcon_deconfigure_mac_wrapper(efx);

	/* Wait for the tx and rx fifo's to get to the next packet boundary
	 * (~1ms without back-pressure), then to drain the remainder of the
	 * fifo's at data path speeds (negligible), with a healthy margin. */
	msleep(10);
}
 135
/* Acknowledge a legacy interrupt from Falcon
 *
 * This acknowledges a legacy (not MSI) interrupt via INT_ACK_KER_REG.
 *
 * Due to SFC bug 3706 (silicon revision <=A1) reads can be duplicated in the
 * BIU. Interrupt acknowledge is read sensitive so must write instead
 * (then read to ensure the BIU collector is flushed)
 *
 * NB most hardware supports MSI interrupts
 */
inline void falcon_irq_ack_a1(struct efx_nic *efx)
{
	efx_dword_t reg;

	/* The write itself is the acknowledge; 0xb7eb7e is a marker value */
	EFX_POPULATE_DWORD_1(reg, FRF_AA_INT_ACK_KER_FIELD, 0xb7eb7e);
	efx_writed(efx, &reg, FR_AA_INT_ACK_KER);
	/* Read back to flush the BIU write collector (see bug 3706 above) */
	efx_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS);
}
 154
 155
/* Legacy (INTx) interrupt handler for Falcon A1.  The interrupt vector
 * is DMAed by hardware into efx->irq_status; a zero vector means the
 * interrupt was not ours. */
irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;
	int queues;

	/* Check to see if this is our interrupt.  If it isn't, we
	 * exit without having touched the hardware.
	 */
	if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) {
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d not for me\n", irq,
			   raw_smp_processor_id());
		return IRQ_NONE;
	}
	efx->last_irq_cpu = raw_smp_processor_id();
	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	/* Check to see if we have a serious error condition */
	syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
	if (unlikely(syserr))
		return efx_nic_fatal_interrupt(efx);

	/* Determine interrupting queues, clear interrupt status
	 * register and acknowledge the device interrupt.
	 */
	BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EFX_MAX_CHANNELS);
	queues = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q);
	EFX_ZERO_OWORD(*int_ker);
	wmb(); /* Ensure the vector is cleared before interrupt ack */
	falcon_irq_ack_a1(efx);

	/* Only the first two channels are serviced here (one bit each) */
	if (queues & 1)
		efx_schedule_channel_irq(efx_get_channel(efx, 0));
	if (queues & 2)
		efx_schedule_channel_irq(efx_get_channel(efx, 1));
	return IRQ_HANDLED;
}
 197/**************************************************************************
 198 *
 199 * EEPROM/flash
 200 *
 201 **************************************************************************
 202 */
 203
 204#define FALCON_SPI_MAX_LEN sizeof(efx_oword_t)
 205
 206static int falcon_spi_poll(struct efx_nic *efx)
 207{
 208	efx_oword_t reg;
 209	efx_reado(efx, &reg, FR_AB_EE_SPI_HCMD);
 210	return EFX_OWORD_FIELD(reg, FRF_AB_EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0;
 211}
 212
/* Wait for SPI command completion: busy-wait briefly for the common
 * fast case, then fall back to sleeping polls up to a 100 ms deadline. */
static int falcon_spi_wait(struct efx_nic *efx)
{
	/* Most commands will finish quickly, so we start polling at
	 * very short intervals.  Sometimes the command may have to
	 * wait for VPD or expansion ROM access outside of our
	 * control, so we allow up to 100 ms. */
	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 10);
	int i;

	/* Fast path: up to ~100 us of busy-waiting */
	for (i = 0; i < 10; i++) {
		if (!falcon_spi_poll(efx))
			return 0;
		udelay(10);
	}

	/* Slow path: sleep between polls until the deadline passes */
	for (;;) {
		if (!falcon_spi_poll(efx))
			return 0;
		if (time_after_eq(jiffies, timeout)) {
			netif_err(efx, hw, efx->net_dev,
				  "timed out waiting for SPI\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	}
}
 240
/* Issue one SPI command through the Falcon SPI host interface.
 * @command: SPI opcode to send
 * @address: device address, or negative for commands without an address phase
 * @in: data to write after the address, or NULL
 * @out: buffer for data read back, or NULL
 * @len: data length in bytes; limited to FALCON_SPI_MAX_LEN by the
 *	 size of the host data register
 * Returns 0 on success or a negative error code.
 */
int falcon_spi_cmd(struct efx_nic *efx, const struct efx_spi_device *spi,
		   unsigned int command, int address,
		   const void *in, void *out, size_t len)
{
	bool addressed = (address >= 0);
	bool reading = (out != NULL);
	efx_oword_t reg;
	int rc;

	/* Input validation */
	if (len > FALCON_SPI_MAX_LEN)
		return -EINVAL;

	/* Check that previous command is not still running */
	rc = falcon_spi_poll(efx);
	if (rc)
		return rc;

	/* Program address register, if we have an address */
	if (addressed) {
		EFX_POPULATE_OWORD_1(reg, FRF_AB_EE_SPI_HADR_ADR, address);
		efx_writeo(efx, &reg, FR_AB_EE_SPI_HADR);
	}

	/* Program data register, if we have data */
	if (in != NULL) {
		memcpy(&reg, in, len);
		efx_writeo(efx, &reg, FR_AB_EE_SPI_HDATA);
	}

	/* Issue read/write command; setting CMD_EN starts the transfer */
	EFX_POPULATE_OWORD_7(reg,
			     FRF_AB_EE_SPI_HCMD_CMD_EN, 1,
			     FRF_AB_EE_SPI_HCMD_SF_SEL, spi->device_id,
			     FRF_AB_EE_SPI_HCMD_DABCNT, len,
			     FRF_AB_EE_SPI_HCMD_READ, reading,
			     FRF_AB_EE_SPI_HCMD_DUBCNT, 0,
			     FRF_AB_EE_SPI_HCMD_ADBCNT,
			     (addressed ? spi->addr_len : 0),
			     FRF_AB_EE_SPI_HCMD_ENC, command);
	efx_writeo(efx, &reg, FR_AB_EE_SPI_HCMD);

	/* Wait for read/write to complete */
	rc = falcon_spi_wait(efx);
	if (rc)
		return rc;

	/* Read data */
	if (out != NULL) {
		efx_reado(efx, &reg, FR_AB_EE_SPI_HDATA);
		memcpy(out, &reg, len);
	}

	return 0;
}
 296
 297static size_t
 298falcon_spi_write_limit(const struct efx_spi_device *spi, size_t start)
 299{
 300	return min(FALCON_SPI_MAX_LEN,
 301		   (spi->block_size - (start & (spi->block_size - 1))));
 302}
 303
 304static inline u8
 305efx_spi_munge_command(const struct efx_spi_device *spi,
 306		      const u8 command, const unsigned int address)
 307{
 308	return command | (((address >> 8) & spi->munge_address) << 3);
 309}
 310
/* Wait up to 10 ms for buffered write completion, polling the device's
 * status register (RDSR) until the not-ready bit clears. */
int
falcon_spi_wait_write(struct efx_nic *efx, const struct efx_spi_device *spi)
{
	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
	u8 status;
	int rc;

	for (;;) {
		rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
				    &status, sizeof(status));
		if (rc)
			return rc;
		if (!(status & SPI_STATUS_NRDY))
			return 0;
		if (time_after_eq(jiffies, timeout)) {
			netif_err(efx, hw, efx->net_dev,
				  "SPI write timeout on device %d"
				  " last status=0x%02x\n",
				  spi->device_id, status);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	}
}
 336
 337int falcon_spi_read(struct efx_nic *efx, const struct efx_spi_device *spi,
 338		    loff_t start, size_t len, size_t *retlen, u8 *buffer)
 339{
 340	size_t block_len, pos = 0;
 341	unsigned int command;
 342	int rc = 0;
 343
 344	while (pos < len) {
 345		block_len = min(len - pos, FALCON_SPI_MAX_LEN);
 346
 347		command = efx_spi_munge_command(spi, SPI_READ, start + pos);
 348		rc = falcon_spi_cmd(efx, spi, command, start + pos, NULL,
 349				    buffer + pos, block_len);
 350		if (rc)
 351			break;
 352		pos += block_len;
 353
 354		/* Avoid locking up the system */
 355		cond_resched();
 356		if (signal_pending(current)) {
 357			rc = -EINTR;
 358			break;
 359		}
 360	}
 361
 362	if (retlen)
 363		*retlen = pos;
 364	return rc;
 365}
 366
 367int
 368falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi,
 369		 loff_t start, size_t len, size_t *retlen, const u8 *buffer)
 370{
 371	u8 verify_buffer[FALCON_SPI_MAX_LEN];
 372	size_t block_len, pos = 0;
 373	unsigned int command;
 374	int rc = 0;
 375
 376	while (pos < len) {
 377		rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
 378		if (rc)
 379			break;
 380
 381		block_len = min(len - pos,
 382				falcon_spi_write_limit(spi, start + pos));
 383		command = efx_spi_munge_command(spi, SPI_WRITE, start + pos);
 384		rc = falcon_spi_cmd(efx, spi, command, start + pos,
 385				    buffer + pos, NULL, block_len);
 386		if (rc)
 387			break;
 388
 389		rc = falcon_spi_wait_write(efx, spi);
 390		if (rc)
 391			break;
 392
 393		command = efx_spi_munge_command(spi, SPI_READ, start + pos);
 394		rc = falcon_spi_cmd(efx, spi, command, start + pos,
 395				    NULL, verify_buffer, block_len);
 396		if (memcmp(verify_buffer, buffer + pos, block_len)) {
 397			rc = -EIO;
 398			break;
 399		}
 400
 401		pos += block_len;
 402
 403		/* Avoid locking up the system */
 404		cond_resched();
 405		if (signal_pending(current)) {
 406			rc = -EINTR;
 407			break;
 408		}
 409	}
 410
 411	if (retlen)
 412		*retlen = pos;
 413	return rc;
 414}
 415
 416/**************************************************************************
 417 *
 418 * MAC wrapper
 419 *
 420 **************************************************************************
 421 */
 422
/* Write the software multicast hash filter to the two MAC hash
 * registers.  Caller must hold efx->mac_lock. */
static void falcon_push_multicast_hash(struct efx_nic *efx)
{
	union efx_multicast_hash *mc_hash = &efx->multicast_hash;

	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	efx_writeo(efx, &mc_hash->oword[0], FR_AB_MAC_MC_HASH_REG0);
	efx_writeo(efx, &mc_hash->oword[1], FR_AB_MAC_MC_HASH_REG1);
}
 432
/* Reset the MACs.  On pre-B0 silicon the XMAC core reset is tried
 * first and the function returns on success; on timeout it falls
 * through to the GLB_CTL-based reset used unconditionally on B0,
 * which resets the EM/XGTX/XGRX blocks with the TX FIFO draining. */
static void falcon_reset_macs(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t reg, mac_ctrl;
	int count;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
		/* It's not safe to use GLB_CTL_REG to reset the
		 * macs, so instead use the internal MAC resets
		 */
		EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1);
		efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);

		/* Poll up to ~100 ms for the self-clearing reset bit */
		for (count = 0; count < 10000; count++) {
			efx_reado(efx, &reg, FR_AB_XM_GLB_CFG);
			if (EFX_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) ==
			    0)
				return;
			udelay(10);
		}

		/* Fall through to the global reset as a last resort */
		netif_err(efx, hw, efx->net_dev,
			  "timed out waiting for XMAC core reset\n");
	}

	/* Mac stats will fail whist the TX fifo is draining */
	WARN_ON(nic_data->stats_disable_count == 0);

	/* Save MAC_CTRL so the MAC selection can be restored afterwards,
	 * and enable TX FIFO draining before the block resets */
	efx_reado(efx, &mac_ctrl, FR_AB_MAC_CTRL);
	EFX_SET_OWORD_FIELD(mac_ctrl, FRF_BB_TXFIFO_DRAIN_EN, 1);
	efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);

	efx_reado(efx, &reg, FR_AB_GLB_CTL);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGTX, 1);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGRX, 1);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_EM, 1);
	efx_writeo(efx, &reg, FR_AB_GLB_CTL);

	/* Wait for all three reset bits to self-clear */
	count = 0;
	while (1) {
		efx_reado(efx, &reg, FR_AB_GLB_CTL);
		if (!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGTX) &&
		    !EFX_OWORD_FIELD(reg, FRF_AB_RST_XGRX) &&
		    !EFX_OWORD_FIELD(reg, FRF_AB_RST_EM)) {
			netif_dbg(efx, hw, efx->net_dev,
				  "Completed MAC reset after %d loops\n",
				  count);
			break;
		}
		if (count > 20) {
			netif_err(efx, hw, efx->net_dev, "MAC reset failed\n");
			break;
		}
		count++;
		udelay(10);
	}

	/* Ensure the correct MAC is selected before statistics
	 * are re-enabled by the caller */
	efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);

	falcon_setup_xaui(efx);
}
 496
 497void falcon_drain_tx_fifo(struct efx_nic *efx)
 498{
 499	efx_oword_t reg;
 500
 501	if ((efx_nic_rev(efx) < EFX_REV_FALCON_B0) ||
 502	    (efx->loopback_mode != LOOPBACK_NONE))
 503		return;
 504
 505	efx_reado(efx, &reg, FR_AB_MAC_CTRL);
 506	/* There is no point in draining more than once */
 507	if (EFX_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN))
 508		return;
 509
 510	falcon_reset_macs(efx);
 511}
 512
/* Isolate the MAC from the datapath (B0 and later only): stop packet
 * ingress from the MAC into RX, then drain the TX FIFO. */
static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
{
	efx_oword_t reg;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
		return;

	/* Isolate the MAC -> RX */
	efx_reado(efx, &reg, FR_AZ_RX_CFG);
	EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 0);
	efx_writeo(efx, &reg, FR_AZ_RX_CFG);

	/* Isolate TX -> MAC */
	falcon_drain_tx_fifo(efx);
}
 528
/* Reprogram the MAC wrapper (MAC_CTRL and RX_CFG) to match the current
 * link state, keeping the MAC isolated while a reset is pending. */
void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
{
	struct efx_link_state *link_state = &efx->link_state;
	efx_oword_t reg;
	int link_speed, isolate;

	/* Snapshot reset_pending once; it may change under us */
	isolate = !!ACCESS_ONCE(efx->reset_pending);

	/* Encode the link speed for the MAC_SPEED field */
	switch (link_state->speed) {
	case 10000: link_speed = 3; break;
	case 1000:  link_speed = 2; break;
	case 100:   link_speed = 1; break;
	default:    link_speed = 0; break;
	}
	/* MAC_LINK_STATUS controls MAC backpressure but doesn't work
	 * as advertised.  Disable to ensure packets are not
	 * indefinitely held and TX queue can be flushed at any point
	 * while the link is down. */
	EFX_POPULATE_OWORD_5(reg,
			     FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */,
			     FRF_AB_MAC_BCAD_ACPT, 1,
			     FRF_AB_MAC_UC_PROM, efx->promiscuous,
			     FRF_AB_MAC_LINK_STATUS, 1, /* always set */
			     FRF_AB_MAC_SPEED, link_speed);
	/* On B0, MAC backpressure can be disabled and packets get
	 * discarded. */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN,
				    !link_state->up || isolate);
	}

	efx_writeo(efx, &reg, FR_AB_MAC_CTRL);

	/* Restore the multicast hash registers. */
	falcon_push_multicast_hash(efx);

	efx_reado(efx, &reg, FR_AZ_RX_CFG);
	/* Enable XOFF signal from RX FIFO (we enabled it during NIC
	 * initialisation but it may read back as 0) */
	EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
	/* Unisolate the MAC -> RX */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, !isolate);
	efx_writeo(efx, &reg, FR_AZ_RX_CFG);
}
 574
/* Start a DMA transfer of MAC statistics into efx->stats_buffer and
 * arm the timer that will harvest (or time out) the result. */
static void falcon_stats_request(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t reg;

	WARN_ON(nic_data->stats_pending);
	WARN_ON(nic_data->stats_disable_count);

	if (nic_data->stats_dma_done == NULL)
		return;	/* no mac selected */

	*nic_data->stats_dma_done = FALCON_STATS_NOT_DONE;
	nic_data->stats_pending = true;
	wmb(); /* ensure done flag is clear */

	/* Initiate DMA transfer of stats */
	EFX_POPULATE_OWORD_2(reg,
			     FRF_AB_MAC_STAT_DMA_CMD, 1,
			     FRF_AB_MAC_STAT_DMA_ADR,
			     efx->stats_buffer.dma_addr);
	efx_writeo(efx, &reg, FR_AB_MAC_STAT_DMA);

	/* Harvest the result in ~500 ms via falcon_stats_timer_func() */
	mod_timer(&nic_data->stats_timer, round_jiffies_up(jiffies + HZ / 2));
}
 599
/* Harvest a previously requested stats DMA.  The hardware sets the
 * done flag in the buffer when the transfer completes; the rmb() pairs
 * with that DMA write so the stats are not read before the flag. */
static void falcon_stats_complete(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	if (!nic_data->stats_pending)
		return;

	nic_data->stats_pending = false;
	if (*nic_data->stats_dma_done == FALCON_STATS_DONE) {
		rmb(); /* read the done flag before the stats */
		falcon_update_stats_xmac(efx);
	} else {
		netif_err(efx, hw, efx->net_dev,
			  "timed out waiting for statistics\n");
	}
}
 616
/* Stats timer callback: complete the outstanding DMA and, unless stats
 * collection has been disabled, kick off the next request. */
static void falcon_stats_timer_func(unsigned long context)
{
	struct efx_nic *efx = (struct efx_nic *)context;
	struct falcon_nic_data *nic_data = efx->nic_data;

	spin_lock(&efx->stats_lock);

	falcon_stats_complete(efx);
	if (nic_data->stats_disable_count == 0)
		falcon_stats_request(efx);

	spin_unlock(&efx->stats_lock);
}
 630
 631static bool falcon_loopback_link_poll(struct efx_nic *efx)
 632{
 633	struct efx_link_state old_state = efx->link_state;
 634
 635	WARN_ON(!mutex_is_locked(&efx->mac_lock));
 636	WARN_ON(!LOOPBACK_INTERNAL(efx));
 637
 638	efx->link_state.fd = true;
 639	efx->link_state.fc = efx->wanted_fc;
 640	efx->link_state.up = true;
 641	efx->link_state.speed = 10000;
 642
 643	return !efx_link_state_equal(&efx->link_state, &old_state);
 644}
 645
/* Full port reconfiguration: poll link state, reset and reprogram the
 * PHY and MAC, then resume statistics.  Always returns 0. */
static int falcon_reconfigure_port(struct efx_nic *efx)
{
	int rc;

	WARN_ON(efx_nic_rev(efx) > EFX_REV_FALCON_B0);

	/* Poll the PHY link state *before* reconfiguring it. This means we
	 * will pick up the correct speed (in loopback) to select the correct
	 * MAC.
	 */
	if (LOOPBACK_INTERNAL(efx))
		falcon_loopback_link_poll(efx);
	else
		efx->phy_op->poll(efx);

	/* Stats must be stopped while the TX FIFO drains in the reset */
	falcon_stop_nic_stats(efx);
	falcon_deconfigure_mac_wrapper(efx);

	falcon_reset_macs(efx);

	efx->phy_op->reconfigure(efx);
	rc = falcon_reconfigure_xmac(efx);
	BUG_ON(rc);

	falcon_start_nic_stats(efx);

	/* Synchronise efx->link_state with the kernel */
	efx_link_status_changed(efx);

	return 0;
}
 677
 678/**************************************************************************
 679 *
 680 * PHY access via GMII
 681 *
 682 **************************************************************************
 683 */
 684
/* Wait for GMII access to complete.  Returns 0 on success, -EIO if the
 * hardware reports a line fault or bus error, -ETIMEDOUT otherwise. */
static int falcon_gmii_wait(struct efx_nic *efx)
{
	efx_oword_t md_stat;
	int count;

	/* wait up to 50ms - taken max from datasheet */
	for (count = 0; count < 5000; count++) {
		efx_reado(efx, &md_stat, FR_AB_MD_STAT);
		if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) {
			/* Not busy: check for errors before declaring success */
			if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 ||
			    EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) {
				netif_err(efx, hw, efx->net_dev,
					  "error from GMII access "
					  EFX_OWORD_FMT"\n",
					  EFX_OWORD_VAL(md_stat));
				return -EIO;
			}
			return 0;
		}
		udelay(10);
	}
	netif_err(efx, hw, efx->net_dev, "timed out waiting for GMII\n");
	return -ETIMEDOUT;
}
 710
/* Write an MDIO register of a PHY connected to Falcon.
 * @prtad/@devad: MDIO port and device address
 * @addr: register address within the device
 * @value: value to write
 * Returns 0 on success or a negative error from falcon_gmii_wait().
 * Serialised against concurrent MDIO access by nic_data->mdio_lock. */
static int falcon_mdio_write(struct net_device *net_dev,
			     int prtad, int devad, u16 addr, u16 value)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t reg;
	int rc;

	netif_vdbg(efx, hw, efx->net_dev,
		   "writing MDIO %d register %d.%d with 0x%04x\n",
		    prtad, devad, addr, value);

	mutex_lock(&nic_data->mdio_lock);

	/* Check MDIO not currently being accessed */
	rc = falcon_gmii_wait(efx);
	if (rc)
		goto out;

	/* Write the address/ID register */
	EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
	efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);

	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
			     FRF_AB_MD_DEV_ADR, devad);
	efx_writeo(efx, &reg, FR_AB_MD_ID);

	/* Write data */
	EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_TXD, value);
	efx_writeo(efx, &reg, FR_AB_MD_TXD);

	/* Setting WRC kicks off the write cycle */
	EFX_POPULATE_OWORD_2(reg,
			     FRF_AB_MD_WRC, 1,
			     FRF_AB_MD_GC, 0);
	efx_writeo(efx, &reg, FR_AB_MD_CS);

	/* Wait for data to be written */
	rc = falcon_gmii_wait(efx);
	if (rc) {
		/* Abort the write operation (GC = generic cancel) */
		EFX_POPULATE_OWORD_2(reg,
				     FRF_AB_MD_WRC, 0,
				     FRF_AB_MD_GC, 1);
		efx_writeo(efx, &reg, FR_AB_MD_CS);
		udelay(10);
	}

out:
	mutex_unlock(&nic_data->mdio_lock);
	return rc;
}
 763
/* Read an MDIO register of a PHY connected to Falcon.
 * Returns the (non-negative) register value on success, or a negative
 * error code.  Serialised by nic_data->mdio_lock like the write path. */
static int falcon_mdio_read(struct net_device *net_dev,
			    int prtad, int devad, u16 addr)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t reg;
	int rc;

	mutex_lock(&nic_data->mdio_lock);

	/* Check MDIO not currently being accessed */
	rc = falcon_gmii_wait(efx);
	if (rc)
		goto out;

	/* Program register then port/device address */
	EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
	efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);

	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
			     FRF_AB_MD_DEV_ADR, devad);
	efx_writeo(efx, &reg, FR_AB_MD_ID);

	/* Request data to be read */
	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0);
	efx_writeo(efx, &reg, FR_AB_MD_CS);

	/* Wait for data to become available */
	rc = falcon_gmii_wait(efx);
	if (rc == 0) {
		efx_reado(efx, &reg, FR_AB_MD_RXD);
		rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD);
		netif_vdbg(efx, hw, efx->net_dev,
			   "read from MDIO %d register %d.%d, got %04x\n",
			   prtad, devad, addr, rc);
	} else {
		/* Abort the read operation (GC = generic cancel) */
		EFX_POPULATE_OWORD_2(reg,
				     FRF_AB_MD_RIC, 0,
				     FRF_AB_MD_GC, 1);
		efx_writeo(efx, &reg, FR_AB_MD_CS);

		netif_dbg(efx, hw, efx->net_dev,
			  "read from MDIO %d register %d.%d, got error %d\n",
			  prtad, devad, addr, rc);
	}

out:
	mutex_unlock(&nic_data->mdio_lock);
	return rc;
}
 815
/* This call is responsible for hooking in the MAC and PHY operations.
 * Selects phy_op by PHY type, wires up MDIO, probes the PHY, sets
 * default link/flow-control state and allocates the stats DMA buffer.
 * Returns 0 or a negative error code. */
static int falcon_probe_port(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	switch (efx->phy_type) {
	case PHY_TYPE_SFX7101:
		efx->phy_op = &falcon_sfx7101_phy_ops;
		break;
	case PHY_TYPE_QT2022C2:
	case PHY_TYPE_QT2025C:
		efx->phy_op = &falcon_qt202x_phy_ops;
		break;
	case PHY_TYPE_TXC43128:
		efx->phy_op = &falcon_txc_phy_ops;
		break;
	default:
		netif_err(efx, probe, efx->net_dev, "Unknown PHY type %d\n",
			  efx->phy_type);
		return -ENODEV;
	}

	/* Fill out MDIO structure and loopback modes */
	mutex_init(&nic_data->mdio_lock);
	efx->mdio.mdio_read = falcon_mdio_read;
	efx->mdio.mdio_write = falcon_mdio_write;
	rc = efx->phy_op->probe(efx);
	if (rc != 0)
		return rc;

	/* Initial assumption */
	efx->link_state.speed = 10000;
	efx->link_state.fd = true;

	/* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
	else
		efx->wanted_fc = EFX_FC_RX;
	if (efx->mdio.mmds & MDIO_DEVS_AN)
		efx->wanted_fc |= EFX_FC_AUTO;

	/* Allocate buffer for stats */
	rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
				  FALCON_MAC_STATS_SIZE);
	if (rc)
		return rc;
	netif_dbg(efx, probe, efx->net_dev,
		  "stats buffer at %llx (virt %p phys %llx)\n",
		  (u64)efx->stats_buffer.dma_addr,
		  efx->stats_buffer.addr,
		  (u64)virt_to_phys(efx->stats_buffer.addr));
	/* The done flag lives inside the stats buffer itself */
	nic_data->stats_dma_done = efx->stats_buffer.addr + XgDmaDone_offset;

	return 0;
}
 873
/* Undo falcon_probe_port(): detach the PHY and free the stats buffer. */
static void falcon_remove_port(struct efx_nic *efx)
{
	efx->phy_op->remove(efx);
	efx_nic_free_buffer(efx, &efx->stats_buffer);
}
 879
/* Global events are basically PHY events.
 * Returns true if the event was consumed here, false so the generic
 * code can handle (or warn about) it. */
static bool
falcon_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	struct falcon_nic_data *nic_data = efx->nic_data;

	if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
	    EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
	    EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR))
		/* Ignored */
		return true;

	/* B0 XGMII management interrupt: defer to the XMAC poll */
	if ((efx_nic_rev(efx) == EFX_REV_FALCON_B0) &&
	    EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
		nic_data->xmac_poll_required = true;
		return true;
	}

	/* RX_RECOVERY lives in a different field on A-series vs B-series */
	if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
	    EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
	    EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen global RX_RESET event. Resetting.\n",
			  channel->channel);

		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
		return true;
	}

	return false;
}
 914
 915/**************************************************************************
 916 *
 917 * Falcon test code
 918 *
 919 **************************************************************************/
 920
/* Read and validate the NVRAM configuration region from flash (preferred)
 * or EEPROM.  Checks the magic number, structure version and checksum;
 * on success copies the parsed structure to @nvconfig_out if non-NULL.
 * Returns 0 on success or a negative error code. */
static int
falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	struct falcon_nvconfig *nvconfig;
	struct efx_spi_device *spi;
	void *region;
	int rc, magic_num, struct_ver;
	__le16 *word, *limit;
	u32 csum;

	/* Prefer flash if fitted, otherwise fall back to EEPROM */
	if (efx_spi_present(&nic_data->spi_flash))
		spi = &nic_data->spi_flash;
	else if (efx_spi_present(&nic_data->spi_eeprom))
		spi = &nic_data->spi_eeprom;
	else
		return -EINVAL;

	region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
	if (!region)
		return -ENOMEM;
	nvconfig = region + FALCON_NVCONFIG_OFFSET;

	mutex_lock(&nic_data->spi_lock);
	rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region);
	mutex_unlock(&nic_data->spi_lock);
	if (rc) {
		netif_err(efx, hw, efx->net_dev, "Failed to read %s\n",
			  efx_spi_present(&nic_data->spi_flash) ?
			  "flash" : "EEPROM");
		rc = -EIO;
		goto out;
	}

	magic_num = le16_to_cpu(nvconfig->board_magic_num);
	struct_ver = le16_to_cpu(nvconfig->board_struct_ver);

	rc = -EINVAL;
	if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) {
		netif_err(efx, hw, efx->net_dev,
			  "NVRAM bad magic 0x%x\n", magic_num);
		goto out;
	}
	if (struct_ver < 2) {
		netif_err(efx, hw, efx->net_dev,
			  "NVRAM has ancient version 0x%x\n", struct_ver);
		goto out;
	} else if (struct_ver < 4) {
		/* v2/v3: checksum covers only the nvconfig structure */
		word = &nvconfig->board_magic_num;
		limit = (__le16 *) (nvconfig + 1);
	} else {
		/* v4+: checksum covers the whole region */
		word = region;
		limit = region + FALCON_NVCONFIG_END;
	}
	for (csum = 0; word < limit; ++word)
		csum += le16_to_cpu(*word);

	/* A valid image sums to all-ones in the low 16 bits */
	if (~csum & 0xffff) {
		netif_err(efx, hw, efx->net_dev,
			  "NVRAM has incorrect checksum\n");
		goto out;
	}

	rc = 0;
	if (nvconfig_out)
		memcpy(nvconfig_out, nvconfig, sizeof(*nvconfig));

 out:
	kfree(region);
	return rc;
}
 992
/* Self-test hook: NVRAM passes if it parses and checksums correctly;
 * the contents themselves are discarded. */
static int falcon_test_nvram(struct efx_nic *efx)
{
	return falcon_read_nvram(efx, NULL);
}
 997
/* Register/mask table for the B0 register self-test.  Each mask selects
 * the bits exercised by the test (presumably the safely-writable bits;
 * see efx_nic_test_registers() for the exact semantics). */
static const struct efx_nic_register_test falcon_b0_register_tests[] = {
	{ FR_AZ_ADR_REGION,
	  EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
	{ FR_AZ_RX_CFG,
	  EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
	{ FR_AZ_TX_CFG,
	  EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_TX_RESERVED,
	  EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
	{ FR_AB_MAC_CTRL,
	  EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_SRM_TX_DC_CFG,
	  EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_RX_DC_CFG,
	  EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_RX_DC_PF_WM,
	  EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_BZ_DP_CTRL,
	  EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_GM_CFG2,
	  EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_GMF_CFG0,
	  EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_GLB_CFG,
	  EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_TX_CFG,
	  EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_RX_CFG,
	  EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_RX_PARAM,
	  EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_FC,
	  EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_ADR_LO,
	  EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XX_SD_CTL,
	  EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
};
1036
1037static int falcon_b0_test_registers(struct efx_nic *efx)
1038{
1039	return efx_nic_test_registers(efx, falcon_b0_register_tests,
1040				      ARRAY_SIZE(falcon_b0_register_tests));
1041}
1042
1043/**************************************************************************
1044 *
1045 * Device reset
1046 *
1047 **************************************************************************
1048 */
1049
1050static enum reset_type falcon_map_reset_reason(enum reset_type reason)
1051{
1052	switch (reason) {
1053	case RESET_TYPE_RX_RECOVERY:
1054	case RESET_TYPE_RX_DESC_FETCH:
1055	case RESET_TYPE_TX_DESC_FETCH:
1056	case RESET_TYPE_TX_SKIP:
1057		/* These can occasionally occur due to hardware bugs.
1058		 * We try to reset without disrupting the link.
1059		 */
1060		return RESET_TYPE_INVISIBLE;
1061	default:
1062		return RESET_TYPE_ALL;
1063	}
1064}
1065
1066static int falcon_map_reset_flags(u32 *flags)
1067{
1068	enum {
1069		FALCON_RESET_INVISIBLE = (ETH_RESET_DMA | ETH_RESET_FILTER |
1070					  ETH_RESET_OFFLOAD | ETH_RESET_MAC),
1071		FALCON_RESET_ALL = FALCON_RESET_INVISIBLE | ETH_RESET_PHY,
1072		FALCON_RESET_WORLD = FALCON_RESET_ALL | ETH_RESET_IRQ,
1073	};
1074
1075	if ((*flags & FALCON_RESET_WORLD) == FALCON_RESET_WORLD) {
1076		*flags &= ~FALCON_RESET_WORLD;
1077		return RESET_TYPE_WORLD;
1078	}
1079
1080	if ((*flags & FALCON_RESET_ALL) == FALCON_RESET_ALL) {
1081		*flags &= ~FALCON_RESET_ALL;
1082		return RESET_TYPE_ALL;
1083	}
1084
1085	if ((*flags & FALCON_RESET_INVISIBLE) == FALCON_RESET_INVISIBLE) {
1086		*flags &= ~FALCON_RESET_INVISIBLE;
1087		return RESET_TYPE_INVISIBLE;
1088	}
1089
1090	return -EINVAL;
1091}
1092
/* Resets NIC to known state.  This routine must be called in process
 * context and is allowed to sleep.
 *
 * RESET_TYPE_WORLD also resets the PCIe core, so the PCI config space
 * of both functions is saved first and restored afterwards.  Other
 * methods exclude the PCIe core and the EEPROM/flash interface from
 * the reset, and RESET_TYPE_INVISIBLE additionally excludes the PHY.
 *
 * Callers either hold nic_data->spi_lock (falcon_reset_hw()) or run
 * at probe/remove time before/after SPI devices are in use.
 */
static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t glb_ctl_reg_ker;
	int rc;

	netif_dbg(efx, hw, efx->net_dev, "performing %s hardware reset\n",
		  RESET_TYPE(method));

	/* Initiate device reset */
	if (method == RESET_TYPE_WORLD) {
		rc = pci_save_state(efx->pci_dev);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to backup PCI state of primary "
				  "function prior to hardware reset\n");
			goto fail1;
		}
		if (efx_nic_is_dual_func(efx)) {
			rc = pci_save_state(nic_data->pci_dev2);
			if (rc) {
				netif_err(efx, drv, efx->net_dev,
					  "failed to backup PCI state of "
					  "secondary function prior to "
					  "hardware reset\n");
				goto fail2;
			}
		}

		EFX_POPULATE_OWORD_2(glb_ctl_reg_ker,
				     FRF_AB_EXT_PHY_RST_DUR,
				     FFE_AB_EXT_PHY_RST_DUR_10240US,
				     FRF_AB_SWRST, 1);
	} else {
		EFX_POPULATE_OWORD_7(glb_ctl_reg_ker,
				     /* exclude PHY from "invisible" reset */
				     FRF_AB_EXT_PHY_RST_CTL,
				     method == RESET_TYPE_INVISIBLE,
				     /* exclude EEPROM/flash and PCIe */
				     FRF_AB_PCIE_CORE_RST_CTL, 1,
				     FRF_AB_PCIE_NSTKY_RST_CTL, 1,
				     FRF_AB_PCIE_SD_RST_CTL, 1,
				     FRF_AB_EE_RST_CTL, 1,
				     FRF_AB_EXT_PHY_RST_DUR,
				     FFE_AB_EXT_PHY_RST_DUR_10240US,
				     FRF_AB_SWRST, 1);
	}
	efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);

	/* Give the reset time to complete (HZ/20 = 50 ms) before
	 * polling for completion below */
	netif_dbg(efx, hw, efx->net_dev, "waiting for hardware reset\n");
	schedule_timeout_uninterruptible(HZ / 20);

	/* Restore PCI configuration if needed */
	if (method == RESET_TYPE_WORLD) {
		if (efx_nic_is_dual_func(efx))
			pci_restore_state(nic_data->pci_dev2);
		pci_restore_state(efx->pci_dev);
		netif_dbg(efx, drv, efx->net_dev,
			  "successfully restored PCI config\n");
	}

	/* Assert that reset complete */
	efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
	if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) {
		rc = -ETIMEDOUT;
		netif_err(efx, hw, efx->net_dev,
			  "timed out waiting for hardware reset\n");
		goto fail3;
	}
	netif_dbg(efx, hw, efx->net_dev, "hardware reset complete\n");

	return 0;

	/* pci_save_state() and pci_restore_state() MUST be called in pairs */
fail2:
	pci_restore_state(efx->pci_dev);
fail1:
	/* fail3 is reached only after the reset timed out, by which
	 * point any saved PCI state has already been restored above */
fail3:
	return rc;
}
1175
1176static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
1177{
1178	struct falcon_nic_data *nic_data = efx->nic_data;
1179	int rc;
1180
1181	mutex_lock(&nic_data->spi_lock);
1182	rc = __falcon_reset_hw(efx, method);
1183	mutex_unlock(&nic_data->spi_lock);
1184
1185	return rc;
1186}
1187
/* Hardware monitor callback (the .monitor member of the efx_nic_type
 * tables below).  Must be called with efx->mac_lock held.
 *
 * Checks the board sensors — a fault puts the PHY into low-power mode
 * and reconfigures the port — then polls for link changes and, if the
 * link state changed, resets and reconfigures the MAC to match.
 */
static void falcon_monitor(struct efx_nic *efx)
{
	bool link_changed;
	int rc;

	BUG_ON(!mutex_is_locked(&efx->mac_lock));

	rc = falcon_board(efx)->type->monitor(efx);
	if (rc) {
		netif_err(efx, hw, efx->net_dev,
			  "Board sensor %s; shutting down PHY\n",
			  (rc == -ERANGE) ? "reported fault" : "failed");
		efx->phy_mode |= PHY_MODE_LOW_POWER;
		rc = __efx_reconfigure_port(efx);
		WARN_ON(rc);
	}

	/* Use the loopback-aware link poll in internal loopback modes */
	if (LOOPBACK_INTERNAL(efx))
		link_changed = falcon_loopback_link_poll(efx);
	else
		link_changed = efx->phy_op->poll(efx);

	if (link_changed) {
		/* Quiesce stats fetching while the MAC is reworked */
		falcon_stop_nic_stats(efx);
		falcon_deconfigure_mac_wrapper(efx);

		falcon_reset_macs(efx);
		rc = falcon_reconfigure_xmac(efx);
		BUG_ON(rc);

		falcon_start_nic_stats(efx);

		efx_link_status_changed(efx);
	}

	falcon_poll_xmac(efx);
}
1225
/* Zeroes out the SRAM contents.  This routine must be called in
 * process context and is allowed to sleep.
 *
 * Returns 0 on success, or -ETIMEDOUT if the hardware does not clear
 * FRF_AZ_SRM_INIT_EN within ~0.4 s.
 */
static int falcon_reset_sram(struct efx_nic *efx)
{
	efx_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker;
	int count;

	/* Set the SRAM wake/sleep GPIO appropriately. */
	efx_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
	EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1);
	EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1);
	efx_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);

	/* Initiate SRAM reset */
	EFX_POPULATE_OWORD_2(srm_cfg_reg_ker,
			     FRF_AZ_SRM_INIT_EN, 1,
			     FRF_AZ_SRM_NB_SZ, 0);
	efx_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);

	/* Wait for SRAM reset to complete: the do-while sleeps before
	 * the first poll, so the hardware always gets at least 20 ms */
	count = 0;
	do {
		netif_dbg(efx, hw, efx->net_dev,
			  "waiting for SRAM reset (attempt %d)...\n", count);

		/* SRAM reset is slow; expect around 16ms */
		schedule_timeout_uninterruptible(HZ / 50);

		/* Check for reset complete */
		efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
		if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) {
			netif_dbg(efx, hw, efx->net_dev,
				  "SRAM reset complete\n");

			return 0;
		}
	} while (++count < 20);	/* wait up to 0.4 sec */

	netif_err(efx, hw, efx->net_dev, "timed out waiting for SRAM reset\n");
	return -ETIMEDOUT;
}
1268
1269static void falcon_spi_device_init(struct efx_nic *efx,
1270				  struct efx_spi_device *spi_device,
1271				  unsigned int device_id, u32 device_type)
1272{
1273	if (device_type != 0) {
1274		spi_device->device_id = device_id;
1275		spi_device->size =
1276			1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE);
1277		spi_device->addr_len =
1278			SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN);
1279		spi_device->munge_address = (spi_device->size == 1 << 9 &&
1280					     spi_device->addr_len == 1);
1281		spi_device->erase_command =
1282			SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ERASE_CMD);
1283		spi_device->erase_size =
1284			1 << SPI_DEV_TYPE_FIELD(device_type,
1285						SPI_DEV_TYPE_ERASE_SIZE);
1286		spi_device->block_size =
1287			1 << SPI_DEV_TYPE_FIELD(device_type,
1288						SPI_DEV_TYPE_BLOCK_SIZE);
1289	} else {
1290		spi_device->size = 0;
1291	}
1292}
1293
/* Extract non-volatile configuration:
 *  - port 0 PHY type and MDIO address
 *  - SPI device descriptors (board struct version >= 3 only)
 *  - permanent MAC address
 *  - board revision, passed on to falcon_probe_board()
 * Returns 0 or a negative errno; -EINVAL indicates invalid NVRAM
 * contents (see the caller in falcon_probe_nic()).
 */
static int falcon_probe_nvconfig(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	struct falcon_nvconfig *nvconfig;
	int rc;

	nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
	if (!nvconfig)
		return -ENOMEM;

	rc = falcon_read_nvram(efx, nvconfig);
	if (rc)
		goto out;

	efx->phy_type = nvconfig->board_v2.port0_phy_type;
	efx->mdio.prtad = nvconfig->board_v2.port0_phy_addr;

	/* SPI device descriptions only exist from struct version 3 on;
	 * otherwise the defaults set up by falcon_probe_spi_devices()
	 * remain in effect */
	if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
		falcon_spi_device_init(
			efx, &nic_data->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
			le32_to_cpu(nvconfig->board_v3
				    .spi_device_type[FFE_AB_SPI_DEVICE_FLASH]));
		falcon_spi_device_init(
			efx, &nic_data->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
			le32_to_cpu(nvconfig->board_v3
				    .spi_device_type[FFE_AB_SPI_DEVICE_EEPROM]));
	}

	/* Read the MAC addresses */
	memcpy(efx->net_dev->perm_addr, nvconfig->mac_address[0], ETH_ALEN);

	netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n",
		  efx->phy_type, efx->mdio.prtad);

	rc = falcon_probe_board(efx,
				le16_to_cpu(nvconfig->board_v2.board_revision));
out:
	kfree(nvconfig);
	return rc;
}
1335
1336static void falcon_dimension_resources(struct efx_nic *efx)
1337{
1338	efx->rx_dc_base = 0x20000;
1339	efx->tx_dc_base = 0x26000;
1340}
1341
/* Probe all SPI devices on the NIC.  GPIO3's power-up value indicates
 * whether the NIC booted from an SPI device at all, and FRF_AB_SF_PRST
 * distinguishes flash from EEPROM; only the boot device (if any) gets
 * a default descriptor here.  falcon_probe_nvconfig() may later
 * replace these descriptors with NVRAM-specified ones.
 */
static void falcon_probe_spi_devices(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
	int boot_dev;

	efx_reado(efx, &gpio_ctl, FR_AB_GPIO_CTL);
	efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
	efx_reado(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);

	if (EFX_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) {
		boot_dev = (EFX_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ?
			    FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM);
		netif_dbg(efx, probe, efx->net_dev, "Booted from %s\n",
			  boot_dev == FFE_AB_SPI_DEVICE_FLASH ?
			  "flash" : "EEPROM");
	} else {
		/* Disable VPD and set clock dividers to safe
		 * values for initial programming. */
		boot_dev = -1;
		netif_dbg(efx, probe, efx->net_dev,
			  "Booted from internal ASIC settings;"
			  " setting SPI config\n");
		EFX_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0,
				     /* 125 MHz / 7 ~= 20 MHz */
				     FRF_AB_EE_SF_CLOCK_DIV, 7,
				     /* 125 MHz / 63 ~= 2 MHz */
				     FRF_AB_EE_EE_CLOCK_DIV, 63);
		efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
	}

	/* spi_lock guards all subsequent SPI accesses, including the
	 * reset path (falcon_reset_hw()) */
	mutex_init(&nic_data->spi_lock);

	if (boot_dev == FFE_AB_SPI_DEVICE_FLASH)
		falcon_spi_device_init(efx, &nic_data->spi_flash,
				       FFE_AB_SPI_DEVICE_FLASH,
				       default_flash_type);
	if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM)
		falcon_spi_device_init(efx, &nic_data->spi_eeprom,
				       FFE_AB_SPI_DEVICE_EEPROM,
				       large_eeprom_type);
}
1385
/* Probe-time initialisation: allocates per-NIC state, validates the
 * hardware revision, resets the NIC, reads the non-volatile
 * configuration and brings up board-level services (I2C adapter,
 * board driver, statistics timer).  On failure everything acquired so
 * far is unwound in reverse order via the fail* labels.
 */
static int falcon_probe_nic(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data;
	struct falcon_board *board;
	int rc;

	/* Allocate storage for hardware specific data */
	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	efx->nic_data = nic_data;

	rc = -ENODEV;

	if (efx_nic_fpga_ver(efx) != 0) {
		netif_err(efx, probe, efx->net_dev,
			  "Falcon FPGA not supported\n");
		goto fail1;
	}

	/* Rev A1 has extra constraints and needs a handle on its twin
	 * PCI function */
	if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
		efx_oword_t nic_stat;
		struct pci_dev *dev;
		u8 pci_rev = efx->pci_dev->revision;

		if ((pci_rev == 0xff) || (pci_rev == 0)) {
			netif_err(efx, probe, efx->net_dev,
				  "Falcon rev A0 not supported\n");
			goto fail1;
		}
		efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
		if (EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) {
			netif_err(efx, probe, efx->net_dev,
				  "Falcon rev A1 1G not supported\n");
			goto fail1;
		}
		if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) {
			netif_err(efx, probe, efx->net_dev,
				  "Falcon rev A1 PCI-X not supported\n");
			goto fail1;
		}

		/* The secondary function is the device at devfn + 1 on
		 * the same bus; keep a reference until
		 * falcon_remove_nic() */
		dev = pci_dev_get(efx->pci_dev);
		while ((dev = pci_get_device(PCI_VENDOR_ID_SOLARFLARE,
					     PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1,
					     dev))) {
			if (dev->bus == efx->pci_dev->bus &&
			    dev->devfn == efx->pci_dev->devfn + 1) {
				nic_data->pci_dev2 = dev;
				break;
			}
		}
		if (!nic_data->pci_dev2) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to find secondary function\n");
			rc = -ENODEV;
			goto fail2;
		}
	}

	/* Now we can reset the NIC */
	rc = __falcon_reset_hw(efx, RESET_TYPE_ALL);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
		goto fail3;
	}

	/* Allocate memory for INT_KER */
	rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
	if (rc)
		goto fail4;
	BUG_ON(efx->irq_status.dma_addr & 0x0f);

	netif_dbg(efx, probe, efx->net_dev,
		  "INT_KER at %llx (virt %p phys %llx)\n",
		  (u64)efx->irq_status.dma_addr,
		  efx->irq_status.addr,
		  (u64)virt_to_phys(efx->irq_status.addr));

	falcon_probe_spi_devices(efx);

	/* Read in the non-volatile configuration */
	rc = falcon_probe_nvconfig(efx);
	if (rc) {
		if (rc == -EINVAL)
			netif_err(efx, probe, efx->net_dev, "NVRAM is invalid\n");
		goto fail5;
	}

	efx->timer_quantum_ns = 4968; /* 621 cycles */

	/* Initialise I2C adapter */
	board = falcon_board(efx);
	board->i2c_adap.owner = THIS_MODULE;
	board->i2c_data = falcon_i2c_bit_operations;
	board->i2c_data.data = efx;
	board->i2c_adap.algo_data = &board->i2c_data;
	board->i2c_adap.dev.parent = &efx->pci_dev->dev;
	strlcpy(board->i2c_adap.name, "SFC4000 GPIO",
		sizeof(board->i2c_adap.name));
	rc = i2c_bit_add_bus(&board->i2c_adap);
	if (rc)
		goto fail5;

	rc = falcon_board(efx)->type->init(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to initialise board\n");
		goto fail6;
	}

	/* Stats fetching starts disabled; falcon_start_nic_stats()
	 * decrements this count to enable it */
	nic_data->stats_disable_count = 1;
	setup_timer(&nic_data->stats_timer, &falcon_stats_timer_func,
		    (unsigned long)efx);

	return 0;

 fail6:
	BUG_ON(i2c_del_adapter(&board->i2c_adap));
	memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
 fail5:
	efx_nic_free_buffer(efx, &efx->irq_status);
 fail4:
 fail3:
	if (nic_data->pci_dev2) {
		pci_dev_put(nic_data->pci_dev2);
		nic_data->pci_dev2 = NULL;
	}
 fail2:
 fail1:
	kfree(efx->nic_data);
	return rc;
}
1519
/* Program the RX datapath configuration register: buffer splitting,
 * flow-control (XON/XOFF) thresholds and, on B0 and later, hash
 * insertion.  A1 and B0 use different field positions and thresholds
 * because the RX FIFO sizes differ.
 */
static void falcon_init_rx_cfg(struct efx_nic *efx)
{
	/* Prior to Siena the RX DMA engine will split each frame at
	 * intervals of RX_USR_BUF_SIZE (32-byte units). We set it to
	 * be so large that that never happens. */
	const unsigned huge_buf_size = (3 * 4096) >> 5;
	/* RX control FIFO thresholds (32 entries) */
	const unsigned ctrl_xon_thr = 20;
	const unsigned ctrl_xoff_thr = 25;
	efx_oword_t reg;

	efx_reado(efx, &reg, FR_AZ_RX_CFG);
	if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
		/* Data FIFO size is 5.5K */
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
				    huge_buf_size);
		/* MAC thresholds are in 256-byte units (hence the >> 8) */
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8);
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8);
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr);
	} else {
		/* Data FIFO size is 80K; register fields moved */
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
				    huge_buf_size);
		/* Send XON and XOFF at ~3 * max MTU away from empty/full */
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);

		/* Enable hash insertion. This is broken for the
		 * 'Falcon' hash so also select Toeplitz TCP/IPv4 and
		 * IPv4 hashes. */
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_INSRT_HDR, 1);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_ALG, 1);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_IP_HASH, 1);
	}
	/* Always enable XOFF signal from RX FIFO.  We enable
	 * or disable transmission of pause frames at the MAC. */
	EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
	efx_writeo(efx, &reg, FR_AZ_RX_CFG);
}
1565
/* This call performs hardware-specific global initialisation, such as
 * defining the descriptor cache sizes and number of RSS channels.
 * It does not set up any buffers, descriptor rings or event queues.
 *
 * Includes several numbered hardware workarounds (see workarounds.h)
 * applied conditionally per chip revision.
 */
static int falcon_init_nic(struct efx_nic *efx)
{
	efx_oword_t temp;
	int rc;

	/* Use on-chip SRAM */
	efx_reado(efx, &temp, FR_AB_NIC_STAT);
	EFX_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1);
	efx_writeo(efx, &temp, FR_AB_NIC_STAT);

	/* SRAM must be zeroed before the descriptor caches are used */
	rc = falcon_reset_sram(efx);
	if (rc)
		return rc;

	/* Clear the parity enables on the TX data fifos as
	 * they produce false parity errors because of timing issues
	 */
	if (EFX_WORKAROUND_5129(efx)) {
		efx_reado(efx, &temp, FR_AZ_CSR_SPARE);
		EFX_SET_OWORD_FIELD(temp, FRF_AB_MEM_PERR_EN_TX_DATA, 0);
		efx_writeo(efx, &temp, FR_AZ_CSR_SPARE);
	}

	/* Limit filter search depth (workaround 7244) */
	if (EFX_WORKAROUND_7244(efx)) {
		efx_reado(efx, &temp, FR_BZ_RX_FILTER_CTL);
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_FULL_SRCH_LIMIT, 8);
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_WILD_SRCH_LIMIT, 8);
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_FULL_SRCH_LIMIT, 8);
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_WILD_SRCH_LIMIT, 8);
		efx_writeo(efx, &temp, FR_BZ_RX_FILTER_CTL);
	}

	/* XXX This is documented only for Falcon A0/A1 */
	/* Setup RX.  Wait for descriptor is broken and must
	 * be disabled.  RXDP recovery shouldn't be needed, but is.
	 */
	efx_reado(efx, &temp, FR_AA_RX_SELF_RST);
	EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_NODESC_WAIT_DIS, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_SELF_RST_EN, 1);
	if (EFX_WORKAROUND_5583(efx))
		EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_ISCSI_DIS, 1);
	efx_writeo(efx, &temp, FR_AA_RX_SELF_RST);

	/* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
	 * descriptors (which is bad).
	 */
	efx_reado(efx, &temp, FR_AZ_TX_CFG);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
	efx_writeo(efx, &temp, FR_AZ_TX_CFG);

	falcon_init_rx_cfg(efx);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		/* Set hash key for IPv4 */
		memcpy(&temp, efx->rx_hash_key, sizeof(temp));
		efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);

		/* Set destination of both TX and RX Flush events */
		EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
		efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
	}

	efx_nic_init_common(efx);

	return 0;
}
1636
/* Inverse of falcon_probe_nic(): tears down board services, frees the
 * IRQ status buffer, resets the hardware and drops the secondary PCI
 * function reference before freeing the private state.
 */
static void falcon_remove_nic(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	struct falcon_board *board = falcon_board(efx);
	int rc;

	board->type->fini(efx);

	/* Remove I2C adapter and clear it in preparation for a retry */
	rc = i2c_del_adapter(&board->i2c_adap);
	BUG_ON(rc);
	memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));

	efx_nic_free_buffer(efx, &efx->irq_status);

	/* Leave the hardware in a fully reset state */
	__falcon_reset_hw(efx, RESET_TYPE_ALL);

	/* Release the second function after the reset */
	if (nic_data->pci_dev2) {
		pci_dev_put(nic_data->pci_dev2);
		nic_data->pci_dev2 = NULL;
	}

	/* Tear down the private nic state */
	kfree(efx->nic_data);
	efx->nic_data = NULL;
}
1664
/* Accumulate hardware statistics.  No-op while stats are disabled
 * (stats_disable_count non-zero).  The RX no-descriptor drop count is
 * read directly from the register; MAC statistics are written by the
 * hardware into a DMA buffer and are only folded in once the
 * FALCON_STATS_DONE completion flag has been observed.
 */
static void falcon_update_nic_stats(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t cnt;

	if (nic_data->stats_disable_count)
		return;

	efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
	efx->n_rx_nodesc_drop_cnt +=
		EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);

	if (nic_data->stats_pending &&
	    *nic_data->stats_dma_done == FALCON_STATS_DONE) {
		nic_data->stats_pending = false;
		rmb(); /* read the done flag before the stats */
		falcon_update_stats_xmac(efx);
	}
}
1684
/* Decrement the stats-disable count under stats_lock; when it reaches
 * zero, kick off a statistics fetch.  Pairs with
 * falcon_stop_nic_stats(); calls may nest.
 */
void falcon_start_nic_stats(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	spin_lock_bh(&efx->stats_lock);
	if (--nic_data->stats_disable_count == 0)
		falcon_stats_request(efx);
	spin_unlock_bh(&efx->stats_lock);
}
1694
/* Disable statistics fetching (nestable via stats_disable_count),
 * cancel the stats timer and briefly wait for any in-flight stats DMA
 * before finalising.  May sleep.
 */
void falcon_stop_nic_stats(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	int i;

	might_sleep();

	spin_lock_bh(&efx->stats_lock);
	++nic_data->stats_disable_count;
	spin_unlock_bh(&efx->stats_lock);

	del_timer_sync(&nic_data->stats_timer);

	/* Wait enough time for the most recent transfer to
	 * complete. */
	for (i = 0; i < 4 && nic_data->stats_pending; i++) {
		if (*nic_data->stats_dma_done == FALCON_STATS_DONE)
			break;
		msleep(1);
	}

	spin_lock_bh(&efx->stats_lock);
	falcon_stats_complete(efx);
	spin_unlock_bh(&efx->stats_lock);
}
1720
1721static void falcon_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
1722{
1723	falcon_board(efx)->type->set_id_led(efx, mode);
1724}
1725
1726/**************************************************************************
1727 *
1728 * Wake on LAN
1729 *
1730 **************************************************************************
1731 */
1732
1733static void falcon_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
1734{
1735	wol->supported = 0;
1736	wol->wolopts = 0;
1737	memset(&wol->sopass, 0, sizeof(wol->sopass));
1738}
1739
1740static int falcon_set_wol(struct efx_nic *efx, u32 type)
1741{
1742	if (type != 0)
1743		return -EINVAL;
1744	return 0;
1745}
1746
1747/**************************************************************************
1748 *
1749 * Revision-dependent attributes used by efx.c and nic.c
1750 *
1751 **************************************************************************
1752 */
1753
/* Method table and hardware layout constants for Falcon rev A1
 * (SFC4000A).  Note there is no .test_registers here — the register
 * self-test is only provided for rev B0 (falcon_b0_nic_type below).
 */
const struct efx_nic_type falcon_a1_nic_type = {
	.probe = falcon_probe_nic,
	.remove = falcon_remove_nic,
	.init = falcon_init_nic,
	.dimension_resources = falcon_dimension_resources,
	.fini = efx_port_dummy_op_void,
	.monitor = falcon_monitor,
	.map_reset_reason = falcon_map_reset_reason,
	.map_reset_flags = falcon_map_reset_flags,
	.reset = falcon_reset_hw,
	.probe_port = falcon_probe_port,
	.remove_port = falcon_remove_port,
	.handle_global_event = falcon_handle_global_event,
	.prepare_flush = falcon_prepare_flush,
	.update_stats = falcon_update_nic_stats,
	.start_stats = falcon_start_nic_stats,
	.stop_stats = falcon_stop_nic_stats,
	.set_id_led = falcon_set_id_led,
	.push_irq_moderation = falcon_push_irq_moderation,
	.reconfigure_port = falcon_reconfigure_port,
	.reconfigure_mac = falcon_reconfigure_xmac,
	.check_mac_fault = falcon_xmac_check_fault,
	.get_wol = falcon_get_wol,
	.set_wol = falcon_set_wol,
	.resume_wol = efx_port_dummy_op_void,
	.test_nvram = falcon_test_nvram,

	.revision = EFX_REV_FALCON_A1,
	.mem_map_size = 0x20000,
	.txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
	.rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER,
	.buf_tbl_base = FR_AA_BUF_FULL_TBL_KER,
	.evq_ptr_tbl_base = FR_AA_EVQ_PTR_TBL_KER,
	.evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER,
	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
	.rx_buffer_padding = 0x24,
	.max_interrupt_mode = EFX_INT_MODE_MSI,
	.phys_addr_channels = 4,
	.timer_period_max =  1 << FRF_AB_TC_TIMER_VAL_WIDTH,
	.offload_features = NETIF_F_IP_CSUM,
};
1795
/* Method table and hardware layout constants for Falcon rev B0.
 * Differs from rev A1 in register table bases (FR_BZ_*), a larger
 * memory map, register self-test support, MSI-X and RX hash
 * insertion (NETIF_F_RXHASH).
 */
const struct efx_nic_type falcon_b0_nic_type = {
	.probe = falcon_probe_nic,
	.remove = falcon_remove_nic,
	.init = falcon_init_nic,
	.dimension_resources = falcon_dimension_resources,
	.fini = efx_port_dummy_op_void,
	.monitor = falcon_monitor,
	.map_reset_reason = falcon_map_reset_reason,
	.map_reset_flags = falcon_map_reset_flags,
	.reset = falcon_reset_hw,
	.probe_port = falcon_probe_port,
	.remove_port = falcon_remove_port,
	.handle_global_event = falcon_handle_global_event,
	.prepare_flush = falcon_prepare_flush,
	.update_stats = falcon_update_nic_stats,
	.start_stats = falcon_start_nic_stats,
	.stop_stats = falcon_stop_nic_stats,
	.set_id_led = falcon_set_id_led,
	.push_irq_moderation = falcon_push_irq_moderation,
	.reconfigure_port = falcon_reconfigure_port,
	.reconfigure_mac = falcon_reconfigure_xmac,
	.check_mac_fault = falcon_xmac_check_fault,
	.get_wol = falcon_get_wol,
	.set_wol = falcon_set_wol,
	.resume_wol = efx_port_dummy_op_void,
	.test_registers = falcon_b0_test_registers,
	.test_nvram = falcon_test_nvram,

	.revision = EFX_REV_FALCON_B0,
	/* Map everything up to and including the RSS indirection
	 * table.  Don't map MSI-X table, MSI-X PBA since Linux
	 * requires that they not be mapped.  */
	.mem_map_size = (FR_BZ_RX_INDIRECTION_TBL +
			 FR_BZ_RX_INDIRECTION_TBL_STEP *
			 FR_BZ_RX_INDIRECTION_TBL_ROWS),
	.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
	.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
	.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
	.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
	.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
	.rx_buffer_hash_size = 0x10,
	.rx_buffer_padding = 0,
	.max_interrupt_mode = EFX_INT_MODE_MSIX,
	.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
				   * interrupt handler only supports 32
				   * channels */
	.timer_period_max =  1 << FRF_AB_TC_TIMER_VAL_WIDTH,
	.offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
};
1846