Linux Audio

Check our new training course

Loading...
v4.17
 
   1/****************************************************************************
   2 * Driver for Solarflare network controllers and boards
   3 * Copyright 2005-2006 Fen Systems Ltd.
   4 * Copyright 2006-2013 Solarflare Communications Inc.
   5 *
   6 * This program is free software; you can redistribute it and/or modify it
   7 * under the terms of the GNU General Public License version 2 as published
   8 * by the Free Software Foundation, incorporated herein by reference.
   9 */
  10
  11#include <linux/bitops.h>
  12#include <linux/delay.h>
  13#include <linux/pci.h>
  14#include <linux/module.h>
  15#include <linux/seq_file.h>
  16#include <linux/i2c.h>
  17#include <linux/mii.h>
  18#include <linux/slab.h>
  19#include <linux/sched/signal.h>
  20
  21#include "net_driver.h"
  22#include "bitfield.h"
  23#include "efx.h"
  24#include "nic.h"
  25#include "farch_regs.h"
  26#include "io.h"
  27#include "phy.h"
  28#include "workarounds.h"
  29#include "selftest.h"
  30#include "mdio_10g.h"
  31
  32/* Hardware control for SFC4000 (aka Falcon). */
  33
  34/**************************************************************************
  35 *
  36 * NIC stats
  37 *
  38 **************************************************************************
  39 */
  40
  41#define FALCON_MAC_STATS_SIZE 0x100
  42
  43#define XgRxOctets_offset 0x0
  44#define XgRxOctets_WIDTH 48
  45#define XgRxOctetsOK_offset 0x8
  46#define XgRxOctetsOK_WIDTH 48
  47#define XgRxPkts_offset 0x10
  48#define XgRxPkts_WIDTH 32
  49#define XgRxPktsOK_offset 0x14
  50#define XgRxPktsOK_WIDTH 32
  51#define XgRxBroadcastPkts_offset 0x18
  52#define XgRxBroadcastPkts_WIDTH 32
  53#define XgRxMulticastPkts_offset 0x1C
  54#define XgRxMulticastPkts_WIDTH 32
  55#define XgRxUnicastPkts_offset 0x20
  56#define XgRxUnicastPkts_WIDTH 32
  57#define XgRxUndersizePkts_offset 0x24
  58#define XgRxUndersizePkts_WIDTH 32
  59#define XgRxOversizePkts_offset 0x28
  60#define XgRxOversizePkts_WIDTH 32
  61#define XgRxJabberPkts_offset 0x2C
  62#define XgRxJabberPkts_WIDTH 32
  63#define XgRxUndersizeFCSerrorPkts_offset 0x30
  64#define XgRxUndersizeFCSerrorPkts_WIDTH 32
  65#define XgRxDropEvents_offset 0x34
  66#define XgRxDropEvents_WIDTH 32
  67#define XgRxFCSerrorPkts_offset 0x38
  68#define XgRxFCSerrorPkts_WIDTH 32
  69#define XgRxAlignError_offset 0x3C
  70#define XgRxAlignError_WIDTH 32
  71#define XgRxSymbolError_offset 0x40
  72#define XgRxSymbolError_WIDTH 32
  73#define XgRxInternalMACError_offset 0x44
  74#define XgRxInternalMACError_WIDTH 32
  75#define XgRxControlPkts_offset 0x48
  76#define XgRxControlPkts_WIDTH 32
  77#define XgRxPausePkts_offset 0x4C
  78#define XgRxPausePkts_WIDTH 32
  79#define XgRxPkts64Octets_offset 0x50
  80#define XgRxPkts64Octets_WIDTH 32
  81#define XgRxPkts65to127Octets_offset 0x54
  82#define XgRxPkts65to127Octets_WIDTH 32
  83#define XgRxPkts128to255Octets_offset 0x58
  84#define XgRxPkts128to255Octets_WIDTH 32
  85#define XgRxPkts256to511Octets_offset 0x5C
  86#define XgRxPkts256to511Octets_WIDTH 32
  87#define XgRxPkts512to1023Octets_offset 0x60
  88#define XgRxPkts512to1023Octets_WIDTH 32
  89#define XgRxPkts1024to15xxOctets_offset 0x64
  90#define XgRxPkts1024to15xxOctets_WIDTH 32
  91#define XgRxPkts15xxtoMaxOctets_offset 0x68
  92#define XgRxPkts15xxtoMaxOctets_WIDTH 32
  93#define XgRxLengthError_offset 0x6C
  94#define XgRxLengthError_WIDTH 32
  95#define XgTxPkts_offset 0x80
  96#define XgTxPkts_WIDTH 32
  97#define XgTxOctets_offset 0x88
  98#define XgTxOctets_WIDTH 48
  99#define XgTxMulticastPkts_offset 0x90
 100#define XgTxMulticastPkts_WIDTH 32
 101#define XgTxBroadcastPkts_offset 0x94
 102#define XgTxBroadcastPkts_WIDTH 32
 103#define XgTxUnicastPkts_offset 0x98
 104#define XgTxUnicastPkts_WIDTH 32
 105#define XgTxControlPkts_offset 0x9C
 106#define XgTxControlPkts_WIDTH 32
 107#define XgTxPausePkts_offset 0xA0
 108#define XgTxPausePkts_WIDTH 32
 109#define XgTxPkts64Octets_offset 0xA4
 110#define XgTxPkts64Octets_WIDTH 32
 111#define XgTxPkts65to127Octets_offset 0xA8
 112#define XgTxPkts65to127Octets_WIDTH 32
 113#define XgTxPkts128to255Octets_offset 0xAC
 114#define XgTxPkts128to255Octets_WIDTH 32
 115#define XgTxPkts256to511Octets_offset 0xB0
 116#define XgTxPkts256to511Octets_WIDTH 32
 117#define XgTxPkts512to1023Octets_offset 0xB4
 118#define XgTxPkts512to1023Octets_WIDTH 32
 119#define XgTxPkts1024to15xxOctets_offset 0xB8
 120#define XgTxPkts1024to15xxOctets_WIDTH 32
 121#define XgTxPkts1519toMaxOctets_offset 0xBC
 122#define XgTxPkts1519toMaxOctets_WIDTH 32
 123#define XgTxUndersizePkts_offset 0xC0
 124#define XgTxUndersizePkts_WIDTH 32
 125#define XgTxOversizePkts_offset 0xC4
 126#define XgTxOversizePkts_WIDTH 32
 127#define XgTxNonTcpUdpPkt_offset 0xC8
 128#define XgTxNonTcpUdpPkt_WIDTH 16
 129#define XgTxMacSrcErrPkt_offset 0xCC
 130#define XgTxMacSrcErrPkt_WIDTH 16
 131#define XgTxIpSrcErrPkt_offset 0xD0
 132#define XgTxIpSrcErrPkt_WIDTH 16
 133#define XgDmaDone_offset 0xD4
 134#define XgDmaDone_WIDTH 32
 135
/* Read the XgDmaDone word out of the DMA'd statistics buffer.
 * NOTE(review): presumably used to detect completion of a stats DMA --
 * confirm against the users of this macro. */
#define FALCON_XMAC_STATS_DMA_FLAG(efx)				\
	(*(u32 *)((efx)->stats_buffer.addr + XgDmaDone_offset))

/* Describe one hardware statistic DMA'd by the MAC: external name,
 * field width in bits, and byte offset within the stats buffer. */
#define FALCON_DMA_STAT(ext_name, hw_name)				\
	[FALCON_STAT_ ## ext_name] =					\
	{ #ext_name,							\
	  /* 48-bit stats are zero-padded to 64 on DMA */		\
	  hw_name ## _ ## WIDTH == 48 ? 64 : hw_name ## _ ## WIDTH,	\
	  hw_name ## _ ## offset }
/* Statistics maintained in software; no DMA width or offset. */
#define FALCON_OTHER_STAT(ext_name)					\
	[FALCON_STAT_ ## ext_name] = { #ext_name, 0, 0 }
#define GENERIC_SW_STAT(ext_name)				\
	[GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }
 149
/* Descriptor for every FALCON_STAT_* index: name plus, for DMA'd stats,
 * the field width and offset in the XMAC statistics buffer (see the
 * Xg*_offset/_WIDTH definitions above). */
static const struct ef4_hw_stat_desc falcon_stat_desc[FALCON_STAT_COUNT] = {
	FALCON_DMA_STAT(tx_bytes, XgTxOctets),
	FALCON_DMA_STAT(tx_packets, XgTxPkts),
	FALCON_DMA_STAT(tx_pause, XgTxPausePkts),
	FALCON_DMA_STAT(tx_control, XgTxControlPkts),
	FALCON_DMA_STAT(tx_unicast, XgTxUnicastPkts),
	FALCON_DMA_STAT(tx_multicast, XgTxMulticastPkts),
	FALCON_DMA_STAT(tx_broadcast, XgTxBroadcastPkts),
	FALCON_DMA_STAT(tx_lt64, XgTxUndersizePkts),
	FALCON_DMA_STAT(tx_64, XgTxPkts64Octets),
	FALCON_DMA_STAT(tx_65_to_127, XgTxPkts65to127Octets),
	FALCON_DMA_STAT(tx_128_to_255, XgTxPkts128to255Octets),
	FALCON_DMA_STAT(tx_256_to_511, XgTxPkts256to511Octets),
	FALCON_DMA_STAT(tx_512_to_1023, XgTxPkts512to1023Octets),
	FALCON_DMA_STAT(tx_1024_to_15xx, XgTxPkts1024to15xxOctets),
	FALCON_DMA_STAT(tx_15xx_to_jumbo, XgTxPkts1519toMaxOctets),
	FALCON_DMA_STAT(tx_gtjumbo, XgTxOversizePkts),
	FALCON_DMA_STAT(tx_non_tcpudp, XgTxNonTcpUdpPkt),
	FALCON_DMA_STAT(tx_mac_src_error, XgTxMacSrcErrPkt),
	FALCON_DMA_STAT(tx_ip_src_error, XgTxIpSrcErrPkt),
	FALCON_DMA_STAT(rx_bytes, XgRxOctets),
	FALCON_DMA_STAT(rx_good_bytes, XgRxOctetsOK),
	FALCON_OTHER_STAT(rx_bad_bytes),
	FALCON_DMA_STAT(rx_packets, XgRxPkts),
	FALCON_DMA_STAT(rx_good, XgRxPktsOK),
	FALCON_DMA_STAT(rx_bad, XgRxFCSerrorPkts),
	FALCON_DMA_STAT(rx_pause, XgRxPausePkts),
	FALCON_DMA_STAT(rx_control, XgRxControlPkts),
	FALCON_DMA_STAT(rx_unicast, XgRxUnicastPkts),
	FALCON_DMA_STAT(rx_multicast, XgRxMulticastPkts),
	FALCON_DMA_STAT(rx_broadcast, XgRxBroadcastPkts),
	FALCON_DMA_STAT(rx_lt64, XgRxUndersizePkts),
	FALCON_DMA_STAT(rx_64, XgRxPkts64Octets),
	FALCON_DMA_STAT(rx_65_to_127, XgRxPkts65to127Octets),
	FALCON_DMA_STAT(rx_128_to_255, XgRxPkts128to255Octets),
	FALCON_DMA_STAT(rx_256_to_511, XgRxPkts256to511Octets),
	FALCON_DMA_STAT(rx_512_to_1023, XgRxPkts512to1023Octets),
	FALCON_DMA_STAT(rx_1024_to_15xx, XgRxPkts1024to15xxOctets),
	FALCON_DMA_STAT(rx_15xx_to_jumbo, XgRxPkts15xxtoMaxOctets),
	FALCON_DMA_STAT(rx_gtjumbo, XgRxOversizePkts),
	FALCON_DMA_STAT(rx_bad_lt64, XgRxUndersizeFCSerrorPkts),
	FALCON_DMA_STAT(rx_bad_gtjumbo, XgRxJabberPkts),
	FALCON_DMA_STAT(rx_overflow, XgRxDropEvents),
	FALCON_DMA_STAT(rx_symbol_error, XgRxSymbolError),
	FALCON_DMA_STAT(rx_align_error, XgRxAlignError),
	FALCON_DMA_STAT(rx_length_error, XgRxLengthError),
	FALCON_DMA_STAT(rx_internal_error, XgRxInternalMACError),
	FALCON_OTHER_STAT(rx_nodesc_drop_cnt),
	GENERIC_SW_STAT(rx_nodesc_trunc),
	GENERIC_SW_STAT(rx_noskb_drops),
};
/* All Falcon statistics are always enabled: every bit of the mask is
 * set, one all-ones word per BITS_TO_LONGS(FALCON_STAT_COUNT) entry. */
static const unsigned long falcon_stat_mask[] = {
	[0 ... BITS_TO_LONGS(FALCON_STAT_COUNT) - 1] = ~0UL,
};
 204
 205/**************************************************************************
 206 *
 207 * Basic SPI command set and bit definitions
 208 *
 209 *************************************************************************/
 210
 211#define SPI_WRSR 0x01		/* Write status register */
 212#define SPI_WRITE 0x02		/* Write data to memory array */
 213#define SPI_READ 0x03		/* Read data from memory array */
 214#define SPI_WRDI 0x04		/* Reset write enable latch */
 215#define SPI_RDSR 0x05		/* Read status register */
 216#define SPI_WREN 0x06		/* Set write enable latch */
 217#define SPI_SST_EWSR 0x50	/* SST: Enable write to status register */
 218
 219#define SPI_STATUS_WPEN 0x80	/* Write-protect pin enabled */
 220#define SPI_STATUS_BP2 0x10	/* Block protection bit 2 */
 221#define SPI_STATUS_BP1 0x08	/* Block protection bit 1 */
 222#define SPI_STATUS_BP0 0x04	/* Block protection bit 0 */
 223#define SPI_STATUS_WEN 0x02	/* State of the write enable latch */
 224#define SPI_STATUS_NRDY 0x01	/* Device busy flag */
 225
 226/**************************************************************************
 227 *
 228 * Non-volatile memory layout
 229 *
 230 **************************************************************************
 231 */
 232
 233/* SFC4000 flash is partitioned into:
 234 *     0-0x400       chip and board config (see struct falcon_nvconfig)
 235 *     0x400-0x8000  unused (or may contain VPD if EEPROM not present)
 236 *     0x8000-end    boot code (mapped to PCI expansion ROM)
 237 * SFC4000 small EEPROM (size < 0x400) is used for VPD only.
 238 * SFC4000 large EEPROM (size >= 0x400) is partitioned into:
 239 *     0-0x400       chip and board config
 240 *     configurable  VPD
 241 *     0x800-0x1800  boot config
 242 * Aside from the chip and board config, all of these are optional and may
 243 * be absent or truncated depending on the devices used.
 244 */
 245#define FALCON_NVCONFIG_END 0x400U
 246#define FALCON_FLASH_BOOTCODE_START 0x8000U
 247#define FALCON_EEPROM_BOOTCONFIG_START 0x800U
 248#define FALCON_EEPROM_BOOTCONFIG_END 0x1800U
 249
 250/* Board configuration v2 (v1 is obsolete; later versions are compatible) */
/* All multi-byte fields are little-endian; __packed because this
 * mirrors the on-NVM layout exactly. */
struct falcon_nvconfig_board_v2 {
	__le16 nports;
	u8 port0_phy_addr;
	u8 port0_phy_type;
	u8 port1_phy_addr;
	u8 port1_phy_type;
	__le16 asic_sub_revision;
	__le16 board_revision;
} __packed;
 260
 261/* Board configuration v3 extra information */
struct falcon_nvconfig_board_v3 {
	/* One encoded device-type word per SPI device, using the
	 * SPI_DEV_TYPE_* bit fields defined below.  NOTE(review):
	 * presumably [0]=flash, [1]=EEPROM to match nic_data's
	 * spi_flash/spi_eeprom -- confirm against the nvconfig reader. */
	__le32 spi_device_type[2];
} __packed;
 265
 266/* Bit numbers for spi_device_type */
 267#define SPI_DEV_TYPE_SIZE_LBN 0
 268#define SPI_DEV_TYPE_SIZE_WIDTH 5
 269#define SPI_DEV_TYPE_ADDR_LEN_LBN 6
 270#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
 271#define SPI_DEV_TYPE_ERASE_CMD_LBN 8
 272#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
 273#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
 274#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
 275#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
 276#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
 277#define SPI_DEV_TYPE_FIELD(type, field)					\
 278	(((type) >> EF4_LOW_BIT(field)) & EF4_MASK32(EF4_WIDTH(field)))
 279
 280#define FALCON_NVCONFIG_OFFSET 0x300
 281
 282#define FALCON_NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
/* Chip and board configuration block as stored in NVRAM, starting at
 * FALCON_NVCONFIG_OFFSET (0x300).  The hex comments are absolute NVRAM
 * addresses.  __packed because this mirrors the on-device layout. */
struct falcon_nvconfig {
	ef4_oword_t ee_vpd_cfg_reg;			/* 0x300 */
	u8 mac_address[2][8];			/* 0x310 */
	ef4_oword_t pcie_sd_ctl0123_reg;		/* 0x320 */
	ef4_oword_t pcie_sd_ctl45_reg;			/* 0x330 */
	ef4_oword_t pcie_pcs_ctl_stat_reg;		/* 0x340 */
	ef4_oword_t hw_init_reg;			/* 0x350 */
	ef4_oword_t nic_stat_reg;			/* 0x360 */
	ef4_oword_t glb_ctl_reg;			/* 0x370 */
	ef4_oword_t srm_cfg_reg;			/* 0x380 */
	ef4_oword_t spare_reg;				/* 0x390 */
	/* Must equal FALCON_NVCONFIG_BOARD_MAGIC_NUM for the board
	 * config that follows to be valid. */
	__le16 board_magic_num;			/* 0x3A0 */
	__le16 board_struct_ver;
	__le16 board_checksum;
	struct falcon_nvconfig_board_v2 board_v2;
	ef4_oword_t ee_base_page_reg;			/* 0x3B0 */
	struct falcon_nvconfig_board_v3 board_v3;	/* 0x3C0 */
} __packed;
 301
 302/*************************************************************************/
 303
 304static int falcon_reset_hw(struct ef4_nic *efx, enum reset_type method);
 305static void falcon_reconfigure_mac_wrapper(struct ef4_nic *efx);
 306
/* Canned SPI device-type words (see the SPI_DEV_TYPE_* bit definitions
 * above).  The SIZE, ERASE_SIZE and BLOCK_SIZE fields are log2 of the
 * byte counts quoted in the comments: 2^13 = 8 KB, 2^17 = 128 KB,
 * 2^15 = 32 KB, 2^5 = 32 B, 2^8 = 256 B. */
static const unsigned int
/* "Large" EEPROM device: Atmel AT25640 or similar
 * 8 KB, 16-bit address, 32 B write block */
large_eeprom_type = ((13 << SPI_DEV_TYPE_SIZE_LBN)
		     | (2 << SPI_DEV_TYPE_ADDR_LEN_LBN)
		     | (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN)),
/* Default flash device: Atmel AT25F1024
 * 128 KB, 24-bit address, 32 KB erase block, 256 B write block */
default_flash_type = ((17 << SPI_DEV_TYPE_SIZE_LBN)
		      | (3 << SPI_DEV_TYPE_ADDR_LEN_LBN)
		      | (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN)
		      | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN)
		      | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN));
 320
 321/**************************************************************************
 322 *
 323 * I2C bus - this is a bit-bashing interface using GPIO pins
 324 * Note that it uses the output enables to tristate the outputs
 325 * SDA is the data pin and SCL is the clock
 326 *
 327 **************************************************************************
 328 */
 329static void falcon_setsda(void *data, int state)
 330{
 331	struct ef4_nic *efx = (struct ef4_nic *)data;
 332	ef4_oword_t reg;
 333
 334	ef4_reado(efx, &reg, FR_AB_GPIO_CTL);
 335	EF4_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN, !state);
 336	ef4_writeo(efx, &reg, FR_AB_GPIO_CTL);
 337}
 338
 339static void falcon_setscl(void *data, int state)
 340{
 341	struct ef4_nic *efx = (struct ef4_nic *)data;
 342	ef4_oword_t reg;
 343
 344	ef4_reado(efx, &reg, FR_AB_GPIO_CTL);
 345	EF4_SET_OWORD_FIELD(reg, FRF_AB_GPIO0_OEN, !state);
 346	ef4_writeo(efx, &reg, FR_AB_GPIO_CTL);
 347}
 348
 349static int falcon_getsda(void *data)
 350{
 351	struct ef4_nic *efx = (struct ef4_nic *)data;
 352	ef4_oword_t reg;
 353
 354	ef4_reado(efx, &reg, FR_AB_GPIO_CTL);
 355	return EF4_OWORD_FIELD(reg, FRF_AB_GPIO3_IN);
 356}
 357
 358static int falcon_getscl(void *data)
 359{
 360	struct ef4_nic *efx = (struct ef4_nic *)data;
 361	ef4_oword_t reg;
 362
 363	ef4_reado(efx, &reg, FR_AB_GPIO_CTL);
 364	return EF4_OWORD_FIELD(reg, FRF_AB_GPIO0_IN);
 365}
 366
/* Bit-banging I2C operations, layered on the GPIO accessors above.
 * udelay is the half-period in microseconds (roughly a 100 kHz bus). */
static const struct i2c_algo_bit_data falcon_i2c_bit_operations = {
	.setsda		= falcon_setsda,
	.setscl		= falcon_setscl,
	.getsda		= falcon_getsda,
	.getscl		= falcon_getscl,
	.udelay		= 5,
	/* Wait up to 50 ms for slave to let us pull SCL high */
	.timeout	= DIV_ROUND_UP(HZ, 20),
};
 376
 377static void falcon_push_irq_moderation(struct ef4_channel *channel)
 378{
 379	ef4_dword_t timer_cmd;
 380	struct ef4_nic *efx = channel->efx;
 381
 382	/* Set timer register */
 383	if (channel->irq_moderation_us) {
 384		unsigned int ticks;
 385
 386		ticks = ef4_usecs_to_ticks(efx, channel->irq_moderation_us);
 387		EF4_POPULATE_DWORD_2(timer_cmd,
 388				     FRF_AB_TC_TIMER_MODE,
 389				     FFE_BB_TIMER_MODE_INT_HLDOFF,
 390				     FRF_AB_TC_TIMER_VAL,
 391				     ticks - 1);
 392	} else {
 393		EF4_POPULATE_DWORD_2(timer_cmd,
 394				     FRF_AB_TC_TIMER_MODE,
 395				     FFE_BB_TIMER_MODE_DIS,
 396				     FRF_AB_TC_TIMER_VAL, 0);
 397	}
 398	BUILD_BUG_ON(FR_AA_TIMER_COMMAND_KER != FR_BZ_TIMER_COMMAND_P0);
 399	ef4_writed_page_locked(efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
 400			       channel->channel);
 401}
 402
 403static void falcon_deconfigure_mac_wrapper(struct ef4_nic *efx);
 404
/* Prepare for flushing the TX/RX queues: deconfigure the MAC wrapper
 * (presumably stopping new traffic -- see
 * falcon_deconfigure_mac_wrapper), then give the FIFOs time to drain. */
static void falcon_prepare_flush(struct ef4_nic *efx)
{
	falcon_deconfigure_mac_wrapper(efx);

	/* Wait for the tx and rx fifo's to get to the next packet boundary
	 * (~1ms without back-pressure), then to drain the remainder of the
	 * fifo's at data path speeds (negligible), with a healthy margin. */
	msleep(10);
}
 414
 415/* Acknowledge a legacy interrupt from Falcon
 416 *
 417 * This acknowledges a legacy (not MSI) interrupt via INT_ACK_KER_REG.
 418 *
 419 * Due to SFC bug 3706 (silicon revision <=A1) reads can be duplicated in the
 420 * BIU. Interrupt acknowledge is read sensitive so must write instead
 421 * (then read to ensure the BIU collector is flushed)
 422 *
 423 * NB most hardware supports MSI interrupts
 424 */
static inline void falcon_irq_ack_a1(struct ef4_nic *efx)
{
	ef4_dword_t reg;

	/* Write (not read) the ack register, per the bug 3706 workaround
	 * described above.  NOTE(review): 0xb7eb7e appears to be an
	 * arbitrary marker value -- its exact value is not used here. */
	EF4_POPULATE_DWORD_1(reg, FRF_AA_INT_ACK_KER_FIELD, 0xb7eb7e);
	ef4_writed(efx, &reg, FR_AA_INT_ACK_KER);
	/* Read back to ensure the BIU write collector is flushed. */
	ef4_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS);
}
 433
/* Legacy (INTx) interrupt handler for Falcon A1: inspect the interrupt
 * vector at efx->irq_status.addr, handle fatal errors, then schedule
 * the interrupting event queues and acknowledge the device. */
static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
{
	struct ef4_nic *efx = dev_id;
	ef4_oword_t *int_ker = efx->irq_status.addr;
	int syserr;
	int queues;

	/* Check to see if this is our interrupt.  If it isn't, we
	 * exit without having touched the hardware.
	 */
	if (unlikely(EF4_OWORD_IS_ZERO(*int_ker))) {
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d not for me\n", irq,
			   raw_smp_processor_id());
		return IRQ_NONE;
	}
	efx->last_irq_cpu = raw_smp_processor_id();
	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker));

	/* Interrupts arriving while soft-disabled are claimed but not
	 * processed further. */
	if (!likely(READ_ONCE(efx->irq_soft_enabled)))
		return IRQ_HANDLED;

	/* Check to see if we have a serious error condition */
	syserr = EF4_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
	if (unlikely(syserr))
		return ef4_farch_fatal_interrupt(efx);

	/* Determine interrupting queues, clear interrupt status
	 * register and acknowledge the device interrupt.
	 */
	BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EF4_MAX_CHANNELS);
	queues = EF4_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q);
	EF4_ZERO_OWORD(*int_ker);
	wmb(); /* Ensure the vector is cleared before interrupt ack */
	falcon_irq_ack_a1(efx);

	/* Schedule NAPI for the interrupting queues; only bits 0 and 1
	 * of the queue vector are examined here. */
	if (queues & 1)
		ef4_schedule_channel_irq(ef4_get_channel(efx, 0));
	if (queues & 2)
		ef4_schedule_channel_irq(ef4_get_channel(efx, 1));
	return IRQ_HANDLED;
}
 478
 479/**************************************************************************
 480 *
 481 * RSS
 482 *
 483 **************************************************************************
 484 */
 485static int dummy_rx_push_rss_config(struct ef4_nic *efx, bool user,
 486				    const u32 *rx_indir_table)
 487{
 488	(void) efx;
 489	(void) user;
 490	(void) rx_indir_table;
 491	return -ENOSYS;
 492}
 493
 494static int falcon_b0_rx_push_rss_config(struct ef4_nic *efx, bool user,
 495					const u32 *rx_indir_table)
 496{
 497	ef4_oword_t temp;
 498
 499	(void) user;
 500	/* Set hash key for IPv4 */
 501	memcpy(&temp, efx->rx_hash_key, sizeof(temp));
 502	ef4_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
 503
 504	memcpy(efx->rx_indir_table, rx_indir_table,
 505	       sizeof(efx->rx_indir_table));
 506	ef4_farch_rx_push_indir_table(efx);
 507	return 0;
 508}
 509
 510/**************************************************************************
 511 *
 512 * EEPROM/flash
 513 *
 514 **************************************************************************
 515 */
 516
 517#define FALCON_SPI_MAX_LEN sizeof(ef4_oword_t)
 518
 519static int falcon_spi_poll(struct ef4_nic *efx)
 520{
 521	ef4_oword_t reg;
 522	ef4_reado(efx, &reg, FR_AB_EE_SPI_HCMD);
 523	return EF4_OWORD_FIELD(reg, FRF_AB_EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0;
 524}
 525
 526/* Wait for SPI command completion */
 527static int falcon_spi_wait(struct ef4_nic *efx)
 528{
 529	/* Most commands will finish quickly, so we start polling at
 530	 * very short intervals.  Sometimes the command may have to
 531	 * wait for VPD or expansion ROM access outside of our
 532	 * control, so we allow up to 100 ms. */
 533	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 10);
 534	int i;
 535
 536	for (i = 0; i < 10; i++) {
 537		if (!falcon_spi_poll(efx))
 538			return 0;
 539		udelay(10);
 540	}
 541
 542	for (;;) {
 543		if (!falcon_spi_poll(efx))
 544			return 0;
 545		if (time_after_eq(jiffies, timeout)) {
 546			netif_err(efx, hw, efx->net_dev,
 547				  "timed out waiting for SPI\n");
 548			return -ETIMEDOUT;
 549		}
 550		schedule_timeout_uninterruptible(1);
 551	}
 552}
 553
/* Issue a single command to a SPI device through the Falcon SPI host
 * interface.
 * @command: SPI command byte (one of the SPI_* opcodes above)
 * @address: device address, or negative for an unaddressed command
 * @in: data to write with the command, or NULL
 * @out: buffer for data read back, or NULL
 * @len: data length in bytes, at most FALCON_SPI_MAX_LEN
 * Returns 0 on success, -EINVAL for oversized @len, -EBUSY if a
 * previous command is still running, or -ETIMEDOUT from the wait.
 */
static int
falcon_spi_cmd(struct ef4_nic *efx, const struct falcon_spi_device *spi,
	       unsigned int command, int address,
	       const void *in, void *out, size_t len)
{
	bool addressed = (address >= 0);
	bool reading = (out != NULL);
	ef4_oword_t reg;
	int rc;

	/* Input validation */
	if (len > FALCON_SPI_MAX_LEN)
		return -EINVAL;

	/* Check that previous command is not still running */
	rc = falcon_spi_poll(efx);
	if (rc)
		return rc;

	/* Program address register, if we have an address */
	if (addressed) {
		EF4_POPULATE_OWORD_1(reg, FRF_AB_EE_SPI_HADR_ADR, address);
		ef4_writeo(efx, &reg, FR_AB_EE_SPI_HADR);
	}

	/* Program data register, if we have data */
	if (in != NULL) {
		memcpy(&reg, in, len);
		ef4_writeo(efx, &reg, FR_AB_EE_SPI_HDATA);
	}

	/* Issue read/write command */
	EF4_POPULATE_OWORD_7(reg,
			     FRF_AB_EE_SPI_HCMD_CMD_EN, 1,
			     FRF_AB_EE_SPI_HCMD_SF_SEL, spi->device_id,
			     FRF_AB_EE_SPI_HCMD_DABCNT, len,
			     FRF_AB_EE_SPI_HCMD_READ, reading,
			     FRF_AB_EE_SPI_HCMD_DUBCNT, 0,
			     FRF_AB_EE_SPI_HCMD_ADBCNT,
			     (addressed ? spi->addr_len : 0),
			     FRF_AB_EE_SPI_HCMD_ENC, command);
	ef4_writeo(efx, &reg, FR_AB_EE_SPI_HCMD);

	/* Wait for read/write to complete */
	rc = falcon_spi_wait(efx);
	if (rc)
		return rc;

	/* Read data */
	if (out != NULL) {
		ef4_reado(efx, &reg, FR_AB_EE_SPI_HDATA);
		memcpy(out, &reg, len);
	}

	return 0;
}
 610
static inline u8
falcon_spi_munge_command(const struct falcon_spi_device *spi,
			 const u8 command, const unsigned int address)
{
	/* Fold bit 8 of the address into bit 3 of the command byte when
	 * spi->munge_address is set (it acts as a 1-bit mask).
	 * NOTE(review): presumably for small EEPROMs that encode a 9th
	 * address bit in the opcode -- confirm against the datasheet. */
	return command | (((address >> 8) & spi->munge_address) << 3);
}
 617
 618static int
 619falcon_spi_read(struct ef4_nic *efx, const struct falcon_spi_device *spi,
 620		loff_t start, size_t len, size_t *retlen, u8 *buffer)
 621{
 622	size_t block_len, pos = 0;
 623	unsigned int command;
 624	int rc = 0;
 625
 626	while (pos < len) {
 627		block_len = min(len - pos, FALCON_SPI_MAX_LEN);
 628
 629		command = falcon_spi_munge_command(spi, SPI_READ, start + pos);
 630		rc = falcon_spi_cmd(efx, spi, command, start + pos, NULL,
 631				    buffer + pos, block_len);
 632		if (rc)
 633			break;
 634		pos += block_len;
 635
 636		/* Avoid locking up the system */
 637		cond_resched();
 638		if (signal_pending(current)) {
 639			rc = -EINTR;
 640			break;
 641		}
 642	}
 643
 644	if (retlen)
 645		*retlen = pos;
 646	return rc;
 647}
 648
 649#ifdef CONFIG_SFC_FALCON_MTD
 650
/* An MTD partition backed by a region of a Falcon SPI device. */
struct falcon_mtd_partition {
	struct ef4_mtd_partition common;
	const struct falcon_spi_device *spi;	/* backing SPI device */
	size_t offset;		/* partition start within the device */
};

/* Recover the falcon_mtd_partition from its embedded mtd_info. */
#define to_falcon_mtd_partition(mtd)				\
	container_of(mtd, struct falcon_mtd_partition, common.mtd)
 659
 660static size_t
 661falcon_spi_write_limit(const struct falcon_spi_device *spi, size_t start)
 662{
 663	return min(FALCON_SPI_MAX_LEN,
 664		   (spi->block_size - (start & (spi->block_size - 1))));
 665}
 666
 667/* Wait up to 10 ms for buffered write completion */
 668static int
 669falcon_spi_wait_write(struct ef4_nic *efx, const struct falcon_spi_device *spi)
 670{
 671	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
 672	u8 status;
 673	int rc;
 674
 675	for (;;) {
 676		rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
 677				    &status, sizeof(status));
 678		if (rc)
 679			return rc;
 680		if (!(status & SPI_STATUS_NRDY))
 681			return 0;
 682		if (time_after_eq(jiffies, timeout)) {
 683			netif_err(efx, hw, efx->net_dev,
 684				  "SPI write timeout on device %d"
 685				  " last status=0x%02x\n",
 686				  spi->device_id, status);
 687			return -ETIMEDOUT;
 688		}
 689		schedule_timeout_uninterruptible(1);
 690	}
 691}
 692
 693static int
 694falcon_spi_write(struct ef4_nic *efx, const struct falcon_spi_device *spi,
 695		 loff_t start, size_t len, size_t *retlen, const u8 *buffer)
 696{
 697	u8 verify_buffer[FALCON_SPI_MAX_LEN];
 698	size_t block_len, pos = 0;
 699	unsigned int command;
 700	int rc = 0;
 701
 702	while (pos < len) {
 703		rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
 704		if (rc)
 705			break;
 706
 707		block_len = min(len - pos,
 708				falcon_spi_write_limit(spi, start + pos));
 709		command = falcon_spi_munge_command(spi, SPI_WRITE, start + pos);
 710		rc = falcon_spi_cmd(efx, spi, command, start + pos,
 711				    buffer + pos, NULL, block_len);
 712		if (rc)
 713			break;
 714
 715		rc = falcon_spi_wait_write(efx, spi);
 716		if (rc)
 717			break;
 718
 719		command = falcon_spi_munge_command(spi, SPI_READ, start + pos);
 720		rc = falcon_spi_cmd(efx, spi, command, start + pos,
 721				    NULL, verify_buffer, block_len);
 722		if (memcmp(verify_buffer, buffer + pos, block_len)) {
 723			rc = -EIO;
 724			break;
 725		}
 726
 727		pos += block_len;
 728
 729		/* Avoid locking up the system */
 730		cond_resched();
 731		if (signal_pending(current)) {
 732			rc = -EINTR;
 733			break;
 734		}
 735	}
 736
 737	if (retlen)
 738		*retlen = pos;
 739	return rc;
 740}
 741
/* Wait up to 4 seconds (40 polls, 100 ms apart) for the SPI device
 * behind @part to finish a slow operation such as an erase.
 * @uninterruptible selects the task sleep state.  Returns 0 when the
 * device reports ready, -EINTR if a signal is pending, or -ETIMEDOUT.
 */
static int
falcon_spi_slow_wait(struct falcon_mtd_partition *part, bool uninterruptible)
{
	const struct falcon_spi_device *spi = part->spi;
	struct ef4_nic *efx = part->common.mtd.priv;
	u8 status;
	int rc, i;

	/* Wait up to 4s for flash/EEPROM to finish a slow operation. */
	for (i = 0; i < 40; i++) {
		__set_current_state(uninterruptible ?
				    TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
		schedule_timeout(HZ / 10);
		/* Poll the device busy flag via its status register. */
		rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
				    &status, sizeof(status));
		if (rc)
			return rc;
		if (!(status & SPI_STATUS_NRDY))
			return 0;
		if (signal_pending(current))
			return -EINTR;
	}
	pr_err("%s: timed out waiting for %s\n",
	       part->common.name, part->common.dev_type_name);
	return -ETIMEDOUT;
}
 768
 769static int
 770falcon_spi_unlock(struct ef4_nic *efx, const struct falcon_spi_device *spi)
 771{
 772	const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 |
 773				SPI_STATUS_BP0);
 774	u8 status;
 775	int rc;
 776
 777	rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
 778			    &status, sizeof(status));
 779	if (rc)
 780		return rc;
 781
 782	if (!(status & unlock_mask))
 783		return 0; /* already unlocked */
 784
 785	rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
 786	if (rc)
 787		return rc;
 788	rc = falcon_spi_cmd(efx, spi, SPI_SST_EWSR, -1, NULL, NULL, 0);
 789	if (rc)
 790		return rc;
 791
 792	status &= ~unlock_mask;
 793	rc = falcon_spi_cmd(efx, spi, SPI_WRSR, -1, &status,
 794			    NULL, sizeof(status));
 795	if (rc)
 796		return rc;
 797	rc = falcon_spi_wait_write(efx, spi);
 798	if (rc)
 799		return rc;
 800
 801	return 0;
 802}
 803
 804#define FALCON_SPI_VERIFY_BUF_LEN 16
 805
 806static int
 807falcon_spi_erase(struct falcon_mtd_partition *part, loff_t start, size_t len)
 808{
 809	const struct falcon_spi_device *spi = part->spi;
 810	struct ef4_nic *efx = part->common.mtd.priv;
 811	unsigned pos, block_len;
 812	u8 empty[FALCON_SPI_VERIFY_BUF_LEN];
 813	u8 buffer[FALCON_SPI_VERIFY_BUF_LEN];
 814	int rc;
 815
 816	if (len != spi->erase_size)
 817		return -EINVAL;
 818
 819	if (spi->erase_command == 0)
 820		return -EOPNOTSUPP;
 821
 822	rc = falcon_spi_unlock(efx, spi);
 823	if (rc)
 824		return rc;
 825	rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
 826	if (rc)
 827		return rc;
 828	rc = falcon_spi_cmd(efx, spi, spi->erase_command, start, NULL,
 829			    NULL, 0);
 830	if (rc)
 831		return rc;
 832	rc = falcon_spi_slow_wait(part, false);
 833
 834	/* Verify the entire region has been wiped */
 835	memset(empty, 0xff, sizeof(empty));
 836	for (pos = 0; pos < len; pos += block_len) {
 837		block_len = min(len - pos, sizeof(buffer));
 838		rc = falcon_spi_read(efx, spi, start + pos, block_len,
 839				     NULL, buffer);
 840		if (rc)
 841			return rc;
 842		if (memcmp(empty, buffer, block_len))
 843			return -EIO;
 844
 845		/* Avoid locking up the system */
 846		cond_resched();
 847		if (signal_pending(current))
 848			return -EINTR;
 849	}
 850
 851	return rc;
 852}
 853
 854static void falcon_mtd_rename(struct ef4_mtd_partition *part)
 855{
 856	struct ef4_nic *efx = part->mtd.priv;
 857
 858	snprintf(part->name, sizeof(part->name), "%s %s",
 859		 efx->name, part->type_name);
 860}
 861
 862static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
 863			   size_t len, size_t *retlen, u8 *buffer)
 864{
 865	struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
 866	struct ef4_nic *efx = mtd->priv;
 867	struct falcon_nic_data *nic_data = efx->nic_data;
 868	int rc;
 869
 870	rc = mutex_lock_interruptible(&nic_data->spi_lock);
 871	if (rc)
 872		return rc;
 873	rc = falcon_spi_read(efx, part->spi, part->offset + start,
 874			     len, retlen, buffer);
 875	mutex_unlock(&nic_data->spi_lock);
 876	return rc;
 877}
 878
 879static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
 880{
 881	struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
 882	struct ef4_nic *efx = mtd->priv;
 883	struct falcon_nic_data *nic_data = efx->nic_data;
 884	int rc;
 885
 886	rc = mutex_lock_interruptible(&nic_data->spi_lock);
 887	if (rc)
 888		return rc;
 889	rc = falcon_spi_erase(part, part->offset + start, len);
 890	mutex_unlock(&nic_data->spi_lock);
 891	return rc;
 892}
 893
 894static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
 895			    size_t len, size_t *retlen, const u8 *buffer)
 896{
 897	struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
 898	struct ef4_nic *efx = mtd->priv;
 899	struct falcon_nic_data *nic_data = efx->nic_data;
 900	int rc;
 901
 902	rc = mutex_lock_interruptible(&nic_data->spi_lock);
 903	if (rc)
 904		return rc;
 905	rc = falcon_spi_write(efx, part->spi, part->offset + start,
 906			      len, retlen, buffer);
 907	mutex_unlock(&nic_data->spi_lock);
 908	return rc;
 909}
 910
 911static int falcon_mtd_sync(struct mtd_info *mtd)
 912{
 913	struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
 914	struct ef4_nic *efx = mtd->priv;
 915	struct falcon_nic_data *nic_data = efx->nic_data;
 916	int rc;
 917
 918	mutex_lock(&nic_data->spi_lock);
 919	rc = falcon_spi_slow_wait(part, true);
 920	mutex_unlock(&nic_data->spi_lock);
 921	return rc;
 922}
 923
/* Discover and register MTD partitions: the boot-code region of the
 * flash (from FALCON_FLASH_BOOTCODE_START) and the boot-config region
 * of a large EEPROM (FALCON_EEPROM_BOOTCONFIG_START..END), when the
 * respective devices are present and large enough.  Returns 0 on
 * success, -ENOMEM on allocation failure, or the error from
 * ef4_mtd_add(). */
static int falcon_mtd_probe(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	struct falcon_mtd_partition *parts;
	struct falcon_spi_device *spi;
	size_t n_parts;
	int rc = -ENODEV;

	ASSERT_RTNL();

	/* Allocate space for maximum number of partitions */
	parts = kcalloc(2, sizeof(*parts), GFP_KERNEL);
	if (!parts)
		return -ENOMEM;
	n_parts = 0;

	/* Flash: everything above the boot-code start is exposed as a
	 * read-only-style NOR partition. */
	spi = &nic_data->spi_flash;
	if (falcon_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) {
		parts[n_parts].spi = spi;
		parts[n_parts].offset = FALCON_FLASH_BOOTCODE_START;
		parts[n_parts].common.dev_type_name = "flash";
		parts[n_parts].common.type_name = "sfc_flash_bootrom";
		parts[n_parts].common.mtd.type = MTD_NORFLASH;
		parts[n_parts].common.mtd.flags = MTD_CAP_NORFLASH;
		parts[n_parts].common.mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
		parts[n_parts].common.mtd.erasesize = spi->erase_size;
		n_parts++;
	}

	/* EEPROM: the boot-config window, clipped to the device size. */
	spi = &nic_data->spi_eeprom;
	if (falcon_spi_present(spi) && spi->size > FALCON_EEPROM_BOOTCONFIG_START) {
		parts[n_parts].spi = spi;
		parts[n_parts].offset = FALCON_EEPROM_BOOTCONFIG_START;
		parts[n_parts].common.dev_type_name = "EEPROM";
		parts[n_parts].common.type_name = "sfc_bootconfig";
		parts[n_parts].common.mtd.type = MTD_RAM;
		parts[n_parts].common.mtd.flags = MTD_CAP_RAM;
		parts[n_parts].common.mtd.size =
			min(spi->size, FALCON_EEPROM_BOOTCONFIG_END) -
			FALCON_EEPROM_BOOTCONFIG_START;
		parts[n_parts].common.mtd.erasesize = spi->erase_size;
		n_parts++;
	}

	/* ef4_mtd_add takes ownership of parts on success. */
	rc = ef4_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
	if (rc)
		kfree(parts);
	return rc;
}
 973
 974#endif /* CONFIG_SFC_FALCON_MTD */
 975
 976/**************************************************************************
 977 *
 978 * XMAC operations
 979 *
 980 **************************************************************************
 981 */
 982
/* Configure the XAUI driver that is an output from Falcon.
 * Sets default drive strength on all four serdes lanes (A-D) and
 * programs the per-lane TX driver de-emphasis/level defaults. */
static void falcon_setup_xaui(struct ef4_nic *efx)
{
	ef4_oword_t sdctl, txdrv;

	/* Move the XAUI into low power, unless there is no PHY, in
	 * which case the XAUI will have to drive a cable. */
	if (efx->phy_type == PHY_TYPE_NONE)
		return;

	/* Default high/low drive strength for each of the four lanes */
	ef4_reado(efx, &sdctl, FR_AB_XX_SD_CTL);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
	ef4_writeo(efx, &sdctl, FR_AB_XX_SD_CTL);

	/* Default TX driver de-emphasis (DEQx) and level (DTXx) per lane */
	EF4_POPULATE_OWORD_8(txdrv,
			     FRF_AB_XX_DEQD, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DEQC, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DEQB, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DEQA, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DTXD, FFE_AB_XX_TXDRV_DTX_DEF,
			     FRF_AB_XX_DTXC, FFE_AB_XX_TXDRV_DTX_DEF,
			     FRF_AB_XX_DTXB, FFE_AB_XX_TXDRV_DTX_DEF,
			     FRF_AB_XX_DTXA, FFE_AB_XX_TXDRV_DTX_DEF);
	ef4_writeo(efx, &txdrv, FR_AB_XX_TXDRV_CTL);
}
1015
/* Reset the XAUI/XGXS block and wait for the reset to self-clear, then
 * reapply the XAUI analogue settings.
 *
 * Caller must have stats DMA stopped (stats_disable_count != 0), since
 * MAC statistics cannot be fetched across an XMAC reset.
 * Returns 0 on success, -ETIMEDOUT if the reset does not complete
 * within ~10ms.
 */
int falcon_reset_xaui(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t reg;
	int count;

	/* Don't fetch MAC statistics over an XMAC reset */
	WARN_ON(nic_data->stats_disable_count == 0);

	/* Start reset sequence */
	EF4_POPULATE_OWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1);
	ef4_writeo(efx, &reg, FR_AB_XX_PWR_RST);

	/* Wait up to 10 ms for completion, then reinitialise */
	for (count = 0; count < 1000; count++) {
		ef4_reado(efx, &reg, FR_AB_XX_PWR_RST);
		/* Reset is done when both the enable bit and the serdes
		 * reset-active bit have cleared */
		if (EF4_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 &&
		    EF4_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) {
			falcon_setup_xaui(efx);
			return 0;
		}
		udelay(10);
	}
	netif_err(efx, hw, efx->net_dev,
		  "timed out waiting for XAUI/XGXS reset\n");
	return -ETIMEDOUT;
}
1043
/* Acknowledge the XMAC management status interrupt (Falcon B0 only).
 * The acknowledgement is performed by reading FR_AB_XM_MGT_INT_MSK;
 * NOTE(review): the read itself appears to be the acknowledge - confirm
 * against the Falcon datasheet.  Skipped in internal loopback, when the
 * wireside link is down, or while we are polling the XMAC anyway. */
static void falcon_ack_status_intr(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t reg;

	if ((ef4_nic_rev(efx) != EF4_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
		return;

	/* We expect xgmii faults if the wireside link is down */
	if (!efx->link_state.up)
		return;

	/* We can only use this interrupt to signal the negative edge of
	 * xaui_align [we have to poll the positive edge]. */
	if (nic_data->xmac_poll_required)
		return;

	ef4_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
}
1063
/* Check the MAC-side XGXS block: link is OK when alignment is done and
 * all four lanes report sync.  The per-lane error/status bits are
 * written back afterwards to clear them for the next poll. */
static bool falcon_xgxs_link_ok(struct ef4_nic *efx)
{
	ef4_oword_t reg;
	bool align_done, link_ok = false;
	int sync_status;

	/* Read link status */
	ef4_reado(efx, &reg, FR_AB_XX_CORE_STAT);

	align_done = EF4_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE);
	sync_status = EF4_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT);
	if (align_done && (sync_status == FFE_AB_XX_STAT_ALL_LANES))
		link_ok = true;

	/* Clear link status ready for next read */
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
	ef4_writeo(efx, &reg, FR_AB_XX_CORE_STAT);

	return link_ok;
}
1086
1087static bool falcon_xmac_link_ok(struct ef4_nic *efx)
1088{
1089	/*
1090	 * Check MAC's XGXS link status except when using XGMII loopback
1091	 * which bypasses the XGXS block.
1092	 * If possible, check PHY's XGXS link status except when using
1093	 * MAC loopback.
1094	 */
1095	return (efx->loopback_mode == LOOPBACK_XGMII ||
1096		falcon_xgxs_link_ok(efx)) &&
1097		(!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) ||
1098		 LOOPBACK_INTERNAL(efx) ||
1099		 ef4_mdio_phyxgxs_lane_sync(efx));
1100}
1101
/* Program the XMAC core from current state: global config, TX/RX
 * enables, maximum frame lengths derived from the MTU, pause/flow
 * control from link_state.fc, and the station MAC address. */
static void falcon_reconfigure_xmac_core(struct ef4_nic *efx)
{
	unsigned int max_frame_len;
	ef4_oword_t reg;
	bool rx_fc = !!(efx->link_state.fc & EF4_FC_RX);
	bool tx_fc = !!(efx->link_state.fc & EF4_FC_TX);

	/* Configure MAC  - cut-thru mode is hard wired on */
	EF4_POPULATE_OWORD_3(reg,
			     FRF_AB_XM_RX_JUMBO_MODE, 1,
			     FRF_AB_XM_TX_STAT_EN, 1,
			     FRF_AB_XM_RX_STAT_EN, 1);
	ef4_writeo(efx, &reg, FR_AB_XM_GLB_CFG);

	/* Configure TX */
	EF4_POPULATE_OWORD_6(reg,
			     FRF_AB_XM_TXEN, 1,
			     FRF_AB_XM_TX_PRMBL, 1,
			     FRF_AB_XM_AUTO_PAD, 1,
			     FRF_AB_XM_TXCRC, 1,
			     FRF_AB_XM_FCNTL, tx_fc,
			     FRF_AB_XM_IPG, 0x3);
	ef4_writeo(efx, &reg, FR_AB_XM_TX_CFG);

	/* Configure RX: accept all multicast; accept all unicast only
	 * when we have no unicast filter */
	EF4_POPULATE_OWORD_5(reg,
			     FRF_AB_XM_RXEN, 1,
			     FRF_AB_XM_AUTO_DEPAD, 0,
			     FRF_AB_XM_ACPT_ALL_MCAST, 1,
			     FRF_AB_XM_ACPT_ALL_UCAST, !efx->unicast_filter,
			     FRF_AB_XM_PASS_CRC_ERR, 1);
	ef4_writeo(efx, &reg, FR_AB_XM_RX_CFG);

	/* Set frame length */
	max_frame_len = EF4_MAX_FRAME_LEN(efx->net_dev->mtu);
	EF4_POPULATE_OWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len);
	ef4_writeo(efx, &reg, FR_AB_XM_RX_PARAM);
	EF4_POPULATE_OWORD_2(reg,
			     FRF_AB_XM_MAX_TX_FRM_SIZE, max_frame_len,
			     FRF_AB_XM_TX_JUMBO_MODE, 1);
	ef4_writeo(efx, &reg, FR_AB_XM_TX_PARAM);

	EF4_POPULATE_OWORD_2(reg,
			     FRF_AB_XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
			     FRF_AB_XM_DIS_FCNTL, !rx_fc);
	ef4_writeo(efx, &reg, FR_AB_XM_FC);

	/* Set MAC address: low 4 bytes then high 2 bytes */
	memcpy(&reg, &efx->net_dev->dev_addr[0], 4);
	ef4_writeo(efx, &reg, FR_AB_XM_ADR_LO);
	memcpy(&reg, &efx->net_dev->dev_addr[4], 2);
	ef4_writeo(efx, &reg, FR_AB_XM_ADR_HI);
}
1155
/* Reconfigure the XGXS block for the current loopback mode, resetting
 * the XAUI first if the loopback configuration has changed. */
static void falcon_reconfigure_xgxs_core(struct ef4_nic *efx)
{
	ef4_oword_t reg;
	bool xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS);
	bool xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI);
	bool xgmii_loopback = (efx->loopback_mode == LOOPBACK_XGMII);
	bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;

	/* XGXS block is flaky and will need to be reset if moving
	 * into or out of XGMII, XGXS or XAUI loopbacks. */
	ef4_reado(efx, &reg, FR_AB_XX_CORE_STAT);
	old_xgxs_loopback = EF4_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN);
	old_xgmii_loopback = EF4_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN);

	ef4_reado(efx, &reg, FR_AB_XX_SD_CTL);
	old_xaui_loopback = EF4_OWORD_FIELD(reg, FRF_AB_XX_LPBKA);

	/* The PHY driver may have turned XAUI off */
	if ((xgxs_loopback != old_xgxs_loopback) ||
	    (xaui_loopback != old_xaui_loopback) ||
	    (xgmii_loopback != old_xgmii_loopback))
		falcon_reset_xaui(efx);

	/* Force signal detect in XGXS/XAUI loopback (no real signal) */
	ef4_reado(efx, &reg, FR_AB_XX_CORE_STAT);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG,
			    (xgxs_loopback || xaui_loopback) ?
			    FFE_AB_XX_FORCE_SIG_ALL_LANES : 0);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, xgxs_loopback);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN, xgmii_loopback);
	ef4_writeo(efx, &reg, FR_AB_XX_CORE_STAT);

	/* Per-lane XAUI loopback enables */
	ef4_reado(efx, &reg, FR_AB_XX_SD_CTL);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKD, xaui_loopback);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKC, xaui_loopback);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKB, xaui_loopback);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, xaui_loopback);
	ef4_writeo(efx, &reg, FR_AB_XX_SD_CTL);
}
1194
1195
1196/* Try to bring up the Falcon side of the Falcon-Phy XAUI link */
1197static bool falcon_xmac_link_ok_retry(struct ef4_nic *efx, int tries)
1198{
1199	bool mac_up = falcon_xmac_link_ok(efx);
1200
1201	if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS ||
1202	    ef4_phy_mode_disabled(efx->phy_mode))
1203		/* XAUI link is expected to be down */
1204		return mac_up;
1205
1206	falcon_stop_nic_stats(efx);
1207
1208	while (!mac_up && tries) {
1209		netif_dbg(efx, hw, efx->net_dev, "bashing xaui\n");
1210		falcon_reset_xaui(efx);
1211		udelay(200);
1212
1213		mac_up = falcon_xmac_link_ok(efx);
1214		--tries;
1215	}
1216
1217	falcon_start_nic_stats(efx);
1218
1219	return mac_up;
1220}
1221
1222static bool falcon_xmac_check_fault(struct ef4_nic *efx)
1223{
1224	return !falcon_xmac_link_ok_retry(efx, 5);
1225}
1226
/* Full XMAC reconfiguration: sync the RX filter mode, reprogram the
 * XGXS and XMAC cores and the MAC wrapper, then decide whether the
 * XMAC needs software polling (link not up after 5 retries).
 * Always returns 0. */
static int falcon_reconfigure_xmac(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	ef4_farch_filter_sync_rx_mode(efx);

	falcon_reconfigure_xgxs_core(efx);
	falcon_reconfigure_xmac_core(efx);

	falcon_reconfigure_mac_wrapper(efx);

	/* Poll via software if the link did not come up */
	nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
	falcon_ack_status_intr(efx);

	return 0;
}
1243
/* Periodic XMAC poll: retry the link once and clear the poll-required
 * flag when it comes back up.  No-op if the link is down or polling is
 * not currently needed. */
static void falcon_poll_xmac(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	/* We expect xgmii faults if the wireside link is down */
	if (!efx->link_state.up || !nic_data->xmac_poll_required)
		return;

	nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
	falcon_ack_status_intr(efx);
}
1255
1256/**************************************************************************
1257 *
1258 * MAC wrapper
1259 *
1260 **************************************************************************
1261 */
1262
/* Write the software multicast hash filter into the two MAC hash
 * registers.  Caller must hold efx->mac_lock. */
static void falcon_push_multicast_hash(struct ef4_nic *efx)
{
	union ef4_multicast_hash *mc_hash = &efx->multicast_hash;

	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	ef4_writeo(efx, &mc_hash->oword[0], FR_AB_MAC_MC_HASH_REG0);
	ef4_writeo(efx, &mc_hash->oword[1], FR_AB_MAC_MC_HASH_REG1);
}
1272
/* Reset the MAC blocks.  On pre-B0 silicon, try the XMAC core's own
 * reset first (GLB_CTL is unsafe there); on B0, or if that times out,
 * drain the TX FIFO and pulse the XGTX/XGRX/EM resets via GLB_CTL.
 * Caller must have stats DMA stopped (stats_disable_count != 0). */
static void falcon_reset_macs(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t reg, mac_ctrl;
	int count;

	if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0) {
		/* It's not safe to use GLB_CTL_REG to reset the
		 * macs, so instead use the internal MAC resets
		 */
		EF4_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1);
		ef4_writeo(efx, &reg, FR_AB_XM_GLB_CFG);

		/* Wait up to ~100ms for the reset bit to self-clear */
		for (count = 0; count < 10000; count++) {
			ef4_reado(efx, &reg, FR_AB_XM_GLB_CFG);
			if (EF4_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) ==
			    0)
				return;
			udelay(10);
		}

		netif_err(efx, hw, efx->net_dev,
			  "timed out waiting for XMAC core reset\n");
	}

	/* Mac stats will fail whilst the TX fifo is draining */
	WARN_ON(nic_data->stats_disable_count == 0);

	/* Enable TX FIFO drain before resetting */
	ef4_reado(efx, &mac_ctrl, FR_AB_MAC_CTRL);
	EF4_SET_OWORD_FIELD(mac_ctrl, FRF_BB_TXFIFO_DRAIN_EN, 1);
	ef4_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);

	/* Assert the XGTX/XGRX/EM resets */
	ef4_reado(efx, &reg, FR_AB_GLB_CTL);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_RST_XGTX, 1);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_RST_XGRX, 1);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_RST_EM, 1);
	ef4_writeo(efx, &reg, FR_AB_GLB_CTL);

	/* Poll for all three reset bits to self-clear (up to ~21 loops) */
	count = 0;
	while (1) {
		ef4_reado(efx, &reg, FR_AB_GLB_CTL);
		if (!EF4_OWORD_FIELD(reg, FRF_AB_RST_XGTX) &&
		    !EF4_OWORD_FIELD(reg, FRF_AB_RST_XGRX) &&
		    !EF4_OWORD_FIELD(reg, FRF_AB_RST_EM)) {
			netif_dbg(efx, hw, efx->net_dev,
				  "Completed MAC reset after %d loops\n",
				  count);
			break;
		}
		if (count > 20) {
			netif_err(efx, hw, efx->net_dev, "MAC reset failed\n");
			break;
		}
		count++;
		udelay(10);
	}

	/* Ensure the correct MAC is selected before statistics
	 * are re-enabled by the caller */
	ef4_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);

	falcon_setup_xaui(efx);
}
1336
/* Drain the TX FIFO by resetting the MACs (B0 only, and only outside
 * loopback).  Skipped if a drain is already enabled in MAC_CTRL. */
static void falcon_drain_tx_fifo(struct ef4_nic *efx)
{
	ef4_oword_t reg;

	if ((ef4_nic_rev(efx) < EF4_REV_FALCON_B0) ||
	    (efx->loopback_mode != LOOPBACK_NONE))
		return;

	ef4_reado(efx, &reg, FR_AB_MAC_CTRL);
	/* There is no point in draining more than once */
	if (EF4_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN))
		return;

	falcon_reset_macs(efx);
}
1352
/* Isolate the MAC from the datapath before reconfiguration (B0 only):
 * stop ingress from the MAC to RX, then drain TX into the MAC. */
static void falcon_deconfigure_mac_wrapper(struct ef4_nic *efx)
{
	ef4_oword_t reg;

	if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0)
		return;

	/* Isolate the MAC -> RX */
	ef4_reado(efx, &reg, FR_AZ_RX_CFG);
	EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 0);
	ef4_writeo(efx, &reg, FR_AZ_RX_CFG);

	/* Isolate TX -> MAC */
	falcon_drain_tx_fifo(efx);
}
1368
/* Program the MAC wrapper (FR_AB_MAC_CTRL / FR_AZ_RX_CFG) from the
 * current link state: speed, promiscuity, flow control and isolation.
 * The MAC stays isolated while a reset is pending. */
static void falcon_reconfigure_mac_wrapper(struct ef4_nic *efx)
{
	struct ef4_link_state *link_state = &efx->link_state;
	ef4_oword_t reg;
	int link_speed, isolate;

	/* Keep the MAC isolated while a reset is pending */
	isolate = !!READ_ONCE(efx->reset_pending);

	/* Encode link speed for FRF_AB_MAC_SPEED */
	switch (link_state->speed) {
	case 10000: link_speed = 3; break;
	case 1000:  link_speed = 2; break;
	case 100:   link_speed = 1; break;
	default:    link_speed = 0; break;
	}

	/* MAC_LINK_STATUS controls MAC backpressure but doesn't work
	 * as advertised.  Disable to ensure packets are not
	 * indefinitely held and TX queue can be flushed at any point
	 * while the link is down. */
	EF4_POPULATE_OWORD_5(reg,
			     FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */,
			     FRF_AB_MAC_BCAD_ACPT, 1,
			     FRF_AB_MAC_UC_PROM, !efx->unicast_filter,
			     FRF_AB_MAC_LINK_STATUS, 1, /* always set */
			     FRF_AB_MAC_SPEED, link_speed);
	/* On B0, MAC backpressure can be disabled and packets get
	 * discarded. */
	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
		EF4_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN,
				    !link_state->up || isolate);
	}

	ef4_writeo(efx, &reg, FR_AB_MAC_CTRL);

	/* Restore the multicast hash registers. */
	falcon_push_multicast_hash(efx);

	ef4_reado(efx, &reg, FR_AZ_RX_CFG);
	/* Enable XOFF signal from RX FIFO (we enabled it during NIC
	 * initialisation but it may read back as 0) */
	EF4_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
	/* Unisolate the MAC -> RX */
	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0)
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, !isolate);
	ef4_writeo(efx, &reg, FR_AZ_RX_CFG);
}
1415
/* Kick off a DMA transfer of MAC statistics into efx->stats_buffer and
 * arm the completion-check timer for ~HZ/2 from now.
 * Must not be called with stats disabled or a transfer pending. */
static void falcon_stats_request(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t reg;

	WARN_ON(nic_data->stats_pending);
	WARN_ON(nic_data->stats_disable_count);

	/* Clear the in-memory completion flag before starting DMA */
	FALCON_XMAC_STATS_DMA_FLAG(efx) = 0;
	nic_data->stats_pending = true;
	wmb(); /* ensure done flag is clear */

	/* Initiate DMA transfer of stats */
	EF4_POPULATE_OWORD_2(reg,
			     FRF_AB_MAC_STAT_DMA_CMD, 1,
			     FRF_AB_MAC_STAT_DMA_ADR,
			     efx->stats_buffer.dma_addr);
	ef4_writeo(efx, &reg, FR_AB_MAC_STAT_DMA);

	mod_timer(&nic_data->stats_timer, round_jiffies_up(jiffies + HZ / 2));
}
1437
/* Consume a completed stats DMA: if the hardware set the done flag,
 * fold the DMAed block into nic_data->stats; otherwise report a
 * timeout.  No-op when no transfer is pending. */
static void falcon_stats_complete(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	if (!nic_data->stats_pending)
		return;

	nic_data->stats_pending = false;
	if (FALCON_XMAC_STATS_DMA_FLAG(efx)) {
		rmb(); /* read the done flag before the stats */
		ef4_nic_update_stats(falcon_stat_desc, FALCON_STAT_COUNT,
				     falcon_stat_mask, nic_data->stats,
				     efx->stats_buffer.addr, true);
	} else {
		netif_err(efx, hw, efx->net_dev,
			  "timed out waiting for statistics\n");
	}
}
1456
/* Stats timer callback: complete any pending stats DMA and, if stats
 * are still enabled, immediately request the next transfer.
 * efx->stats_lock serialises against readers of the stats. */
static void falcon_stats_timer_func(struct timer_list *t)
{
	struct falcon_nic_data *nic_data = from_timer(nic_data, t,
						      stats_timer);
	struct ef4_nic *efx = nic_data->efx;

	spin_lock(&efx->stats_lock);

	falcon_stats_complete(efx);
	if (nic_data->stats_disable_count == 0)
		falcon_stats_request(efx);

	spin_unlock(&efx->stats_lock);
}
1471
/* Fake a link poll while in internal loopback: force link up at 10G
 * full duplex with the wanted flow control.  Returns true if this
 * changed the link state.  Caller must hold efx->mac_lock. */
static bool falcon_loopback_link_poll(struct ef4_nic *efx)
{
	struct ef4_link_state old_state = efx->link_state;

	WARN_ON(!mutex_is_locked(&efx->mac_lock));
	WARN_ON(!LOOPBACK_INTERNAL(efx));

	efx->link_state.fd = true;
	efx->link_state.fc = efx->wanted_fc;
	efx->link_state.up = true;
	efx->link_state.speed = 10000;

	return !ef4_link_state_equal(&efx->link_state, &old_state);
}
1486
/* Reconfigure the whole port: poll link state, isolate and reset the
 * MACs, reprogram PHY and XMAC, then restart stats and notify the
 * kernel of any link change.  Always returns 0. */
static int falcon_reconfigure_port(struct ef4_nic *efx)
{
	int rc;

	WARN_ON(ef4_nic_rev(efx) > EF4_REV_FALCON_B0);

	/* Poll the PHY link state *before* reconfiguring it. This means we
	 * will pick up the correct speed (in loopback) to select the correct
	 * MAC.
	 */
	if (LOOPBACK_INTERNAL(efx))
		falcon_loopback_link_poll(efx);
	else
		efx->phy_op->poll(efx);

	falcon_stop_nic_stats(efx);
	falcon_deconfigure_mac_wrapper(efx);

	falcon_reset_macs(efx);

	efx->phy_op->reconfigure(efx);
	rc = falcon_reconfigure_xmac(efx);
	BUG_ON(rc);

	falcon_start_nic_stats(efx);

	/* Synchronise efx->link_state with the kernel */
	ef4_link_status_changed(efx);

	return 0;
}
1518
1519/* TX flow control may automatically turn itself off if the link
1520 * partner (intermittently) stops responding to pause frames. There
1521 * isn't any indication that this has happened, so the best we do is
1522 * leave it up to the user to spot this and fix it by cycling transmit
1523 * flow control on this end.
1524 */
1525
/* Falcon A1: recover TX flow control by scheduling an invisible reset
 * (no finer-grained recovery is available on this silicon). */
static void falcon_a1_prepare_enable_fc_tx(struct ef4_nic *efx)
{
	/* Schedule a reset to recover */
	ef4_schedule_reset(efx, RESET_TYPE_INVISIBLE);
}
1531
/* Falcon B0: recover TX flow control in place by draining the TX FIFO
 * and reprogramming the XMAC, with stats DMA paused around it. */
static void falcon_b0_prepare_enable_fc_tx(struct ef4_nic *efx)
{
	/* Recover by resetting the EM block */
	falcon_stop_nic_stats(efx);
	falcon_drain_tx_fifo(efx);
	falcon_reconfigure_xmac(efx);
	falcon_start_nic_stats(efx);
}
1540
1541/**************************************************************************
1542 *
1543 * PHY access via GMII
1544 *
1545 **************************************************************************
1546 */
1547
/* Wait for GMII access to complete.
 * Polls FR_AB_MD_STAT until the busy bit clears, then checks the
 * line-fail and bus-error bits.  Returns 0 on success, -EIO on a bus
 * error, -ETIMEDOUT after ~50ms (max taken from the datasheet). */
static int falcon_gmii_wait(struct ef4_nic *efx)
{
	ef4_oword_t md_stat;
	int count;

	/* wait up to 50ms - taken max from datasheet */
	for (count = 0; count < 5000; count++) {
		ef4_reado(efx, &md_stat, FR_AB_MD_STAT);
		if (EF4_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) {
			if (EF4_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 ||
			    EF4_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) {
				netif_err(efx, hw, efx->net_dev,
					  "error from GMII access "
					  EF4_OWORD_FMT"\n",
					  EF4_OWORD_VAL(md_stat));
				return -EIO;
			}
			return 0;
		}
		udelay(10);
	}
	netif_err(efx, hw, efx->net_dev, "timed out waiting for GMII\n");
	return -ETIMEDOUT;
}
1573
/* Write an MDIO register of a PHY connected to Falcon (clause 45:
 * @prtad port, @devad device, @addr register, @value data).
 * Serialised by nic_data->mdio_lock; returns 0 or a negative error
 * from falcon_gmii_wait().  On a timed-out write the operation is
 * explicitly aborted via the MD_GC bit. */
static int falcon_mdio_write(struct net_device *net_dev,
			     int prtad, int devad, u16 addr, u16 value)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t reg;
	int rc;

	netif_vdbg(efx, hw, efx->net_dev,
		   "writing MDIO %d register %d.%d with 0x%04x\n",
		    prtad, devad, addr, value);

	mutex_lock(&nic_data->mdio_lock);

	/* Check MDIO not currently being accessed */
	rc = falcon_gmii_wait(efx);
	if (rc)
		goto out;

	/* Write the address/ID register */
	EF4_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
	ef4_writeo(efx, &reg, FR_AB_MD_PHY_ADR);

	EF4_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
			     FRF_AB_MD_DEV_ADR, devad);
	ef4_writeo(efx, &reg, FR_AB_MD_ID);

	/* Write data */
	EF4_POPULATE_OWORD_1(reg, FRF_AB_MD_TXD, value);
	ef4_writeo(efx, &reg, FR_AB_MD_TXD);

	/* Start the write cycle */
	EF4_POPULATE_OWORD_2(reg,
			     FRF_AB_MD_WRC, 1,
			     FRF_AB_MD_GC, 0);
	ef4_writeo(efx, &reg, FR_AB_MD_CS);

	/* Wait for data to be written */
	rc = falcon_gmii_wait(efx);
	if (rc) {
		/* Abort the write operation */
		EF4_POPULATE_OWORD_2(reg,
				     FRF_AB_MD_WRC, 0,
				     FRF_AB_MD_GC, 1);
		ef4_writeo(efx, &reg, FR_AB_MD_CS);
		udelay(10);
	}

out:
	mutex_unlock(&nic_data->mdio_lock);
	return rc;
}
1626
/* Read an MDIO register of a PHY connected to Falcon (clause 45:
 * @prtad port, @devad device, @addr register).
 * Serialised by nic_data->mdio_lock.  Returns the (non-negative)
 * register value, or a negative error from falcon_gmii_wait(); a
 * timed-out read is aborted via the MD_GC bit. */
static int falcon_mdio_read(struct net_device *net_dev,
			    int prtad, int devad, u16 addr)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t reg;
	int rc;

	mutex_lock(&nic_data->mdio_lock);

	/* Check MDIO not currently being accessed */
	rc = falcon_gmii_wait(efx);
	if (rc)
		goto out;

	EF4_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
	ef4_writeo(efx, &reg, FR_AB_MD_PHY_ADR);

	EF4_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
			     FRF_AB_MD_DEV_ADR, devad);
	ef4_writeo(efx, &reg, FR_AB_MD_ID);

	/* Request data to be read */
	EF4_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0);
	ef4_writeo(efx, &reg, FR_AB_MD_CS);

	/* Wait for data to become available */
	rc = falcon_gmii_wait(efx);
	if (rc == 0) {
		ef4_reado(efx, &reg, FR_AB_MD_RXD);
		rc = EF4_OWORD_FIELD(reg, FRF_AB_MD_RXD);
		netif_vdbg(efx, hw, efx->net_dev,
			   "read from MDIO %d register %d.%d, got %04x\n",
			   prtad, devad, addr, rc);
	} else {
		/* Abort the read operation */
		EF4_POPULATE_OWORD_2(reg,
				     FRF_AB_MD_RIC, 0,
				     FRF_AB_MD_GC, 1);
		ef4_writeo(efx, &reg, FR_AB_MD_CS);

		netif_dbg(efx, hw, efx->net_dev,
			  "read from MDIO %d register %d.%d, got error %d\n",
			  prtad, devad, addr, rc);
	}

out:
	mutex_unlock(&nic_data->mdio_lock);
	return rc;
}
1678
/* This call is responsible for hooking in the MAC and PHY operations.
 * Selects phy_op from efx->phy_type, wires up the MDIO accessors,
 * probes the PHY, sets initial link/flow-control assumptions and
 * allocates the MAC stats DMA buffer.
 * Returns 0 on success or a negative error (-ENODEV for an unknown
 * PHY type, or the error from the PHY probe / buffer allocation). */
static int falcon_probe_port(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	switch (efx->phy_type) {
	case PHY_TYPE_SFX7101:
		efx->phy_op = &falcon_sfx7101_phy_ops;
		break;
	case PHY_TYPE_QT2022C2:
	case PHY_TYPE_QT2025C:
		efx->phy_op = &falcon_qt202x_phy_ops;
		break;
	case PHY_TYPE_TXC43128:
		efx->phy_op = &falcon_txc_phy_ops;
		break;
	default:
		netif_err(efx, probe, efx->net_dev, "Unknown PHY type %d\n",
			  efx->phy_type);
		return -ENODEV;
	}

	/* Fill out MDIO structure and loopback modes */
	mutex_init(&nic_data->mdio_lock);
	efx->mdio.mdio_read = falcon_mdio_read;
	efx->mdio.mdio_write = falcon_mdio_write;
	rc = efx->phy_op->probe(efx);
	if (rc != 0)
		return rc;

	/* Initial assumption */
	efx->link_state.speed = 10000;
	efx->link_state.fd = true;

	/* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0)
		efx->wanted_fc = EF4_FC_RX | EF4_FC_TX;
	else
		efx->wanted_fc = EF4_FC_RX;
	if (efx->mdio.mmds & MDIO_DEVS_AN)
		efx->wanted_fc |= EF4_FC_AUTO;

	/* Allocate buffer for stats */
	rc = ef4_nic_alloc_buffer(efx, &efx->stats_buffer,
				  FALCON_MAC_STATS_SIZE, GFP_KERNEL);
	if (rc)
		return rc;
	netif_dbg(efx, probe, efx->net_dev,
		  "stats buffer at %llx (virt %p phys %llx)\n",
		  (u64)efx->stats_buffer.dma_addr,
		  efx->stats_buffer.addr,
		  (u64)virt_to_phys(efx->stats_buffer.addr));

	return 0;
}
1735
/* Undo falcon_probe_port(): remove the PHY and free the stats buffer. */
static void falcon_remove_port(struct ef4_nic *efx)
{
	efx->phy_op->remove(efx);
	ef4_nic_free_buffer(efx, &efx->stats_buffer);
}
1741
/* Global events are basically PHY events.
 * Returns true if the event was recognised and handled here (PHY
 * interrupts are ignored, B0 XG management interrupts trigger XMAC
 * polling, RX_RECOVERY schedules a reset); false lets the caller
 * handle it. */
static bool
falcon_handle_global_event(struct ef4_channel *channel, ef4_qword_t *event)
{
	struct ef4_nic *efx = channel->efx;
	struct falcon_nic_data *nic_data = efx->nic_data;

	if (EF4_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
	    EF4_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
	    EF4_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR))
		/* Ignored */
		return true;

	if ((ef4_nic_rev(efx) == EF4_REV_FALCON_B0) &&
	    EF4_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
		nic_data->xmac_poll_required = true;
		return true;
	}

	/* RX recovery event: field position differs between A1 and B0 */
	if (ef4_nic_rev(efx) <= EF4_REV_FALCON_A1 ?
	    EF4_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
	    EF4_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen global RX_RESET event. Resetting.\n",
			  channel->channel);

		atomic_inc(&efx->rx_reset);
		ef4_schedule_reset(efx, EF4_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
		return true;
	}

	return false;
}
1776
1777/**************************************************************************
1778 *
1779 * Falcon test code
1780 *
1781 **************************************************************************/
1782
/* Read and validate the NVRAM board configuration from SPI flash (or,
 * failing that, SPI EEPROM): checks the magic number, structure
 * version and 16-bit ones'-complement checksum.  If @nvconfig_out is
 * non-NULL the validated nvconfig structure is copied into it.
 * Returns 0 on success, -EINVAL/-EIO/-ENOMEM on failure. */
static int
falcon_read_nvram(struct ef4_nic *efx, struct falcon_nvconfig *nvconfig_out)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	struct falcon_nvconfig *nvconfig;
	struct falcon_spi_device *spi;
	void *region;
	int rc, magic_num, struct_ver;
	__le16 *word, *limit;
	u32 csum;

	/* Prefer the flash; fall back to the EEPROM */
	if (falcon_spi_present(&nic_data->spi_flash))
		spi = &nic_data->spi_flash;
	else if (falcon_spi_present(&nic_data->spi_eeprom))
		spi = &nic_data->spi_eeprom;
	else
		return -EINVAL;

	region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
	if (!region)
		return -ENOMEM;
	nvconfig = region + FALCON_NVCONFIG_OFFSET;

	mutex_lock(&nic_data->spi_lock);
	rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region);
	mutex_unlock(&nic_data->spi_lock);
	if (rc) {
		netif_err(efx, hw, efx->net_dev, "Failed to read %s\n",
			  falcon_spi_present(&nic_data->spi_flash) ?
			  "flash" : "EEPROM");
		rc = -EIO;
		goto out;
	}

	magic_num = le16_to_cpu(nvconfig->board_magic_num);
	struct_ver = le16_to_cpu(nvconfig->board_struct_ver);

	rc = -EINVAL;
	if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) {
		netif_err(efx, hw, efx->net_dev,
			  "NVRAM bad magic 0x%x\n", magic_num);
		goto out;
	}
	if (struct_ver < 2) {
		netif_err(efx, hw, efx->net_dev,
			  "NVRAM has ancient version 0x%x\n", struct_ver);
		goto out;
	} else if (struct_ver < 4) {
		/* v2/v3: checksum covers only the nvconfig structure */
		word = &nvconfig->board_magic_num;
		limit = (__le16 *) (nvconfig + 1);
	} else {
		/* v4+: checksum covers the whole region */
		word = region;
		limit = region + FALCON_NVCONFIG_END;
	}
	for (csum = 0; word < limit; ++word)
		csum += le16_to_cpu(*word);

	/* Valid when the 16-bit sum is all-ones */
	if (~csum & 0xffff) {
		netif_err(efx, hw, efx->net_dev,
			  "NVRAM has incorrect checksum\n");
		goto out;
	}

	rc = 0;
	if (nvconfig_out)
		memcpy(nvconfig_out, nvconfig, sizeof(*nvconfig));

 out:
	kfree(region);
	return rc;
}
1854
/* Self-test hook: validate NVRAM without keeping its contents. */
static int falcon_test_nvram(struct ef4_nic *efx)
{
	return falcon_read_nvram(efx, NULL);
}
1859
/* Register self-test table for Falcon B0: each entry names a register
 * and a mask of the bits exercised by ef4_farch_test_registers(). */
static const struct ef4_farch_register_test falcon_b0_register_tests[] = {
	{ FR_AZ_ADR_REGION,
	  EF4_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
	{ FR_AZ_RX_CFG,
	  EF4_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
	{ FR_AZ_TX_CFG,
	  EF4_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_TX_RESERVED,
	  EF4_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
	{ FR_AB_MAC_CTRL,
	  EF4_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_SRM_TX_DC_CFG,
	  EF4_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_RX_DC_CFG,
	  EF4_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_RX_DC_PF_WM,
	  EF4_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_BZ_DP_CTRL,
	  EF4_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_GM_CFG2,
	  EF4_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_GMF_CFG0,
	  EF4_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_GLB_CFG,
	  EF4_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_TX_CFG,
	  EF4_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_RX_CFG,
	  EF4_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_RX_PARAM,
	  EF4_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_FC,
	  EF4_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_ADR_LO,
	  EF4_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XX_SD_CTL,
	  EF4_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
};
1898
/* B0 chip self-test: move into a loopback mode (to get the 312MHz
 * clock needed to test the XMAC registers), take the port down, run
 * the register tests, then reset and bring everything back up.
 * Returns 0 or the first error from the reset/bring-up path; the
 * register test result itself is reported via tests->registers. */
static int
falcon_b0_test_chip(struct ef4_nic *efx, struct ef4_self_tests *tests)
{
	enum reset_type reset_method = RESET_TYPE_INVISIBLE;
	int rc, rc2;

	mutex_lock(&efx->mac_lock);
	if (efx->loopback_modes) {
		/* We need the 312 clock from the PHY to test the XMAC
		 * registers, so move into XGMII loopback if available */
		if (efx->loopback_modes & (1 << LOOPBACK_XGMII))
			efx->loopback_mode = LOOPBACK_XGMII;
		else
			efx->loopback_mode = __ffs(efx->loopback_modes);
	}
	__ef4_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	ef4_reset_down(efx, reset_method);

	/* 1 = pass, -1 = fail */
	tests->registers =
		ef4_farch_test_registers(efx, falcon_b0_register_tests,
					 ARRAY_SIZE(falcon_b0_register_tests))
		? -1 : 1;

	rc = falcon_reset_hw(efx, reset_method);
	rc2 = ef4_reset_up(efx, reset_method, rc == 0);
	return rc ? rc : rc2;
}
1928
1929/**************************************************************************
1930 *
1931 * Device reset
1932 *
1933 **************************************************************************
1934 */
1935
1936static enum reset_type falcon_map_reset_reason(enum reset_type reason)
1937{
1938	switch (reason) {
1939	case RESET_TYPE_RX_RECOVERY:
1940	case RESET_TYPE_DMA_ERROR:
1941	case RESET_TYPE_TX_SKIP:
1942		/* These can occasionally occur due to hardware bugs.
1943		 * We try to reset without disrupting the link.
1944		 */
1945		return RESET_TYPE_INVISIBLE;
1946	default:
1947		return RESET_TYPE_ALL;
1948	}
1949}
1950
1951static int falcon_map_reset_flags(u32 *flags)
1952{
1953	enum {
1954		FALCON_RESET_INVISIBLE = (ETH_RESET_DMA | ETH_RESET_FILTER |
1955					  ETH_RESET_OFFLOAD | ETH_RESET_MAC),
1956		FALCON_RESET_ALL = FALCON_RESET_INVISIBLE | ETH_RESET_PHY,
1957		FALCON_RESET_WORLD = FALCON_RESET_ALL | ETH_RESET_IRQ,
1958	};
1959
1960	if ((*flags & FALCON_RESET_WORLD) == FALCON_RESET_WORLD) {
1961		*flags &= ~FALCON_RESET_WORLD;
1962		return RESET_TYPE_WORLD;
1963	}
1964
1965	if ((*flags & FALCON_RESET_ALL) == FALCON_RESET_ALL) {
1966		*flags &= ~FALCON_RESET_ALL;
1967		return RESET_TYPE_ALL;
1968	}
1969
1970	if ((*flags & FALCON_RESET_INVISIBLE) == FALCON_RESET_INVISIBLE) {
1971		*flags &= ~FALCON_RESET_INVISIBLE;
1972		return RESET_TYPE_INVISIBLE;
1973	}
1974
1975	return -EINVAL;
1976}
1977
/* Resets NIC to known state.  This routine must be called in process
 * context and is allowed to sleep.
 *
 * A RESET_TYPE_WORLD reset also resets the PCIe core, so the PCI
 * config space of both functions is saved beforehand and restored
 * afterwards; other reset types exclude the PCIe core, EEPROM/flash
 * interface and (for RESET_TYPE_INVISIBLE) the external PHY.
 *
 * Returns 0 on success, a pci_save_state() error, or -ETIMEDOUT if the
 * hardware never clears its soft-reset bit.
 */
static int __falcon_reset_hw(struct ef4_nic *efx, enum reset_type method)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t glb_ctl_reg_ker;
	int rc;

	netif_dbg(efx, hw, efx->net_dev, "performing %s hardware reset\n",
		  RESET_TYPE(method));

	/* Initiate device reset */
	if (method == RESET_TYPE_WORLD) {
		rc = pci_save_state(efx->pci_dev);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to backup PCI state of primary "
				  "function prior to hardware reset\n");
			goto fail1;
		}
		if (ef4_nic_is_dual_func(efx)) {
			rc = pci_save_state(nic_data->pci_dev2);
			if (rc) {
				netif_err(efx, drv, efx->net_dev,
					  "failed to backup PCI state of "
					  "secondary function prior to "
					  "hardware reset\n");
				goto fail2;
			}
		}

		EF4_POPULATE_OWORD_2(glb_ctl_reg_ker,
				     FRF_AB_EXT_PHY_RST_DUR,
				     FFE_AB_EXT_PHY_RST_DUR_10240US,
				     FRF_AB_SWRST, 1);
	} else {
		EF4_POPULATE_OWORD_7(glb_ctl_reg_ker,
				     /* exclude PHY from "invisible" reset */
				     FRF_AB_EXT_PHY_RST_CTL,
				     method == RESET_TYPE_INVISIBLE,
				     /* exclude EEPROM/flash and PCIe */
				     FRF_AB_PCIE_CORE_RST_CTL, 1,
				     FRF_AB_PCIE_NSTKY_RST_CTL, 1,
				     FRF_AB_PCIE_SD_RST_CTL, 1,
				     FRF_AB_EE_RST_CTL, 1,
				     FRF_AB_EXT_PHY_RST_DUR,
				     FFE_AB_EXT_PHY_RST_DUR_10240US,
				     FRF_AB_SWRST, 1);
	}
	/* Writing FRF_AB_SWRST=1 kicks off the reset */
	ef4_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);

	netif_dbg(efx, hw, efx->net_dev, "waiting for hardware reset\n");
	schedule_timeout_uninterruptible(HZ / 20);

	/* Restore PCI configuration if needed */
	if (method == RESET_TYPE_WORLD) {
		if (ef4_nic_is_dual_func(efx))
			pci_restore_state(nic_data->pci_dev2);
		pci_restore_state(efx->pci_dev);
		netif_dbg(efx, drv, efx->net_dev,
			  "successfully restored PCI config\n");
	}

	/* Assert that reset complete: hardware clears SWRST when done */
	ef4_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
	if (EF4_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) {
		rc = -ETIMEDOUT;
		netif_err(efx, hw, efx->net_dev,
			  "timed out waiting for hardware reset\n");
		goto fail3;
	}
	netif_dbg(efx, hw, efx->net_dev, "hardware reset complete\n");

	return 0;

	/* pci_save_state() and pci_restore_state() MUST be called in pairs */
fail2:
	pci_restore_state(efx->pci_dev);
fail1:
fail3:
	return rc;
}
2060
2061static int falcon_reset_hw(struct ef4_nic *efx, enum reset_type method)
2062{
2063	struct falcon_nic_data *nic_data = efx->nic_data;
2064	int rc;
2065
2066	mutex_lock(&nic_data->spi_lock);
2067	rc = __falcon_reset_hw(efx, method);
2068	mutex_unlock(&nic_data->spi_lock);
2069
2070	return rc;
2071}
2072
/* Periodic hardware monitor.  Called with efx->mac_lock held (enforced
 * by the BUG_ON below).
 *
 * Polls the board sensors (putting the PHY into low-power mode on a
 * sensor fault), polls the link state, and on a link change resets and
 * reconfigures the MAC before notifying the stack.
 */
static void falcon_monitor(struct ef4_nic *efx)
{
	bool link_changed;
	int rc;

	BUG_ON(!mutex_is_locked(&efx->mac_lock));

	rc = falcon_board(efx)->type->monitor(efx);
	if (rc) {
		netif_err(efx, hw, efx->net_dev,
			  "Board sensor %s; shutting down PHY\n",
			  (rc == -ERANGE) ? "reported fault" : "failed");
		efx->phy_mode |= PHY_MODE_LOW_POWER;
		rc = __ef4_reconfigure_port(efx);
		WARN_ON(rc);
	}

	/* In internal loopback the PHY's view of the link is irrelevant */
	if (LOOPBACK_INTERNAL(efx))
		link_changed = falcon_loopback_link_poll(efx);
	else
		link_changed = efx->phy_op->poll(efx);

	if (link_changed) {
		/* Stats must be stopped while the MAC is torn down and
		 * restarted only once it has been reconfigured */
		falcon_stop_nic_stats(efx);
		falcon_deconfigure_mac_wrapper(efx);

		falcon_reset_macs(efx);
		rc = falcon_reconfigure_xmac(efx);
		BUG_ON(rc);

		falcon_start_nic_stats(efx);

		ef4_link_status_changed(efx);
	}

	falcon_poll_xmac(efx);
}
2110
/* Zeroes out the SRAM contents.  This routine must be called in
 * process context and is allowed to sleep.
 *
 * Wakes the SRAM via GPIO1, starts the hardware SRAM initialisation
 * and polls FRF_AZ_SRM_INIT_EN until the hardware clears it.
 * Returns 0 on success or -ETIMEDOUT after ~0.4s of polling.
 */
static int falcon_reset_sram(struct ef4_nic *efx)
{
	ef4_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker;
	int count;

	/* Set the SRAM wake/sleep GPIO appropriately. */
	ef4_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
	EF4_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1);
	EF4_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1);
	ef4_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);

	/* Initiate SRAM reset */
	EF4_POPULATE_OWORD_2(srm_cfg_reg_ker,
			     FRF_AZ_SRM_INIT_EN, 1,
			     FRF_AZ_SRM_NB_SZ, 0);
	ef4_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);

	/* Wait for SRAM reset to complete */
	count = 0;
	do {
		netif_dbg(efx, hw, efx->net_dev,
			  "waiting for SRAM reset (attempt %d)...\n", count);

		/* SRAM reset is slow; expect around 16ms */
		schedule_timeout_uninterruptible(HZ / 50);

		/* Check for reset complete: hardware clears INIT_EN */
		ef4_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
		if (!EF4_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) {
			netif_dbg(efx, hw, efx->net_dev,
				  "SRAM reset complete\n");

			return 0;
		}
	} while (++count < 20);	/* wait up to 0.4 sec */

	netif_err(efx, hw, efx->net_dev, "timed out waiting for SRAM reset\n");
	return -ETIMEDOUT;
}
2153
2154static void falcon_spi_device_init(struct ef4_nic *efx,
2155				  struct falcon_spi_device *spi_device,
2156				  unsigned int device_id, u32 device_type)
2157{
2158	if (device_type != 0) {
2159		spi_device->device_id = device_id;
2160		spi_device->size =
2161			1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE);
2162		spi_device->addr_len =
2163			SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN);
2164		spi_device->munge_address = (spi_device->size == 1 << 9 &&
2165					     spi_device->addr_len == 1);
2166		spi_device->erase_command =
2167			SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ERASE_CMD);
2168		spi_device->erase_size =
2169			1 << SPI_DEV_TYPE_FIELD(device_type,
2170						SPI_DEV_TYPE_ERASE_SIZE);
2171		spi_device->block_size =
2172			1 << SPI_DEV_TYPE_FIELD(device_type,
2173						SPI_DEV_TYPE_BLOCK_SIZE);
2174	} else {
2175		spi_device->size = 0;
2176	}
2177}
2178
/* Extract non-volatile configuration.
 *
 * Reads the NVRAM config block and populates the PHY type/address,
 * SPI device descriptions (board struct v3+ only), permanent MAC
 * address and board revision.  Returns 0 on success or a negative
 * error from falcon_read_nvram()/falcon_probe_board().
 */
static int falcon_probe_nvconfig(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	struct falcon_nvconfig *nvconfig;
	int rc;

	/* Temporary buffer for the NVRAM image; freed before return */
	nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
	if (!nvconfig)
		return -ENOMEM;

	rc = falcon_read_nvram(efx, nvconfig);
	if (rc)
		goto out;

	efx->phy_type = nvconfig->board_v2.port0_phy_type;
	efx->mdio.prtad = nvconfig->board_v2.port0_phy_addr;

	/* SPI device types are only recorded from v3 onwards */
	if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
		falcon_spi_device_init(
			efx, &nic_data->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
			le32_to_cpu(nvconfig->board_v3
				    .spi_device_type[FFE_AB_SPI_DEVICE_FLASH]));
		falcon_spi_device_init(
			efx, &nic_data->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
			le32_to_cpu(nvconfig->board_v3
				    .spi_device_type[FFE_AB_SPI_DEVICE_EEPROM]));
	}

	/* Read the MAC addresses */
	ether_addr_copy(efx->net_dev->perm_addr, nvconfig->mac_address[0]);

	netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n",
		  efx->phy_type, efx->mdio.prtad);

	rc = falcon_probe_board(efx,
				le16_to_cpu(nvconfig->board_v2.board_revision));
out:
	kfree(nvconfig);
	return rc;
}
2220
2221static int falcon_dimension_resources(struct ef4_nic *efx)
2222{
2223	efx->rx_dc_base = 0x20000;
2224	efx->tx_dc_base = 0x26000;
2225	return 0;
2226}
2227
/* Probe all SPI devices on the NIC.
 *
 * Determines from the GPIO3 power-up strap whether the NIC booted from
 * flash or EEPROM and initialises the corresponding SPI device with a
 * default type.  If it booted from internal ASIC settings instead, VPD
 * is disabled and safe SPI clock dividers are programmed so the
 * devices can be accessed for initial programming.
 */
static void falcon_probe_spi_devices(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
	int boot_dev;

	ef4_reado(efx, &gpio_ctl, FR_AB_GPIO_CTL);
	ef4_reado(efx, &nic_stat, FR_AB_NIC_STAT);
	ef4_reado(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);

	if (EF4_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) {
		/* SF_PRST strap distinguishes flash from EEPROM boot */
		boot_dev = (EF4_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ?
			    FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM);
		netif_dbg(efx, probe, efx->net_dev, "Booted from %s\n",
			  boot_dev == FFE_AB_SPI_DEVICE_FLASH ?
			  "flash" : "EEPROM");
	} else {
		/* Disable VPD and set clock dividers to safe
		 * values for initial programming. */
		boot_dev = -1;
		netif_dbg(efx, probe, efx->net_dev,
			  "Booted from internal ASIC settings;"
			  " setting SPI config\n");
		EF4_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0,
				     /* 125 MHz / 7 ~= 20 MHz */
				     FRF_AB_EE_SF_CLOCK_DIV, 7,
				     /* 125 MHz / 63 ~= 2 MHz */
				     FRF_AB_EE_EE_CLOCK_DIV, 63);
		ef4_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
	}

	mutex_init(&nic_data->spi_lock);

	/* Only the boot device is given a default type here; the other
	 * device may be filled in later from NVRAM by
	 * falcon_probe_nvconfig() */
	if (boot_dev == FFE_AB_SPI_DEVICE_FLASH)
		falcon_spi_device_init(efx, &nic_data->spi_flash,
				       FFE_AB_SPI_DEVICE_FLASH,
				       default_flash_type);
	if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM)
		falcon_spi_device_init(efx, &nic_data->spi_eeprom,
				       FFE_AB_SPI_DEVICE_EEPROM,
				       large_eeprom_type);
}
2271
/* Falcon A1 maps a fixed 128KB (0x20000) register window */
static unsigned int falcon_a1_mem_map_size(struct ef4_nic *efx)
{
	return 128 * 1024;
}
2276
2277static unsigned int falcon_b0_mem_map_size(struct ef4_nic *efx)
2278{
2279	/* Map everything up to and including the RSS indirection table.
2280	 * The PCI core takes care of mapping the MSI-X tables.
2281	 */
2282	return FR_BZ_RX_INDIRECTION_TBL +
2283		FR_BZ_RX_INDIRECTION_TBL_STEP * FR_BZ_RX_INDIRECTION_TBL_ROWS;
2284}
2285
/* Probe the NIC: allocate per-NIC state, validate the silicon
 * revision, reset the hardware, read the non-volatile configuration
 * and set up the board and its I2C adapter.
 *
 * On failure all acquired resources are released via the fail labels
 * (in reverse order of acquisition).  Undone by falcon_remove_nic().
 */
static int falcon_probe_nic(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data;
	struct falcon_board *board;
	int rc;

	efx->primary = efx; /* only one usable function per controller */

	/* Allocate storage for hardware specific data */
	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	efx->nic_data = nic_data;
	nic_data->efx = efx;

	rc = -ENODEV;

	if (ef4_farch_fpga_ver(efx) != 0) {
		netif_err(efx, probe, efx->net_dev,
			  "Falcon FPGA not supported\n");
		goto fail1;
	}

	if (ef4_nic_rev(efx) <= EF4_REV_FALCON_A1) {
		ef4_oword_t nic_stat;
		struct pci_dev *dev;
		u8 pci_rev = efx->pci_dev->revision;

		/* Rev A0 silicon is not supported at all */
		if ((pci_rev == 0xff) || (pci_rev == 0)) {
			netif_err(efx, probe, efx->net_dev,
				  "Falcon rev A0 not supported\n");
			goto fail1;
		}
		ef4_reado(efx, &nic_stat, FR_AB_NIC_STAT);
		if (EF4_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) {
			netif_err(efx, probe, efx->net_dev,
				  "Falcon rev A1 1G not supported\n");
			goto fail1;
		}
		if (EF4_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) {
			netif_err(efx, probe, efx->net_dev,
				  "Falcon rev A1 PCI-X not supported\n");
			goto fail1;
		}

		/* Rev A1 is dual-function: find the sibling PCI
		 * function (same bus, devfn + 1) and keep a reference
		 * to it for use during world resets */
		dev = pci_dev_get(efx->pci_dev);
		while ((dev = pci_get_device(PCI_VENDOR_ID_SOLARFLARE,
					     PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1,
					     dev))) {
			if (dev->bus == efx->pci_dev->bus &&
			    dev->devfn == efx->pci_dev->devfn + 1) {
				nic_data->pci_dev2 = dev;
				break;
			}
		}
		if (!nic_data->pci_dev2) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to find secondary function\n");
			rc = -ENODEV;
			goto fail2;
		}
	}

	/* Now we can reset the NIC */
	rc = __falcon_reset_hw(efx, RESET_TYPE_ALL);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
		goto fail3;
	}

	/* Allocate memory for INT_KER */
	rc = ef4_nic_alloc_buffer(efx, &efx->irq_status, sizeof(ef4_oword_t),
				  GFP_KERNEL);
	if (rc)
		goto fail4;
	/* The interrupt status buffer must be 16-byte aligned */
	BUG_ON(efx->irq_status.dma_addr & 0x0f);

	netif_dbg(efx, probe, efx->net_dev,
		  "INT_KER at %llx (virt %p phys %llx)\n",
		  (u64)efx->irq_status.dma_addr,
		  efx->irq_status.addr,
		  (u64)virt_to_phys(efx->irq_status.addr));

	falcon_probe_spi_devices(efx);

	/* Read in the non-volatile configuration */
	rc = falcon_probe_nvconfig(efx);
	if (rc) {
		if (rc == -EINVAL)
			netif_err(efx, probe, efx->net_dev, "NVRAM is invalid\n");
		goto fail5;
	}

	/* A1 supports at most 4 channels */
	efx->max_channels = (ef4_nic_rev(efx) <= EF4_REV_FALCON_A1 ? 4 :
			     EF4_MAX_CHANNELS);
	efx->max_tx_channels = efx->max_channels;
	efx->timer_quantum_ns = 4968; /* 621 cycles */
	efx->timer_max_ns = efx->type->timer_period_max *
			    efx->timer_quantum_ns;

	/* Initialise I2C adapter */
	board = falcon_board(efx);
	board->i2c_adap.owner = THIS_MODULE;
	board->i2c_data = falcon_i2c_bit_operations;
	board->i2c_data.data = efx;
	board->i2c_adap.algo_data = &board->i2c_data;
	board->i2c_adap.dev.parent = &efx->pci_dev->dev;
	strlcpy(board->i2c_adap.name, "SFC4000 GPIO",
		sizeof(board->i2c_adap.name));
	rc = i2c_bit_add_bus(&board->i2c_adap);
	if (rc)
		goto fail5;

	rc = falcon_board(efx)->type->init(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to initialise board\n");
		goto fail6;
	}

	/* Stats start disabled; falcon_start_nic_stats() enables them */
	nic_data->stats_disable_count = 1;
	timer_setup(&nic_data->stats_timer, falcon_stats_timer_func, 0);

	return 0;

 fail6:
	i2c_del_adapter(&board->i2c_adap);
	memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
 fail5:
	ef4_nic_free_buffer(efx, &efx->irq_status);
 fail4:
 fail3:
	if (nic_data->pci_dev2) {
		pci_dev_put(nic_data->pci_dev2);
		nic_data->pci_dev2 = NULL;
	}
 fail2:
 fail1:
	kfree(efx->nic_data);
	return rc;
}
2427
/* Program the RX configuration register: buffer sizes and the XON/XOFF
 * flow-control thresholds for both the MAC FIFO and the control FIFO.
 * The register layout differs between rev A and rev B0+.
 */
static void falcon_init_rx_cfg(struct ef4_nic *efx)
{
	/* RX control FIFO thresholds (32 entries) */
	const unsigned ctrl_xon_thr = 20;
	const unsigned ctrl_xoff_thr = 25;
	ef4_oword_t reg;

	ef4_reado(efx, &reg, FR_AZ_RX_CFG);
	if (ef4_nic_rev(efx) <= EF4_REV_FALCON_A1) {
		/* Data FIFO size is 5.5K.  The RX DMA engine only
		 * supports scattering for user-mode queues, but will
		 * split DMA writes at intervals of RX_USR_BUF_SIZE
		 * (32-byte units) even for kernel-mode queues.  We
		 * set it to be so large that that never happens.
		 */
		EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
		EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
				    (3 * 4096) >> 5);
		/* MAC thresholds are in 256-byte units */
		EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8);
		EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8);
		EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
		EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr);
	} else {
		/* Data FIFO size is 80K; register fields moved */
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
				    EF4_RX_USR_BUF_SIZE >> 5);
		/* Send XON and XOFF at ~3 * max MTU away from empty/full */
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8);
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8);
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);

		/* Enable hash insertion. This is broken for the
		 * 'Falcon' hash so also select Toeplitz TCP/IPv4 and
		 * IPv4 hashes. */
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_INSRT_HDR, 1);
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_ALG, 1);
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_IP_HASH, 1);
	}
	/* Always enable XOFF signal from RX FIFO.  We enable
	 * or disable transmission of pause frames at the MAC. */
	EF4_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
	ef4_writeo(efx, &reg, FR_AZ_RX_CFG);
}
2474
/* This call performs hardware-specific global initialisation, such as
 * defining the descriptor cache sizes and number of RSS channels.
 * It does not set up any buffers, descriptor rings or event queues.
 *
 * Applies the chip-bug workarounds (5129, 7244, 5583), configures RX
 * flow control and (on B0+) the RSS hash, then finishes with the
 * common farch initialisation.  Returns 0 or a negative error from
 * the SRAM reset.
 */
static int falcon_init_nic(struct ef4_nic *efx)
{
	ef4_oword_t temp;
	int rc;

	/* Use on-chip SRAM */
	ef4_reado(efx, &temp, FR_AB_NIC_STAT);
	EF4_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1);
	ef4_writeo(efx, &temp, FR_AB_NIC_STAT);

	rc = falcon_reset_sram(efx);
	if (rc)
		return rc;

	/* Clear the parity enables on the TX data fifos as
	 * they produce false parity errors because of timing issues
	 */
	if (EF4_WORKAROUND_5129(efx)) {
		ef4_reado(efx, &temp, FR_AZ_CSR_SPARE);
		EF4_SET_OWORD_FIELD(temp, FRF_AB_MEM_PERR_EN_TX_DATA, 0);
		ef4_writeo(efx, &temp, FR_AZ_CSR_SPARE);
	}

	/* Limit filter search depth to work around a hardware bug */
	if (EF4_WORKAROUND_7244(efx)) {
		ef4_reado(efx, &temp, FR_BZ_RX_FILTER_CTL);
		EF4_SET_OWORD_FIELD(temp, FRF_BZ_UDP_FULL_SRCH_LIMIT, 8);
		EF4_SET_OWORD_FIELD(temp, FRF_BZ_UDP_WILD_SRCH_LIMIT, 8);
		EF4_SET_OWORD_FIELD(temp, FRF_BZ_TCP_FULL_SRCH_LIMIT, 8);
		EF4_SET_OWORD_FIELD(temp, FRF_BZ_TCP_WILD_SRCH_LIMIT, 8);
		ef4_writeo(efx, &temp, FR_BZ_RX_FILTER_CTL);
	}

	/* XXX This is documented only for Falcon A0/A1 */
	/* Setup RX.  Wait for descriptor is broken and must
	 * be disabled.  RXDP recovery shouldn't be needed, but is.
	 */
	ef4_reado(efx, &temp, FR_AA_RX_SELF_RST);
	EF4_SET_OWORD_FIELD(temp, FRF_AA_RX_NODESC_WAIT_DIS, 1);
	EF4_SET_OWORD_FIELD(temp, FRF_AA_RX_SELF_RST_EN, 1);
	if (EF4_WORKAROUND_5583(efx))
		EF4_SET_OWORD_FIELD(temp, FRF_AA_RX_ISCSI_DIS, 1);
	ef4_writeo(efx, &temp, FR_AA_RX_SELF_RST);

	/* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
	 * descriptors (which is bad).
	 */
	ef4_reado(efx, &temp, FR_AZ_TX_CFG);
	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
	ef4_writeo(efx, &temp, FR_AZ_TX_CFG);

	falcon_init_rx_cfg(efx);

	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
		falcon_b0_rx_push_rss_config(efx, false, efx->rx_indir_table);

		/* Set destination of both TX and RX Flush events */
		EF4_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
		ef4_writeo(efx, &temp, FR_BZ_DP_CTRL);
	}

	ef4_farch_init_common(efx);

	return 0;
}
2543
/* Tear down everything set up by falcon_probe_nic(), in reverse order:
 * board, I2C adapter, interrupt status buffer, then a final hardware
 * reset before releasing the secondary PCI function and the private
 * NIC state.
 */
static void falcon_remove_nic(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	struct falcon_board *board = falcon_board(efx);

	board->type->fini(efx);

	/* Remove I2C adapter and clear it in preparation for a retry */
	i2c_del_adapter(&board->i2c_adap);
	memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));

	ef4_nic_free_buffer(efx, &efx->irq_status);

	/* Leave the hardware in a known quiescent state */
	__falcon_reset_hw(efx, RESET_TYPE_ALL);

	/* Release the second function after the reset */
	if (nic_data->pci_dev2) {
		pci_dev_put(nic_data->pci_dev2);
		nic_data->pci_dev2 = NULL;
	}

	/* Tear down the private nic state */
	kfree(efx->nic_data);
	efx->nic_data = NULL;
}
2569
/* Emit the names of the enabled Falcon statistics into *names and
 * return the number of statistics described (for ethtool).
 */
static size_t falcon_describe_nic_stats(struct ef4_nic *efx, u8 *names)
{
	return ef4_nic_describe_stats(falcon_stat_desc, FALCON_STAT_COUNT,
				      falcon_stat_mask, names);
}
2575
/* Update the statistics buffers from hardware.
 *
 * While stats are enabled (stats_disable_count == 0), accumulates the
 * RX no-descriptor drop counter, folds in the latest XMAC stats DMA
 * (if one has completed), and refreshes derived/software stats.
 * Optionally copies the raw stats into *full_stats and fills in the
 * netdev core counters in *core_stats.  Returns FALCON_STAT_COUNT.
 */
static size_t falcon_update_nic_stats(struct ef4_nic *efx, u64 *full_stats,
				      struct rtnl_link_stats64 *core_stats)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	u64 *stats = nic_data->stats;
	ef4_oword_t cnt;

	if (!nic_data->stats_disable_count) {
		/* This register counter clears on read, so accumulate */
		ef4_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
		stats[FALCON_STAT_rx_nodesc_drop_cnt] +=
			EF4_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);

		if (nic_data->stats_pending &&
		    FALCON_XMAC_STATS_DMA_FLAG(efx)) {
			nic_data->stats_pending = false;
			rmb(); /* read the done flag before the stats */
			ef4_nic_update_stats(
				falcon_stat_desc, FALCON_STAT_COUNT,
				falcon_stat_mask,
				stats, efx->stats_buffer.addr, true);
		}

		/* Update derived statistic */
		ef4_update_diff_stat(&stats[FALCON_STAT_rx_bad_bytes],
				     stats[FALCON_STAT_rx_bytes] -
				     stats[FALCON_STAT_rx_good_bytes] -
				     stats[FALCON_STAT_rx_control] * 64);
		ef4_update_sw_stats(efx, stats);
	}

	if (full_stats)
		memcpy(full_stats, stats, sizeof(u64) * FALCON_STAT_COUNT);

	if (core_stats) {
		core_stats->rx_packets = stats[FALCON_STAT_rx_packets];
		core_stats->tx_packets = stats[FALCON_STAT_tx_packets];
		core_stats->rx_bytes = stats[FALCON_STAT_rx_bytes];
		core_stats->tx_bytes = stats[FALCON_STAT_tx_bytes];
		core_stats->rx_dropped = stats[FALCON_STAT_rx_nodesc_drop_cnt] +
					 stats[GENERIC_STAT_rx_nodesc_trunc] +
					 stats[GENERIC_STAT_rx_noskb_drops];
		core_stats->multicast = stats[FALCON_STAT_rx_multicast];
		core_stats->rx_length_errors =
			stats[FALCON_STAT_rx_gtjumbo] +
			stats[FALCON_STAT_rx_length_error];
		core_stats->rx_crc_errors = stats[FALCON_STAT_rx_bad];
		core_stats->rx_frame_errors = stats[FALCON_STAT_rx_align_error];
		core_stats->rx_fifo_errors = stats[FALCON_STAT_rx_overflow];

		core_stats->rx_errors = (core_stats->rx_length_errors +
					 core_stats->rx_crc_errors +
					 core_stats->rx_frame_errors +
					 stats[FALCON_STAT_rx_symbol_error]);
	}

	return FALCON_STAT_COUNT;
}
2633
2634void falcon_start_nic_stats(struct ef4_nic *efx)
2635{
2636	struct falcon_nic_data *nic_data = efx->nic_data;
2637
2638	spin_lock_bh(&efx->stats_lock);
2639	if (--nic_data->stats_disable_count == 0)
2640		falcon_stats_request(efx);
2641	spin_unlock_bh(&efx->stats_lock);
2642}
2643
/* We don't actually pull stats on Falcon; they arrive by DMA.  Wait
 * 10ms so that a transfer started just before this call (e.g. by
 * falcon_start_nic_stats()) has time to land.
 */
static void falcon_pull_nic_stats(struct ef4_nic *efx)
{
	msleep(10);
}
2651
/* Disable statistics gathering (nesting counter; paired with
 * falcon_start_nic_stats()).  Stops the stats timer and waits briefly
 * for any in-flight stats DMA to complete before marking the transfer
 * finished.  May sleep.
 */
void falcon_stop_nic_stats(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	int i;

	might_sleep();

	spin_lock_bh(&efx->stats_lock);
	++nic_data->stats_disable_count;
	spin_unlock_bh(&efx->stats_lock);

	del_timer_sync(&nic_data->stats_timer);

	/* Wait enough time for the most recent transfer to
	 * complete. */
	for (i = 0; i < 4 && nic_data->stats_pending; i++) {
		if (FALCON_XMAC_STATS_DMA_FLAG(efx))
			break;
		msleep(1);
	}

	spin_lock_bh(&efx->stats_lock);
	falcon_stats_complete(efx);
	spin_unlock_bh(&efx->stats_lock);
}
2677
2678static void falcon_set_id_led(struct ef4_nic *efx, enum ef4_led_mode mode)
2679{
2680	falcon_board(efx)->type->set_id_led(efx, mode);
2681}
2682
2683/**************************************************************************
2684 *
2685 * Wake on LAN
2686 *
2687 **************************************************************************
2688 */
2689
2690static void falcon_get_wol(struct ef4_nic *efx, struct ethtool_wolinfo *wol)
2691{
2692	wol->supported = 0;
2693	wol->wolopts = 0;
2694	memset(&wol->sopass, 0, sizeof(wol->sopass));
2695}
2696
2697static int falcon_set_wol(struct ef4_nic *efx, u32 type)
2698{
2699	if (type != 0)
2700		return -EINVAL;
2701	return 0;
2702}
2703
2704/**************************************************************************
2705 *
2706 * Revision-dependent attributes used by efx.c and nic.c
2707 *
2708 **************************************************************************
2709 */
2710
/* NIC-type operations and constants for Falcon rev A1.  Rev A1 uses
 * the legacy-interrupt and flow-control variants specific to that
 * silicon, and exposes registers through the *_KER aliases.
 */
const struct ef4_nic_type falcon_a1_nic_type = {
	/* Lifecycle and reset */
	.mem_bar = EF4_MEM_BAR,
	.mem_map_size = falcon_a1_mem_map_size,
	.probe = falcon_probe_nic,
	.remove = falcon_remove_nic,
	.init = falcon_init_nic,
	.dimension_resources = falcon_dimension_resources,
	.fini = falcon_irq_ack_a1,
	.monitor = falcon_monitor,
	.map_reset_reason = falcon_map_reset_reason,
	.map_reset_flags = falcon_map_reset_flags,
	.reset = falcon_reset_hw,
	/* Port and MAC */
	.probe_port = falcon_probe_port,
	.remove_port = falcon_remove_port,
	.handle_global_event = falcon_handle_global_event,
	.fini_dmaq = ef4_farch_fini_dmaq,
	.prepare_flush = falcon_prepare_flush,
	.finish_flush = ef4_port_dummy_op_void,
	.prepare_flr = ef4_port_dummy_op_void,
	.finish_flr = ef4_farch_finish_flr,
	/* Statistics */
	.describe_stats = falcon_describe_nic_stats,
	.update_stats = falcon_update_nic_stats,
	.start_stats = falcon_start_nic_stats,
	.pull_stats = falcon_pull_nic_stats,
	.stop_stats = falcon_stop_nic_stats,
	.set_id_led = falcon_set_id_led,
	.push_irq_moderation = falcon_push_irq_moderation,
	.reconfigure_port = falcon_reconfigure_port,
	.prepare_enable_fc_tx = falcon_a1_prepare_enable_fc_tx,
	.reconfigure_mac = falcon_reconfigure_xmac,
	.check_mac_fault = falcon_xmac_check_fault,
	.get_wol = falcon_get_wol,
	.set_wol = falcon_set_wol,
	.resume_wol = ef4_port_dummy_op_void,
	.test_nvram = falcon_test_nvram,
	/* Interrupts: A1 needs its own legacy-interrupt handler */
	.irq_enable_master = ef4_farch_irq_enable_master,
	.irq_test_generate = ef4_farch_irq_test_generate,
	.irq_disable_non_ev = ef4_farch_irq_disable_master,
	.irq_handle_msi = ef4_farch_msi_interrupt,
	.irq_handle_legacy = falcon_legacy_interrupt_a1,
	/* Datapath: common farch implementations */
	.tx_probe = ef4_farch_tx_probe,
	.tx_init = ef4_farch_tx_init,
	.tx_remove = ef4_farch_tx_remove,
	.tx_write = ef4_farch_tx_write,
	.tx_limit_len = ef4_farch_tx_limit_len,
	.rx_push_rss_config = dummy_rx_push_rss_config,
	.rx_probe = ef4_farch_rx_probe,
	.rx_init = ef4_farch_rx_init,
	.rx_remove = ef4_farch_rx_remove,
	.rx_write = ef4_farch_rx_write,
	.rx_defer_refill = ef4_farch_rx_defer_refill,
	.ev_probe = ef4_farch_ev_probe,
	.ev_init = ef4_farch_ev_init,
	.ev_fini = ef4_farch_ev_fini,
	.ev_remove = ef4_farch_ev_remove,
	.ev_process = ef4_farch_ev_process,
	.ev_read_ack = ef4_farch_ev_read_ack,
	.ev_test_generate = ef4_farch_ev_test_generate,

	/* We don't expose the filter table on Falcon A1 as it is not
	 * mapped into function 0, but these implementations still
	 * work with a degenerate case of all tables set to size 0.
	 */
	.filter_table_probe = ef4_farch_filter_table_probe,
	.filter_table_restore = ef4_farch_filter_table_restore,
	.filter_table_remove = ef4_farch_filter_table_remove,
	.filter_insert = ef4_farch_filter_insert,
	.filter_remove_safe = ef4_farch_filter_remove_safe,
	.filter_get_safe = ef4_farch_filter_get_safe,
	.filter_clear_rx = ef4_farch_filter_clear_rx,
	.filter_count_rx_used = ef4_farch_filter_count_rx_used,
	.filter_get_rx_id_limit = ef4_farch_filter_get_rx_id_limit,
	.filter_get_rx_ids = ef4_farch_filter_get_rx_ids,

#ifdef CONFIG_SFC_FALCON_MTD
	.mtd_probe = falcon_mtd_probe,
	.mtd_rename = falcon_mtd_rename,
	.mtd_read = falcon_mtd_read,
	.mtd_erase = falcon_mtd_erase,
	.mtd_write = falcon_mtd_write,
	.mtd_sync = falcon_mtd_sync,
#endif

	/* Hardware constants */
	.revision = EF4_REV_FALCON_A1,
	.txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
	.rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER,
	.buf_tbl_base = FR_AA_BUF_FULL_TBL_KER,
	.evq_ptr_tbl_base = FR_AA_EVQ_PTR_TBL_KER,
	.evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER,
	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
	.rx_buffer_padding = 0x24,
	.can_rx_scatter = false,
	.max_interrupt_mode = EF4_INT_MODE_MSI,
	.timer_period_max =  1 << FRF_AB_TC_TIMER_VAL_WIDTH,
	.offload_features = NETIF_F_IP_CSUM,
};
2807
/* Operations and parameter table for the SFC4000 rev B0 (Falcon B0) NIC.
 * B0 differs from A1 chiefly in supporting MSI-X, RSS (hash + indirection
 * table), RX scatter and the RX IP filter table; most datapath hooks are
 * shared "farch" (Falcon architecture) implementations.
 */
const struct ef4_nic_type falcon_b0_nic_type = {
	.mem_bar = EF4_MEM_BAR,
	.mem_map_size = falcon_b0_mem_map_size,
	.probe = falcon_probe_nic,
	.remove = falcon_remove_nic,
	.init = falcon_init_nic,
	.dimension_resources = falcon_dimension_resources,
	.fini = ef4_port_dummy_op_void,
	.monitor = falcon_monitor,
	.map_reset_reason = falcon_map_reset_reason,
	.map_reset_flags = falcon_map_reset_flags,
	.reset = falcon_reset_hw,
	.probe_port = falcon_probe_port,
	.remove_port = falcon_remove_port,
	.handle_global_event = falcon_handle_global_event,
	.fini_dmaq = ef4_farch_fini_dmaq,
	.prepare_flush = falcon_prepare_flush,
	.finish_flush = ef4_port_dummy_op_void,
	.prepare_flr = ef4_port_dummy_op_void,
	.finish_flr = ef4_farch_finish_flr,
	/* MAC statistics (XMAC DMA-based on Falcon) */
	.describe_stats = falcon_describe_nic_stats,
	.update_stats = falcon_update_nic_stats,
	.start_stats = falcon_start_nic_stats,
	.pull_stats = falcon_pull_nic_stats,
	.stop_stats = falcon_stop_nic_stats,
	.set_id_led = falcon_set_id_led,
	.push_irq_moderation = falcon_push_irq_moderation,
	.reconfigure_port = falcon_reconfigure_port,
	.prepare_enable_fc_tx = falcon_b0_prepare_enable_fc_tx,
	.reconfigure_mac = falcon_reconfigure_xmac,
	.check_mac_fault = falcon_xmac_check_fault,
	.get_wol = falcon_get_wol,
	.set_wol = falcon_set_wol,
	.resume_wol = ef4_port_dummy_op_void,
	.test_chip = falcon_b0_test_chip,
	.test_nvram = falcon_test_nvram,
	/* IRQ handling: shared Falcon-architecture implementations */
	.irq_enable_master = ef4_farch_irq_enable_master,
	.irq_test_generate = ef4_farch_irq_test_generate,
	.irq_disable_non_ev = ef4_farch_irq_disable_master,
	.irq_handle_msi = ef4_farch_msi_interrupt,
	.irq_handle_legacy = ef4_farch_legacy_interrupt,
	.tx_probe = ef4_farch_tx_probe,
	.tx_init = ef4_farch_tx_init,
	.tx_remove = ef4_farch_tx_remove,
	.tx_write = ef4_farch_tx_write,
	.tx_limit_len = ef4_farch_tx_limit_len,
	/* RSS is only available on B0 (dummy op on A1) */
	.rx_push_rss_config = falcon_b0_rx_push_rss_config,
	.rx_probe = ef4_farch_rx_probe,
	.rx_init = ef4_farch_rx_init,
	.rx_remove = ef4_farch_rx_remove,
	.rx_write = ef4_farch_rx_write,
	.rx_defer_refill = ef4_farch_rx_defer_refill,
	.ev_probe = ef4_farch_ev_probe,
	.ev_init = ef4_farch_ev_init,
	.ev_fini = ef4_farch_ev_fini,
	.ev_remove = ef4_farch_ev_remove,
	.ev_process = ef4_farch_ev_process,
	.ev_read_ack = ef4_farch_ev_read_ack,
	.ev_test_generate = ef4_farch_ev_test_generate,
	.filter_table_probe = ef4_farch_filter_table_probe,
	.filter_table_restore = ef4_farch_filter_table_restore,
	.filter_table_remove = ef4_farch_filter_table_remove,
	.filter_update_rx_scatter = ef4_farch_filter_update_rx_scatter,
	.filter_insert = ef4_farch_filter_insert,
	.filter_remove_safe = ef4_farch_filter_remove_safe,
	.filter_get_safe = ef4_farch_filter_get_safe,
	.filter_clear_rx = ef4_farch_filter_clear_rx,
	.filter_count_rx_used = ef4_farch_filter_count_rx_used,
	.filter_get_rx_id_limit = ef4_farch_filter_get_rx_id_limit,
	.filter_get_rx_ids = ef4_farch_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
	.filter_rfs_insert = ef4_farch_filter_rfs_insert,
	.filter_rfs_expire_one = ef4_farch_filter_rfs_expire_one,
#endif
#ifdef CONFIG_SFC_FALCON_MTD
	/* Expose on-board SPI flash/EEPROM partitions through MTD */
	.mtd_probe = falcon_mtd_probe,
	.mtd_rename = falcon_mtd_rename,
	.mtd_read = falcon_mtd_read,
	.mtd_erase = falcon_mtd_erase,
	.mtd_write = falcon_mtd_write,
	.mtd_sync = falcon_mtd_sync,
#endif

	/* Hardware parameters (BZ register layout) */
	.revision = EF4_REV_FALCON_B0,
	.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
	.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
	.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
	.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
	.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
	.rx_prefix_size = FS_BZ_RX_PREFIX_SIZE,
	.rx_hash_offset = FS_BZ_RX_PREFIX_HASH_OFST,
	.rx_buffer_padding = 0,
	.can_rx_scatter = true,
	.max_interrupt_mode = EF4_INT_MODE_MSIX,
	.timer_period_max =  1 << FRF_AB_TC_TIMER_VAL_WIDTH,
	.offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
	.max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
};
/* ==== Linux v5.9 version of this file follows (scraped page boundary) ==== */
   1// SPDX-License-Identifier: GPL-2.0-only
   2/****************************************************************************
   3 * Driver for Solarflare network controllers and boards
   4 * Copyright 2005-2006 Fen Systems Ltd.
   5 * Copyright 2006-2013 Solarflare Communications Inc.
 
 
 
 
   6 */
   7
   8#include <linux/bitops.h>
   9#include <linux/delay.h>
  10#include <linux/pci.h>
  11#include <linux/module.h>
  12#include <linux/seq_file.h>
  13#include <linux/i2c.h>
  14#include <linux/mii.h>
  15#include <linux/slab.h>
  16#include <linux/sched/signal.h>
  17
  18#include "net_driver.h"
  19#include "bitfield.h"
  20#include "efx.h"
  21#include "nic.h"
  22#include "farch_regs.h"
  23#include "io.h"
  24#include "phy.h"
  25#include "workarounds.h"
  26#include "selftest.h"
  27#include "mdio_10g.h"
  28
  29/* Hardware control for SFC4000 (aka Falcon). */
  30
  31/**************************************************************************
  32 *
  33 * NIC stats
  34 *
  35 **************************************************************************
  36 */
  37
  38#define FALCON_MAC_STATS_SIZE 0x100
  39
  40#define XgRxOctets_offset 0x0
  41#define XgRxOctets_WIDTH 48
  42#define XgRxOctetsOK_offset 0x8
  43#define XgRxOctetsOK_WIDTH 48
  44#define XgRxPkts_offset 0x10
  45#define XgRxPkts_WIDTH 32
  46#define XgRxPktsOK_offset 0x14
  47#define XgRxPktsOK_WIDTH 32
  48#define XgRxBroadcastPkts_offset 0x18
  49#define XgRxBroadcastPkts_WIDTH 32
  50#define XgRxMulticastPkts_offset 0x1C
  51#define XgRxMulticastPkts_WIDTH 32
  52#define XgRxUnicastPkts_offset 0x20
  53#define XgRxUnicastPkts_WIDTH 32
  54#define XgRxUndersizePkts_offset 0x24
  55#define XgRxUndersizePkts_WIDTH 32
  56#define XgRxOversizePkts_offset 0x28
  57#define XgRxOversizePkts_WIDTH 32
  58#define XgRxJabberPkts_offset 0x2C
  59#define XgRxJabberPkts_WIDTH 32
  60#define XgRxUndersizeFCSerrorPkts_offset 0x30
  61#define XgRxUndersizeFCSerrorPkts_WIDTH 32
  62#define XgRxDropEvents_offset 0x34
  63#define XgRxDropEvents_WIDTH 32
  64#define XgRxFCSerrorPkts_offset 0x38
  65#define XgRxFCSerrorPkts_WIDTH 32
  66#define XgRxAlignError_offset 0x3C
  67#define XgRxAlignError_WIDTH 32
  68#define XgRxSymbolError_offset 0x40
  69#define XgRxSymbolError_WIDTH 32
  70#define XgRxInternalMACError_offset 0x44
  71#define XgRxInternalMACError_WIDTH 32
  72#define XgRxControlPkts_offset 0x48
  73#define XgRxControlPkts_WIDTH 32
  74#define XgRxPausePkts_offset 0x4C
  75#define XgRxPausePkts_WIDTH 32
  76#define XgRxPkts64Octets_offset 0x50
  77#define XgRxPkts64Octets_WIDTH 32
  78#define XgRxPkts65to127Octets_offset 0x54
  79#define XgRxPkts65to127Octets_WIDTH 32
  80#define XgRxPkts128to255Octets_offset 0x58
  81#define XgRxPkts128to255Octets_WIDTH 32
  82#define XgRxPkts256to511Octets_offset 0x5C
  83#define XgRxPkts256to511Octets_WIDTH 32
  84#define XgRxPkts512to1023Octets_offset 0x60
  85#define XgRxPkts512to1023Octets_WIDTH 32
  86#define XgRxPkts1024to15xxOctets_offset 0x64
  87#define XgRxPkts1024to15xxOctets_WIDTH 32
  88#define XgRxPkts15xxtoMaxOctets_offset 0x68
  89#define XgRxPkts15xxtoMaxOctets_WIDTH 32
  90#define XgRxLengthError_offset 0x6C
  91#define XgRxLengthError_WIDTH 32
  92#define XgTxPkts_offset 0x80
  93#define XgTxPkts_WIDTH 32
  94#define XgTxOctets_offset 0x88
  95#define XgTxOctets_WIDTH 48
  96#define XgTxMulticastPkts_offset 0x90
  97#define XgTxMulticastPkts_WIDTH 32
  98#define XgTxBroadcastPkts_offset 0x94
  99#define XgTxBroadcastPkts_WIDTH 32
 100#define XgTxUnicastPkts_offset 0x98
 101#define XgTxUnicastPkts_WIDTH 32
 102#define XgTxControlPkts_offset 0x9C
 103#define XgTxControlPkts_WIDTH 32
 104#define XgTxPausePkts_offset 0xA0
 105#define XgTxPausePkts_WIDTH 32
 106#define XgTxPkts64Octets_offset 0xA4
 107#define XgTxPkts64Octets_WIDTH 32
 108#define XgTxPkts65to127Octets_offset 0xA8
 109#define XgTxPkts65to127Octets_WIDTH 32
 110#define XgTxPkts128to255Octets_offset 0xAC
 111#define XgTxPkts128to255Octets_WIDTH 32
 112#define XgTxPkts256to511Octets_offset 0xB0
 113#define XgTxPkts256to511Octets_WIDTH 32
 114#define XgTxPkts512to1023Octets_offset 0xB4
 115#define XgTxPkts512to1023Octets_WIDTH 32
 116#define XgTxPkts1024to15xxOctets_offset 0xB8
 117#define XgTxPkts1024to15xxOctets_WIDTH 32
 118#define XgTxPkts1519toMaxOctets_offset 0xBC
 119#define XgTxPkts1519toMaxOctets_WIDTH 32
 120#define XgTxUndersizePkts_offset 0xC0
 121#define XgTxUndersizePkts_WIDTH 32
 122#define XgTxOversizePkts_offset 0xC4
 123#define XgTxOversizePkts_WIDTH 32
 124#define XgTxNonTcpUdpPkt_offset 0xC8
 125#define XgTxNonTcpUdpPkt_WIDTH 16
 126#define XgTxMacSrcErrPkt_offset 0xCC
 127#define XgTxMacSrcErrPkt_WIDTH 16
 128#define XgTxIpSrcErrPkt_offset 0xD0
 129#define XgTxIpSrcErrPkt_WIDTH 16
 130#define XgDmaDone_offset 0xD4
 131#define XgDmaDone_WIDTH 32
 132
/* The XgDmaDone word in the stats DMA buffer is set by hardware when a
 * stats DMA has completed; reading it through this macro lets the driver
 * poll for completion. */
#define FALCON_XMAC_STATS_DMA_FLAG(efx)				\
	(*(u32 *)((efx)->stats_buffer.addr + XgDmaDone_offset))

/* Describe a statistic DMAed by the XMAC: name, field width and offset
 * within the stats buffer (offsets/widths are the Xg* defines above). */
#define FALCON_DMA_STAT(ext_name, hw_name)				\
	[FALCON_STAT_ ## ext_name] =					\
	{ #ext_name,							\
	  /* 48-bit stats are zero-padded to 64 on DMA */		\
	  hw_name ## _ ## WIDTH == 48 ? 64 : hw_name ## _ ## WIDTH,	\
	  hw_name ## _ ## offset }
/* Statistic maintained in software (no DMA source: width/offset = 0) */
#define FALCON_OTHER_STAT(ext_name)					\
	[FALCON_STAT_ ## ext_name] = { #ext_name, 0, 0 }
/* Generic software statistic shared with other sfc drivers */
#define GENERIC_SW_STAT(ext_name)				\
	[GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }

/* Descriptor table for every statistic exposed by this NIC, indexed by
 * FALCON_STAT_*/GENERIC_STAT_* enumerators. */
static const struct ef4_hw_stat_desc falcon_stat_desc[FALCON_STAT_COUNT] = {
	FALCON_DMA_STAT(tx_bytes, XgTxOctets),
	FALCON_DMA_STAT(tx_packets, XgTxPkts),
	FALCON_DMA_STAT(tx_pause, XgTxPausePkts),
	FALCON_DMA_STAT(tx_control, XgTxControlPkts),
	FALCON_DMA_STAT(tx_unicast, XgTxUnicastPkts),
	FALCON_DMA_STAT(tx_multicast, XgTxMulticastPkts),
	FALCON_DMA_STAT(tx_broadcast, XgTxBroadcastPkts),
	FALCON_DMA_STAT(tx_lt64, XgTxUndersizePkts),
	FALCON_DMA_STAT(tx_64, XgTxPkts64Octets),
	FALCON_DMA_STAT(tx_65_to_127, XgTxPkts65to127Octets),
	FALCON_DMA_STAT(tx_128_to_255, XgTxPkts128to255Octets),
	FALCON_DMA_STAT(tx_256_to_511, XgTxPkts256to511Octets),
	FALCON_DMA_STAT(tx_512_to_1023, XgTxPkts512to1023Octets),
	FALCON_DMA_STAT(tx_1024_to_15xx, XgTxPkts1024to15xxOctets),
	FALCON_DMA_STAT(tx_15xx_to_jumbo, XgTxPkts1519toMaxOctets),
	FALCON_DMA_STAT(tx_gtjumbo, XgTxOversizePkts),
	FALCON_DMA_STAT(tx_non_tcpudp, XgTxNonTcpUdpPkt),
	FALCON_DMA_STAT(tx_mac_src_error, XgTxMacSrcErrPkt),
	FALCON_DMA_STAT(tx_ip_src_error, XgTxIpSrcErrPkt),
	FALCON_DMA_STAT(rx_bytes, XgRxOctets),
	FALCON_DMA_STAT(rx_good_bytes, XgRxOctetsOK),
	FALCON_OTHER_STAT(rx_bad_bytes),
	FALCON_DMA_STAT(rx_packets, XgRxPkts),
	FALCON_DMA_STAT(rx_good, XgRxPktsOK),
	FALCON_DMA_STAT(rx_bad, XgRxFCSerrorPkts),
	FALCON_DMA_STAT(rx_pause, XgRxPausePkts),
	FALCON_DMA_STAT(rx_control, XgRxControlPkts),
	FALCON_DMA_STAT(rx_unicast, XgRxUnicastPkts),
	FALCON_DMA_STAT(rx_multicast, XgRxMulticastPkts),
	FALCON_DMA_STAT(rx_broadcast, XgRxBroadcastPkts),
	FALCON_DMA_STAT(rx_lt64, XgRxUndersizePkts),
	FALCON_DMA_STAT(rx_64, XgRxPkts64Octets),
	FALCON_DMA_STAT(rx_65_to_127, XgRxPkts65to127Octets),
	FALCON_DMA_STAT(rx_128_to_255, XgRxPkts128to255Octets),
	FALCON_DMA_STAT(rx_256_to_511, XgRxPkts256to511Octets),
	FALCON_DMA_STAT(rx_512_to_1023, XgRxPkts512to1023Octets),
	FALCON_DMA_STAT(rx_1024_to_15xx, XgRxPkts1024to15xxOctets),
	FALCON_DMA_STAT(rx_15xx_to_jumbo, XgRxPkts15xxtoMaxOctets),
	FALCON_DMA_STAT(rx_gtjumbo, XgRxOversizePkts),
	FALCON_DMA_STAT(rx_bad_lt64, XgRxUndersizeFCSerrorPkts),
	FALCON_DMA_STAT(rx_bad_gtjumbo, XgRxJabberPkts),
	FALCON_DMA_STAT(rx_overflow, XgRxDropEvents),
	FALCON_DMA_STAT(rx_symbol_error, XgRxSymbolError),
	FALCON_DMA_STAT(rx_align_error, XgRxAlignError),
	FALCON_DMA_STAT(rx_length_error, XgRxLengthError),
	FALCON_DMA_STAT(rx_internal_error, XgRxInternalMACError),
	FALCON_OTHER_STAT(rx_nodesc_drop_cnt),
	GENERIC_SW_STAT(rx_nodesc_trunc),
	GENERIC_SW_STAT(rx_noskb_drops),
};
/* All statistics above are always enabled on Falcon */
static const unsigned long falcon_stat_mask[] = {
	[0 ... BITS_TO_LONGS(FALCON_STAT_COUNT) - 1] = ~0UL,
};
 201
 202/**************************************************************************
 203 *
 204 * Basic SPI command set and bit definitions
 205 *
 206 *************************************************************************/
 207
 208#define SPI_WRSR 0x01		/* Write status register */
 209#define SPI_WRITE 0x02		/* Write data to memory array */
 210#define SPI_READ 0x03		/* Read data from memory array */
 211#define SPI_WRDI 0x04		/* Reset write enable latch */
 212#define SPI_RDSR 0x05		/* Read status register */
 213#define SPI_WREN 0x06		/* Set write enable latch */
 214#define SPI_SST_EWSR 0x50	/* SST: Enable write to status register */
 215
 216#define SPI_STATUS_WPEN 0x80	/* Write-protect pin enabled */
 217#define SPI_STATUS_BP2 0x10	/* Block protection bit 2 */
 218#define SPI_STATUS_BP1 0x08	/* Block protection bit 1 */
 219#define SPI_STATUS_BP0 0x04	/* Block protection bit 0 */
 220#define SPI_STATUS_WEN 0x02	/* State of the write enable latch */
 221#define SPI_STATUS_NRDY 0x01	/* Device busy flag */
 222
 223/**************************************************************************
 224 *
 225 * Non-volatile memory layout
 226 *
 227 **************************************************************************
 228 */
 229
 230/* SFC4000 flash is partitioned into:
 231 *     0-0x400       chip and board config (see struct falcon_nvconfig)
 232 *     0x400-0x8000  unused (or may contain VPD if EEPROM not present)
 233 *     0x8000-end    boot code (mapped to PCI expansion ROM)
 234 * SFC4000 small EEPROM (size < 0x400) is used for VPD only.
 235 * SFC4000 large EEPROM (size >= 0x400) is partitioned into:
 236 *     0-0x400       chip and board config
 237 *     configurable  VPD
 238 *     0x800-0x1800  boot config
 239 * Aside from the chip and board config, all of these are optional and may
 240 * be absent or truncated depending on the devices used.
 241 */
 242#define FALCON_NVCONFIG_END 0x400U
 243#define FALCON_FLASH_BOOTCODE_START 0x8000U
 244#define FALCON_EEPROM_BOOTCONFIG_START 0x800U
 245#define FALCON_EEPROM_BOOTCONFIG_END 0x1800U
 246
 247/* Board configuration v2 (v1 is obsolete; later versions are compatible) */
/* Board configuration v2 (v1 is obsolete; later versions are compatible).
 * Layout is fixed on-flash: all multi-byte fields are little-endian and
 * the struct is packed. */
struct falcon_nvconfig_board_v2 {
	__le16 nports;			/* number of network ports fitted */
	u8 port0_phy_addr;		/* MDIO address of port 0 PHY */
	u8 port0_phy_type;
	u8 port1_phy_addr;		/* MDIO address of port 1 PHY */
	u8 port1_phy_type;
	__le16 asic_sub_revision;
	__le16 board_revision;
} __packed;

/* Board configuration v3 extra information */
struct falcon_nvconfig_board_v3 {
	/* One descriptor per SPI device (flash, EEPROM); bit layout is
	 * given by the SPI_DEV_TYPE_* fields below */
	__le32 spi_device_type[2];
} __packed;

/* Bit numbers for spi_device_type (all sizes are log2, in bytes) */
#define SPI_DEV_TYPE_SIZE_LBN 0
#define SPI_DEV_TYPE_SIZE_WIDTH 5
#define SPI_DEV_TYPE_ADDR_LEN_LBN 6
#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
#define SPI_DEV_TYPE_ERASE_CMD_LBN 8
#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
/* Extract one SPI_DEV_TYPE_* field from a spi_device_type word */
#define SPI_DEV_TYPE_FIELD(type, field)					\
	(((type) >> EF4_LOW_BIT(field)) & EF4_MASK32(EF4_WIDTH(field)))

#define FALCON_NVCONFIG_OFFSET 0x300

#define FALCON_NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
/* On-NVRAM chip/board configuration block, starting at offset 0x300.
 * The trailing comments give each field's NVRAM offset. */
struct falcon_nvconfig {
	ef4_oword_t ee_vpd_cfg_reg;			/* 0x300 */
	u8 mac_address[2][8];			/* 0x310 */
	ef4_oword_t pcie_sd_ctl0123_reg;		/* 0x320 */
	ef4_oword_t pcie_sd_ctl45_reg;			/* 0x330 */
	ef4_oword_t pcie_pcs_ctl_stat_reg;		/* 0x340 */
	ef4_oword_t hw_init_reg;			/* 0x350 */
	ef4_oword_t nic_stat_reg;			/* 0x360 */
	ef4_oword_t glb_ctl_reg;			/* 0x370 */
	ef4_oword_t srm_cfg_reg;			/* 0x380 */
	ef4_oword_t spare_reg;				/* 0x390 */
	__le16 board_magic_num;			/* 0x3A0 */
	__le16 board_struct_ver;
	__le16 board_checksum;
	struct falcon_nvconfig_board_v2 board_v2;
	ef4_oword_t ee_base_page_reg;			/* 0x3B0 */
	struct falcon_nvconfig_board_v3 board_v3;	/* 0x3C0 */
} __packed;
 298
 299/*************************************************************************/
 300
 301static int falcon_reset_hw(struct ef4_nic *efx, enum reset_type method);
 302static void falcon_reconfigure_mac_wrapper(struct ef4_nic *efx);
 303
/* Fallback SPI device descriptors, encoded in the SPI_DEV_TYPE_* format
 * (sizes are log2 bytes), used when NVRAM does not describe the devices. */
static const unsigned int
/* "Large" EEPROM device: Atmel AT25640 or similar
 * 8 KB, 16-bit address, 32 B write block */
large_eeprom_type = ((13 << SPI_DEV_TYPE_SIZE_LBN)
		     | (2 << SPI_DEV_TYPE_ADDR_LEN_LBN)
		     | (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN)),
/* Default flash device: Atmel AT25F1024
 * 128 KB, 24-bit address, 32 KB erase block, 256 B write block */
default_flash_type = ((17 << SPI_DEV_TYPE_SIZE_LBN)
		      | (3 << SPI_DEV_TYPE_ADDR_LEN_LBN)
		      | (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN)
		      | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN)
		      | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN));
 317
 318/**************************************************************************
 319 *
 320 * I2C bus - this is a bit-bashing interface using GPIO pins
 321 * Note that it uses the output enables to tristate the outputs
 322 * SDA is the data pin and SCL is the clock
 323 *
 324 **************************************************************************
 325 */
 326static void falcon_setsda(void *data, int state)
 327{
 328	struct ef4_nic *efx = (struct ef4_nic *)data;
 329	ef4_oword_t reg;
 330
 331	ef4_reado(efx, &reg, FR_AB_GPIO_CTL);
 332	EF4_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN, !state);
 333	ef4_writeo(efx, &reg, FR_AB_GPIO_CTL);
 334}
 335
 336static void falcon_setscl(void *data, int state)
 337{
 338	struct ef4_nic *efx = (struct ef4_nic *)data;
 339	ef4_oword_t reg;
 340
 341	ef4_reado(efx, &reg, FR_AB_GPIO_CTL);
 342	EF4_SET_OWORD_FIELD(reg, FRF_AB_GPIO0_OEN, !state);
 343	ef4_writeo(efx, &reg, FR_AB_GPIO_CTL);
 344}
 345
 346static int falcon_getsda(void *data)
 347{
 348	struct ef4_nic *efx = (struct ef4_nic *)data;
 349	ef4_oword_t reg;
 350
 351	ef4_reado(efx, &reg, FR_AB_GPIO_CTL);
 352	return EF4_OWORD_FIELD(reg, FRF_AB_GPIO3_IN);
 353}
 354
 355static int falcon_getscl(void *data)
 356{
 357	struct ef4_nic *efx = (struct ef4_nic *)data;
 358	ef4_oword_t reg;
 359
 360	ef4_reado(efx, &reg, FR_AB_GPIO_CTL);
 361	return EF4_OWORD_FIELD(reg, FRF_AB_GPIO0_IN);
 362}
 363
/* Bit-banged I2C algorithm bindings for the GPIO accessors above.
 * The void *data passed to each callback is the struct ef4_nic *. */
static const struct i2c_algo_bit_data falcon_i2c_bit_operations = {
	.setsda		= falcon_setsda,
	.setscl		= falcon_setscl,
	.getsda		= falcon_getsda,
	.getscl		= falcon_getscl,
	.udelay		= 5,	/* half-period in us => ~100 kHz clock */
	/* Wait up to 50 ms for slave to let us pull SCL high */
	.timeout	= DIV_ROUND_UP(HZ, 20),
};
 373
 374static void falcon_push_irq_moderation(struct ef4_channel *channel)
 375{
 376	ef4_dword_t timer_cmd;
 377	struct ef4_nic *efx = channel->efx;
 378
 379	/* Set timer register */
 380	if (channel->irq_moderation_us) {
 381		unsigned int ticks;
 382
 383		ticks = ef4_usecs_to_ticks(efx, channel->irq_moderation_us);
 384		EF4_POPULATE_DWORD_2(timer_cmd,
 385				     FRF_AB_TC_TIMER_MODE,
 386				     FFE_BB_TIMER_MODE_INT_HLDOFF,
 387				     FRF_AB_TC_TIMER_VAL,
 388				     ticks - 1);
 389	} else {
 390		EF4_POPULATE_DWORD_2(timer_cmd,
 391				     FRF_AB_TC_TIMER_MODE,
 392				     FFE_BB_TIMER_MODE_DIS,
 393				     FRF_AB_TC_TIMER_VAL, 0);
 394	}
 395	BUILD_BUG_ON(FR_AA_TIMER_COMMAND_KER != FR_BZ_TIMER_COMMAND_P0);
 396	ef4_writed_page_locked(efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
 397			       channel->channel);
 398}
 399
 400static void falcon_deconfigure_mac_wrapper(struct ef4_nic *efx);
 401
 402static void falcon_prepare_flush(struct ef4_nic *efx)
 403{
 404	falcon_deconfigure_mac_wrapper(efx);
 405
 406	/* Wait for the tx and rx fifo's to get to the next packet boundary
 407	 * (~1ms without back-pressure), then to drain the remainder of the
 408	 * fifo's at data path speeds (negligible), with a healthy margin. */
 409	msleep(10);
 410}
 411
 412/* Acknowledge a legacy interrupt from Falcon
 413 *
 414 * This acknowledges a legacy (not MSI) interrupt via INT_ACK_KER_REG.
 415 *
 416 * Due to SFC bug 3706 (silicon revision <=A1) reads can be duplicated in the
 417 * BIU. Interrupt acknowledge is read sensitive so must write instead
 418 * (then read to ensure the BIU collector is flushed)
 419 *
 420 * NB most hardware supports MSI interrupts
 421 */
static inline void falcon_irq_ack_a1(struct ef4_nic *efx)
{
	ef4_dword_t reg;

	/* Write an (arbitrary) pattern to the ack field rather than reading
	 * the read-sensitive ack register (SFC bug 3706: reads may be
	 * duplicated in the BIU on silicon <= A1) */
	EF4_POPULATE_DWORD_1(reg, FRF_AA_INT_ACK_KER_FIELD, 0xb7eb7e);
	ef4_writed(efx, &reg, FR_AA_INT_ACK_KER);
	/* Read back to flush the BIU write collector */
	ef4_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS);
}
 430
/* Legacy (INTx) interrupt handler for Falcon A1.
 * The hardware DMAs an interrupt vector into efx->irq_status; a zero
 * vector means the interrupt was not ours.  The vector must be cleared
 * and the interrupt acknowledged (write-based, see falcon_irq_ack_a1)
 * before scheduling NAPI on the signalled channels.
 */
static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
{
	struct ef4_nic *efx = dev_id;
	ef4_oword_t *int_ker = efx->irq_status.addr;
	int syserr;
	int queues;

	/* Check to see if this is our interrupt.  If it isn't, we
	 * exit without having touched the hardware.
	 */
	if (unlikely(EF4_OWORD_IS_ZERO(*int_ker))) {
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d not for me\n", irq,
			   raw_smp_processor_id());
		return IRQ_NONE;
	}
	efx->last_irq_cpu = raw_smp_processor_id();
	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker));

	/* Interrupts may arrive before the driver is ready for them */
	if (!likely(READ_ONCE(efx->irq_soft_enabled)))
		return IRQ_HANDLED;

	/* Check to see if we have a serious error condition */
	syserr = EF4_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
	if (unlikely(syserr))
		return ef4_farch_fatal_interrupt(efx);

	/* Determine interrupting queues, clear interrupt status
	 * register and acknowledge the device interrupt.
	 */
	BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EF4_MAX_CHANNELS);
	queues = EF4_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q);
	EF4_ZERO_OWORD(*int_ker);
	wmb(); /* Ensure the vector is cleared before interrupt ack */
	falcon_irq_ack_a1(efx);

	/* A1 only has two event queues, so only bits 0 and 1 matter */
	if (queues & 1)
		ef4_schedule_channel_irq(ef4_get_channel(efx, 0));
	if (queues & 2)
		ef4_schedule_channel_irq(ef4_get_channel(efx, 1));
	return IRQ_HANDLED;
}
 475
 476/**************************************************************************
 477 *
 478 * RSS
 479 *
 480 **************************************************************************
 481 */
 482static int dummy_rx_push_rss_config(struct ef4_nic *efx, bool user,
 483				    const u32 *rx_indir_table)
 484{
 485	(void) efx;
 486	(void) user;
 487	(void) rx_indir_table;
 488	return -ENOSYS;
 489}
 490
 491static int falcon_b0_rx_push_rss_config(struct ef4_nic *efx, bool user,
 492					const u32 *rx_indir_table)
 493{
 494	ef4_oword_t temp;
 495
 496	(void) user;
 497	/* Set hash key for IPv4 */
 498	memcpy(&temp, efx->rx_hash_key, sizeof(temp));
 499	ef4_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
 500
 501	memcpy(efx->rx_indir_table, rx_indir_table,
 502	       sizeof(efx->rx_indir_table));
 503	ef4_farch_rx_push_indir_table(efx);
 504	return 0;
 505}
 506
 507/**************************************************************************
 508 *
 509 * EEPROM/flash
 510 *
 511 **************************************************************************
 512 */
 513
 514#define FALCON_SPI_MAX_LEN sizeof(ef4_oword_t)
 515
 516static int falcon_spi_poll(struct ef4_nic *efx)
 517{
 518	ef4_oword_t reg;
 519	ef4_reado(efx, &reg, FR_AB_EE_SPI_HCMD);
 520	return EF4_OWORD_FIELD(reg, FRF_AB_EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0;
 521}
 522
 523/* Wait for SPI command completion */
 524static int falcon_spi_wait(struct ef4_nic *efx)
 525{
 526	/* Most commands will finish quickly, so we start polling at
 527	 * very short intervals.  Sometimes the command may have to
 528	 * wait for VPD or expansion ROM access outside of our
 529	 * control, so we allow up to 100 ms. */
 530	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 10);
 531	int i;
 532
 533	for (i = 0; i < 10; i++) {
 534		if (!falcon_spi_poll(efx))
 535			return 0;
 536		udelay(10);
 537	}
 538
 539	for (;;) {
 540		if (!falcon_spi_poll(efx))
 541			return 0;
 542		if (time_after_eq(jiffies, timeout)) {
 543			netif_err(efx, hw, efx->net_dev,
 544				  "timed out waiting for SPI\n");
 545			return -ETIMEDOUT;
 546		}
 547		schedule_timeout_uninterruptible(1);
 548	}
 549}
 550
 551static int
 552falcon_spi_cmd(struct ef4_nic *efx, const struct falcon_spi_device *spi,
 553	       unsigned int command, int address,
 554	       const void *in, void *out, size_t len)
 555{
 556	bool addressed = (address >= 0);
 557	bool reading = (out != NULL);
 558	ef4_oword_t reg;
 559	int rc;
 560
 561	/* Input validation */
 562	if (len > FALCON_SPI_MAX_LEN)
 563		return -EINVAL;
 564
 565	/* Check that previous command is not still running */
 566	rc = falcon_spi_poll(efx);
 567	if (rc)
 568		return rc;
 569
 570	/* Program address register, if we have an address */
 571	if (addressed) {
 572		EF4_POPULATE_OWORD_1(reg, FRF_AB_EE_SPI_HADR_ADR, address);
 573		ef4_writeo(efx, &reg, FR_AB_EE_SPI_HADR);
 574	}
 575
 576	/* Program data register, if we have data */
 577	if (in != NULL) {
 578		memcpy(&reg, in, len);
 579		ef4_writeo(efx, &reg, FR_AB_EE_SPI_HDATA);
 580	}
 581
 582	/* Issue read/write command */
 583	EF4_POPULATE_OWORD_7(reg,
 584			     FRF_AB_EE_SPI_HCMD_CMD_EN, 1,
 585			     FRF_AB_EE_SPI_HCMD_SF_SEL, spi->device_id,
 586			     FRF_AB_EE_SPI_HCMD_DABCNT, len,
 587			     FRF_AB_EE_SPI_HCMD_READ, reading,
 588			     FRF_AB_EE_SPI_HCMD_DUBCNT, 0,
 589			     FRF_AB_EE_SPI_HCMD_ADBCNT,
 590			     (addressed ? spi->addr_len : 0),
 591			     FRF_AB_EE_SPI_HCMD_ENC, command);
 592	ef4_writeo(efx, &reg, FR_AB_EE_SPI_HCMD);
 593
 594	/* Wait for read/write to complete */
 595	rc = falcon_spi_wait(efx);
 596	if (rc)
 597		return rc;
 598
 599	/* Read data */
 600	if (out != NULL) {
 601		ef4_reado(efx, &reg, FR_AB_EE_SPI_HDATA);
 602		memcpy(out, &reg, len);
 603	}
 604
 605	return 0;
 606}
 607
 608static inline u8
 609falcon_spi_munge_command(const struct falcon_spi_device *spi,
 610			 const u8 command, const unsigned int address)
 611{
 612	return command | (((address >> 8) & spi->munge_address) << 3);
 613}
 614
 615static int
 616falcon_spi_read(struct ef4_nic *efx, const struct falcon_spi_device *spi,
 617		loff_t start, size_t len, size_t *retlen, u8 *buffer)
 618{
 619	size_t block_len, pos = 0;
 620	unsigned int command;
 621	int rc = 0;
 622
 623	while (pos < len) {
 624		block_len = min(len - pos, FALCON_SPI_MAX_LEN);
 625
 626		command = falcon_spi_munge_command(spi, SPI_READ, start + pos);
 627		rc = falcon_spi_cmd(efx, spi, command, start + pos, NULL,
 628				    buffer + pos, block_len);
 629		if (rc)
 630			break;
 631		pos += block_len;
 632
 633		/* Avoid locking up the system */
 634		cond_resched();
 635		if (signal_pending(current)) {
 636			rc = -EINTR;
 637			break;
 638		}
 639	}
 640
 641	if (retlen)
 642		*retlen = pos;
 643	return rc;
 644}
 645
 646#ifdef CONFIG_SFC_FALCON_MTD
 647
/* An MTD partition backed by a region of a Falcon SPI flash/EEPROM. */
struct falcon_mtd_partition {
	struct ef4_mtd_partition common;
	const struct falcon_spi_device *spi;	/* underlying SPI device */
	size_t offset;		/* partition start offset within the device */
};

/* Recover the falcon partition from the embedded struct mtd_info */
#define to_falcon_mtd_partition(mtd)				\
	container_of(mtd, struct falcon_mtd_partition, common.mtd)
 656
 657static size_t
 658falcon_spi_write_limit(const struct falcon_spi_device *spi, size_t start)
 659{
 660	return min(FALCON_SPI_MAX_LEN,
 661		   (spi->block_size - (start & (spi->block_size - 1))));
 662}
 663
 664/* Wait up to 10 ms for buffered write completion */
 665static int
 666falcon_spi_wait_write(struct ef4_nic *efx, const struct falcon_spi_device *spi)
 667{
 668	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
 669	u8 status;
 670	int rc;
 671
 672	for (;;) {
 673		rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
 674				    &status, sizeof(status));
 675		if (rc)
 676			return rc;
 677		if (!(status & SPI_STATUS_NRDY))
 678			return 0;
 679		if (time_after_eq(jiffies, timeout)) {
 680			netif_err(efx, hw, efx->net_dev,
 681				  "SPI write timeout on device %d"
 682				  " last status=0x%02x\n",
 683				  spi->device_id, status);
 684			return -ETIMEDOUT;
 685		}
 686		schedule_timeout_uninterruptible(1);
 687	}
 688}
 689
 690static int
 691falcon_spi_write(struct ef4_nic *efx, const struct falcon_spi_device *spi,
 692		 loff_t start, size_t len, size_t *retlen, const u8 *buffer)
 693{
 694	u8 verify_buffer[FALCON_SPI_MAX_LEN];
 695	size_t block_len, pos = 0;
 696	unsigned int command;
 697	int rc = 0;
 698
 699	while (pos < len) {
 700		rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
 701		if (rc)
 702			break;
 703
 704		block_len = min(len - pos,
 705				falcon_spi_write_limit(spi, start + pos));
 706		command = falcon_spi_munge_command(spi, SPI_WRITE, start + pos);
 707		rc = falcon_spi_cmd(efx, spi, command, start + pos,
 708				    buffer + pos, NULL, block_len);
 709		if (rc)
 710			break;
 711
 712		rc = falcon_spi_wait_write(efx, spi);
 713		if (rc)
 714			break;
 715
 716		command = falcon_spi_munge_command(spi, SPI_READ, start + pos);
 717		rc = falcon_spi_cmd(efx, spi, command, start + pos,
 718				    NULL, verify_buffer, block_len);
 719		if (memcmp(verify_buffer, buffer + pos, block_len)) {
 720			rc = -EIO;
 721			break;
 722		}
 723
 724		pos += block_len;
 725
 726		/* Avoid locking up the system */
 727		cond_resched();
 728		if (signal_pending(current)) {
 729			rc = -EINTR;
 730			break;
 731		}
 732	}
 733
 734	if (retlen)
 735		*retlen = pos;
 736	return rc;
 737}
 738
 739static int
 740falcon_spi_slow_wait(struct falcon_mtd_partition *part, bool uninterruptible)
 741{
 742	const struct falcon_spi_device *spi = part->spi;
 743	struct ef4_nic *efx = part->common.mtd.priv;
 744	u8 status;
 745	int rc, i;
 746
 747	/* Wait up to 4s for flash/EEPROM to finish a slow operation. */
 748	for (i = 0; i < 40; i++) {
 749		__set_current_state(uninterruptible ?
 750				    TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
 751		schedule_timeout(HZ / 10);
 752		rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
 753				    &status, sizeof(status));
 754		if (rc)
 755			return rc;
 756		if (!(status & SPI_STATUS_NRDY))
 757			return 0;
 758		if (signal_pending(current))
 759			return -EINTR;
 760	}
 761	pr_err("%s: timed out waiting for %s\n",
 762	       part->common.name, part->common.dev_type_name);
 763	return -ETIMEDOUT;
 764}
 765
 766static int
 767falcon_spi_unlock(struct ef4_nic *efx, const struct falcon_spi_device *spi)
 768{
 769	const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 |
 770				SPI_STATUS_BP0);
 771	u8 status;
 772	int rc;
 773
 774	rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
 775			    &status, sizeof(status));
 776	if (rc)
 777		return rc;
 778
 779	if (!(status & unlock_mask))
 780		return 0; /* already unlocked */
 781
 782	rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
 783	if (rc)
 784		return rc;
 785	rc = falcon_spi_cmd(efx, spi, SPI_SST_EWSR, -1, NULL, NULL, 0);
 786	if (rc)
 787		return rc;
 788
 789	status &= ~unlock_mask;
 790	rc = falcon_spi_cmd(efx, spi, SPI_WRSR, -1, &status,
 791			    NULL, sizeof(status));
 792	if (rc)
 793		return rc;
 794	rc = falcon_spi_wait_write(efx, spi);
 795	if (rc)
 796		return rc;
 797
 798	return 0;
 799}
 800
 801#define FALCON_SPI_VERIFY_BUF_LEN 16
 802
 803static int
 804falcon_spi_erase(struct falcon_mtd_partition *part, loff_t start, size_t len)
 805{
 806	const struct falcon_spi_device *spi = part->spi;
 807	struct ef4_nic *efx = part->common.mtd.priv;
 808	unsigned pos, block_len;
 809	u8 empty[FALCON_SPI_VERIFY_BUF_LEN];
 810	u8 buffer[FALCON_SPI_VERIFY_BUF_LEN];
 811	int rc;
 812
 813	if (len != spi->erase_size)
 814		return -EINVAL;
 815
 816	if (spi->erase_command == 0)
 817		return -EOPNOTSUPP;
 818
 819	rc = falcon_spi_unlock(efx, spi);
 820	if (rc)
 821		return rc;
 822	rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
 823	if (rc)
 824		return rc;
 825	rc = falcon_spi_cmd(efx, spi, spi->erase_command, start, NULL,
 826			    NULL, 0);
 827	if (rc)
 828		return rc;
 829	rc = falcon_spi_slow_wait(part, false);
 830
 831	/* Verify the entire region has been wiped */
 832	memset(empty, 0xff, sizeof(empty));
 833	for (pos = 0; pos < len; pos += block_len) {
 834		block_len = min(len - pos, sizeof(buffer));
 835		rc = falcon_spi_read(efx, spi, start + pos, block_len,
 836				     NULL, buffer);
 837		if (rc)
 838			return rc;
 839		if (memcmp(empty, buffer, block_len))
 840			return -EIO;
 841
 842		/* Avoid locking up the system */
 843		cond_resched();
 844		if (signal_pending(current))
 845			return -EINTR;
 846	}
 847
 848	return rc;
 849}
 850
 851static void falcon_mtd_rename(struct ef4_mtd_partition *part)
 852{
 853	struct ef4_nic *efx = part->mtd.priv;
 854
 855	snprintf(part->name, sizeof(part->name), "%s %s",
 856		 efx->name, part->type_name);
 857}
 858
 859static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
 860			   size_t len, size_t *retlen, u8 *buffer)
 861{
 862	struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
 863	struct ef4_nic *efx = mtd->priv;
 864	struct falcon_nic_data *nic_data = efx->nic_data;
 865	int rc;
 866
 867	rc = mutex_lock_interruptible(&nic_data->spi_lock);
 868	if (rc)
 869		return rc;
 870	rc = falcon_spi_read(efx, part->spi, part->offset + start,
 871			     len, retlen, buffer);
 872	mutex_unlock(&nic_data->spi_lock);
 873	return rc;
 874}
 875
 876static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
 877{
 878	struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
 879	struct ef4_nic *efx = mtd->priv;
 880	struct falcon_nic_data *nic_data = efx->nic_data;
 881	int rc;
 882
 883	rc = mutex_lock_interruptible(&nic_data->spi_lock);
 884	if (rc)
 885		return rc;
 886	rc = falcon_spi_erase(part, part->offset + start, len);
 887	mutex_unlock(&nic_data->spi_lock);
 888	return rc;
 889}
 890
 891static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
 892			    size_t len, size_t *retlen, const u8 *buffer)
 893{
 894	struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
 895	struct ef4_nic *efx = mtd->priv;
 896	struct falcon_nic_data *nic_data = efx->nic_data;
 897	int rc;
 898
 899	rc = mutex_lock_interruptible(&nic_data->spi_lock);
 900	if (rc)
 901		return rc;
 902	rc = falcon_spi_write(efx, part->spi, part->offset + start,
 903			      len, retlen, buffer);
 904	mutex_unlock(&nic_data->spi_lock);
 905	return rc;
 906}
 907
 908static int falcon_mtd_sync(struct mtd_info *mtd)
 909{
 910	struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
 911	struct ef4_nic *efx = mtd->priv;
 912	struct falcon_nic_data *nic_data = efx->nic_data;
 913	int rc;
 914
 915	mutex_lock(&nic_data->spi_lock);
 916	rc = falcon_spi_slow_wait(part, true);
 917	mutex_unlock(&nic_data->spi_lock);
 918	return rc;
 919}
 920
/* Discover and register MTD partitions: the boot ROM region of the SPI
 * flash and the boot-config region of the SPI EEPROM, where the device
 * is present and large enough to contain that region.
 *
 * Returns 0 on success or a negative errno.  On success, ownership of
 * the partition array passes to the MTD layer via ef4_mtd_add(); on
 * failure it is freed here.
 */
static int falcon_mtd_probe(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	struct falcon_mtd_partition *parts;
	struct falcon_spi_device *spi;
	size_t n_parts;
	int rc = -ENODEV;

	ASSERT_RTNL();

	/* Allocate space for maximum number of partitions */
	parts = kcalloc(2, sizeof(*parts), GFP_KERNEL);
	if (!parts)
		return -ENOMEM;
	n_parts = 0;

	/* Boot ROM partition: everything above the bootcode start offset */
	spi = &nic_data->spi_flash;
	if (falcon_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) {
		parts[n_parts].spi = spi;
		parts[n_parts].offset = FALCON_FLASH_BOOTCODE_START;
		parts[n_parts].common.dev_type_name = "flash";
		parts[n_parts].common.type_name = "sfc_flash_bootrom";
		parts[n_parts].common.mtd.type = MTD_NORFLASH;
		parts[n_parts].common.mtd.flags = MTD_CAP_NORFLASH;
		parts[n_parts].common.mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
		parts[n_parts].common.mtd.erasesize = spi->erase_size;
		n_parts++;
	}

	/* Boot-config partition: the bootconfig window of the EEPROM,
	 * clamped to the device size */
	spi = &nic_data->spi_eeprom;
	if (falcon_spi_present(spi) && spi->size > FALCON_EEPROM_BOOTCONFIG_START) {
		parts[n_parts].spi = spi;
		parts[n_parts].offset = FALCON_EEPROM_BOOTCONFIG_START;
		parts[n_parts].common.dev_type_name = "EEPROM";
		parts[n_parts].common.type_name = "sfc_bootconfig";
		parts[n_parts].common.mtd.type = MTD_RAM;
		parts[n_parts].common.mtd.flags = MTD_CAP_RAM;
		parts[n_parts].common.mtd.size =
			min(spi->size, FALCON_EEPROM_BOOTCONFIG_END) -
			FALCON_EEPROM_BOOTCONFIG_START;
		parts[n_parts].common.mtd.erasesize = spi->erase_size;
		n_parts++;
	}

	rc = ef4_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
	if (rc)
		kfree(parts);
	return rc;
}
 970
 971#endif /* CONFIG_SFC_FALCON_MTD */
 972
 973/**************************************************************************
 974 *
 975 * XMAC operations
 976 *
 977 **************************************************************************
 978 */
 979
/* Configure the XAUI driver that is an output from Falcon: program
 * default drive strength and de-emphasis for all four XAUI lanes
 * (A-D).  Called after an XAUI/XGXS reset to restore these analogue
 * settings. */
static void falcon_setup_xaui(struct ef4_nic *efx)
{
	ef4_oword_t sdctl, txdrv;

	/* Move the XAUI into low power, unless there is no PHY, in
	 * which case the XAUI will have to drive a cable. */
	if (efx->phy_type == PHY_TYPE_NONE)
		return;

	/* Set high and low output drive for all four lanes */
	ef4_reado(efx, &sdctl, FR_AB_XX_SD_CTL);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
	ef4_writeo(efx, &sdctl, FR_AB_XX_SD_CTL);

	/* Set default de-emphasis and drive for the TX driver, all lanes */
	EF4_POPULATE_OWORD_8(txdrv,
			     FRF_AB_XX_DEQD, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DEQC, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DEQB, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DEQA, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DTXD, FFE_AB_XX_TXDRV_DTX_DEF,
			     FRF_AB_XX_DTXC, FFE_AB_XX_TXDRV_DTX_DEF,
			     FRF_AB_XX_DTXB, FFE_AB_XX_TXDRV_DTX_DEF,
			     FRF_AB_XX_DTXA, FFE_AB_XX_TXDRV_DTX_DEF);
	ef4_writeo(efx, &txdrv, FR_AB_XX_TXDRV_CTL);
}
1012
/* Reset the XAUI/XGXS block and wait for the reset to complete, then
 * reprogram the XAUI analogue settings.
 *
 * Caller must have disabled statistics (stats_disable_count != 0)
 * since MAC statistics cannot be fetched across an XMAC reset.
 *
 * Returns 0 on success or -ETIMEDOUT if the reset did not complete
 * within ~10ms.
 */
int falcon_reset_xaui(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t reg;
	int count;

	/* Don't fetch MAC statistics over an XMAC reset */
	WARN_ON(nic_data->stats_disable_count == 0);

	/* Start reset sequence */
	EF4_POPULATE_OWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1);
	ef4_writeo(efx, &reg, FR_AB_XX_PWR_RST);

	/* Wait up to 10 ms for completion, then reinitialise */
	for (count = 0; count < 1000; count++) {
		ef4_reado(efx, &reg, FR_AB_XX_PWR_RST);
		if (EF4_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 &&
		    EF4_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) {
			falcon_setup_xaui(efx);
			return 0;
		}
		udelay(10);
	}
	netif_err(efx, hw, efx->net_dev,
		  "timed out waiting for XAUI/XGXS reset\n");
	return -ETIMEDOUT;
}
1040
/* Acknowledge the XMAC status interrupt on Falcon B0 by reading the
 * management interrupt mask register; the value read is discarded.
 * NOTE(review): the read access itself appears to re-arm/acknowledge
 * the interrupt (read-to-clear) — confirm against the SFC4000
 * datasheet.  No-op on pre-B0 silicon and in internal loopback. */
static void falcon_ack_status_intr(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t reg;

	if ((ef4_nic_rev(efx) != EF4_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
		return;

	/* We expect xgmii faults if the wireside link is down */
	if (!efx->link_state.up)
		return;

	/* We can only use this interrupt to signal the negative edge of
	 * xaui_align [we have to poll the positive edge]. */
	if (nic_data->xmac_poll_required)
		return;

	ef4_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
}
1060
1061static bool falcon_xgxs_link_ok(struct ef4_nic *efx)
1062{
1063	ef4_oword_t reg;
1064	bool align_done, link_ok = false;
1065	int sync_status;
1066
1067	/* Read link status */
1068	ef4_reado(efx, &reg, FR_AB_XX_CORE_STAT);
1069
1070	align_done = EF4_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE);
1071	sync_status = EF4_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT);
1072	if (align_done && (sync_status == FFE_AB_XX_STAT_ALL_LANES))
1073		link_ok = true;
1074
1075	/* Clear link status ready for next read */
1076	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES);
1077	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES);
1078	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
1079	ef4_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
1080
1081	return link_ok;
1082}
1083
1084static bool falcon_xmac_link_ok(struct ef4_nic *efx)
1085{
1086	/*
1087	 * Check MAC's XGXS link status except when using XGMII loopback
1088	 * which bypasses the XGXS block.
1089	 * If possible, check PHY's XGXS link status except when using
1090	 * MAC loopback.
1091	 */
1092	return (efx->loopback_mode == LOOPBACK_XGMII ||
1093		falcon_xgxs_link_ok(efx)) &&
1094		(!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) ||
1095		 LOOPBACK_INTERNAL(efx) ||
1096		 ef4_mdio_phyxgxs_lane_sync(efx));
1097}
1098
/* Program the XMAC core registers from the current device state:
 * TX/RX enables, flow control, maximum frame size (derived from the
 * MTU), unicast filtering and the station MAC address. */
static void falcon_reconfigure_xmac_core(struct ef4_nic *efx)
{
	unsigned int max_frame_len;
	ef4_oword_t reg;
	bool rx_fc = !!(efx->link_state.fc & EF4_FC_RX);
	bool tx_fc = !!(efx->link_state.fc & EF4_FC_TX);

	/* Configure MAC  - cut-thru mode is hard wired on */
	EF4_POPULATE_OWORD_3(reg,
			     FRF_AB_XM_RX_JUMBO_MODE, 1,
			     FRF_AB_XM_TX_STAT_EN, 1,
			     FRF_AB_XM_RX_STAT_EN, 1);
	ef4_writeo(efx, &reg, FR_AB_XM_GLB_CFG);

	/* Configure TX */
	EF4_POPULATE_OWORD_6(reg,
			     FRF_AB_XM_TXEN, 1,
			     FRF_AB_XM_TX_PRMBL, 1,
			     FRF_AB_XM_AUTO_PAD, 1,
			     FRF_AB_XM_TXCRC, 1,
			     FRF_AB_XM_FCNTL, tx_fc,
			     FRF_AB_XM_IPG, 0x3);
	ef4_writeo(efx, &reg, FR_AB_XM_TX_CFG);

	/* Configure RX; accept all unicast only when the RX filter is off */
	EF4_POPULATE_OWORD_5(reg,
			     FRF_AB_XM_RXEN, 1,
			     FRF_AB_XM_AUTO_DEPAD, 0,
			     FRF_AB_XM_ACPT_ALL_MCAST, 1,
			     FRF_AB_XM_ACPT_ALL_UCAST, !efx->unicast_filter,
			     FRF_AB_XM_PASS_CRC_ERR, 1);
	ef4_writeo(efx, &reg, FR_AB_XM_RX_CFG);

	/* Set frame length */
	max_frame_len = EF4_MAX_FRAME_LEN(efx->net_dev->mtu);
	EF4_POPULATE_OWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len);
	ef4_writeo(efx, &reg, FR_AB_XM_RX_PARAM);
	EF4_POPULATE_OWORD_2(reg,
			     FRF_AB_XM_MAX_TX_FRM_SIZE, max_frame_len,
			     FRF_AB_XM_TX_JUMBO_MODE, 1);
	ef4_writeo(efx, &reg, FR_AB_XM_TX_PARAM);

	EF4_POPULATE_OWORD_2(reg,
			     FRF_AB_XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
			     FRF_AB_XM_DIS_FCNTL, !rx_fc);
	ef4_writeo(efx, &reg, FR_AB_XM_FC);

	/* Set MAC address: low 4 bytes then high 2 bytes, copied into
	 * the low-order bytes of each register image */
	memcpy(&reg, &efx->net_dev->dev_addr[0], 4);
	ef4_writeo(efx, &reg, FR_AB_XM_ADR_LO);
	memcpy(&reg, &efx->net_dev->dev_addr[4], 2);
	ef4_writeo(efx, &reg, FR_AB_XM_ADR_HI);
}
1152
/* Program the XGXS block's loopback configuration from the current
 * efx->loopback_mode, resetting the XGXS first if the loopback mode
 * has changed (the block is unreliable across such transitions). */
static void falcon_reconfigure_xgxs_core(struct ef4_nic *efx)
{
	ef4_oword_t reg;
	bool xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS);
	bool xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI);
	bool xgmii_loopback = (efx->loopback_mode == LOOPBACK_XGMII);
	bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;

	/* XGXS block is flaky and will need to be reset if moving
	 * into our out of XGMII, XGXS or XAUI loopbacks. */
	ef4_reado(efx, &reg, FR_AB_XX_CORE_STAT);
	old_xgxs_loopback = EF4_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN);
	old_xgmii_loopback = EF4_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN);

	ef4_reado(efx, &reg, FR_AB_XX_SD_CTL);
	old_xaui_loopback = EF4_OWORD_FIELD(reg, FRF_AB_XX_LPBKA);

	/* The PHY driver may have turned XAUI off */
	if ((xgxs_loopback != old_xgxs_loopback) ||
	    (xaui_loopback != old_xaui_loopback) ||
	    (xgmii_loopback != old_xgmii_loopback))
		falcon_reset_xaui(efx);

	/* Force signal detect in XGXS/XAUI loopback; set the XGXS and
	 * XGMII loopback enables */
	ef4_reado(efx, &reg, FR_AB_XX_CORE_STAT);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG,
			    (xgxs_loopback || xaui_loopback) ?
			    FFE_AB_XX_FORCE_SIG_ALL_LANES : 0);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, xgxs_loopback);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN, xgmii_loopback);
	ef4_writeo(efx, &reg, FR_AB_XX_CORE_STAT);

	/* XAUI loopback is enabled per-lane in the SerDes control register */
	ef4_reado(efx, &reg, FR_AB_XX_SD_CTL);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKD, xaui_loopback);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKC, xaui_loopback);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKB, xaui_loopback);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, xaui_loopback);
	ef4_writeo(efx, &reg, FR_AB_XX_SD_CTL);
}
1191
1192
1193/* Try to bring up the Falcon side of the Falcon-Phy XAUI link */
1194static bool falcon_xmac_link_ok_retry(struct ef4_nic *efx, int tries)
1195{
1196	bool mac_up = falcon_xmac_link_ok(efx);
1197
1198	if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS ||
1199	    ef4_phy_mode_disabled(efx->phy_mode))
1200		/* XAUI link is expected to be down */
1201		return mac_up;
1202
1203	falcon_stop_nic_stats(efx);
1204
1205	while (!mac_up && tries) {
1206		netif_dbg(efx, hw, efx->net_dev, "bashing xaui\n");
1207		falcon_reset_xaui(efx);
1208		udelay(200);
1209
1210		mac_up = falcon_xmac_link_ok(efx);
1211		--tries;
1212	}
1213
1214	falcon_start_nic_stats(efx);
1215
1216	return mac_up;
1217}
1218
1219static bool falcon_xmac_check_fault(struct ef4_nic *efx)
1220{
1221	return !falcon_xmac_link_ok_retry(efx, 5);
1222}
1223
/* Reconfigure the whole XMAC path: RX filter mode, XGXS block, XMAC
 * core and MAC wrapper, then retry the link and record whether
 * further polling is needed.  Always returns 0. */
static int falcon_reconfigure_xmac(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	ef4_farch_filter_sync_rx_mode(efx);

	falcon_reconfigure_xgxs_core(efx);
	falcon_reconfigure_xmac_core(efx);

	falcon_reconfigure_mac_wrapper(efx);

	/* If the link did not come up we must poll for it instead of
	 * relying on the status interrupt */
	nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
	falcon_ack_status_intr(efx);

	return 0;
}
1240
1241static void falcon_poll_xmac(struct ef4_nic *efx)
1242{
1243	struct falcon_nic_data *nic_data = efx->nic_data;
1244
1245	/* We expect xgmii faults if the wireside link is down */
1246	if (!efx->link_state.up || !nic_data->xmac_poll_required)
1247		return;
1248
1249	nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
1250	falcon_ack_status_intr(efx);
1251}
1252
1253/**************************************************************************
1254 *
1255 * MAC wrapper
1256 *
1257 **************************************************************************
1258 */
1259
/* Write the software multicast hash filter into the two MAC hash
 * registers.  Caller must hold efx->mac_lock. */
static void falcon_push_multicast_hash(struct ef4_nic *efx)
{
	union ef4_multicast_hash *mc_hash = &efx->multicast_hash;

	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	ef4_writeo(efx, &mc_hash->oword[0], FR_AB_MAC_MC_HASH_REG0);
	ef4_writeo(efx, &mc_hash->oword[1], FR_AB_MAC_MC_HASH_REG1);
}
1269
/* Reset the MAC blocks.  On pre-B0 silicon, use the XMAC internal
 * core reset; on B0, drain the TX FIFO and reset the XGTX/XGRX/EM
 * blocks via GLB_CTL.  Caller must have disabled statistics.
 * On return, MAC_CTRL is restored and the XAUI is reprogrammed. */
static void falcon_reset_macs(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t reg, mac_ctrl;
	int count;

	if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0) {
		/* It's not safe to use GLB_CTL_REG to reset the
		 * macs, so instead use the internal MAC resets
		 */
		EF4_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1);
		ef4_writeo(efx, &reg, FR_AB_XM_GLB_CFG);

		/* Wait up to ~100ms for the core reset bit to clear */
		for (count = 0; count < 10000; count++) {
			ef4_reado(efx, &reg, FR_AB_XM_GLB_CFG);
			if (EF4_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) ==
			    0)
				return;
			udelay(10);
		}

		netif_err(efx, hw, efx->net_dev,
			  "timed out waiting for XMAC core reset\n");
	}

	/* Mac stats will fail whist the TX fifo is draining */
	WARN_ON(nic_data->stats_disable_count == 0);

	/* Enable TX FIFO drain; keep the pre-reset MAC_CTRL value in
	 * mac_ctrl so it can be restored afterwards */
	ef4_reado(efx, &mac_ctrl, FR_AB_MAC_CTRL);
	EF4_SET_OWORD_FIELD(mac_ctrl, FRF_BB_TXFIFO_DRAIN_EN, 1);
	ef4_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);

	/* Reset the XGTX, XGRX and EM blocks together */
	ef4_reado(efx, &reg, FR_AB_GLB_CTL);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_RST_XGTX, 1);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_RST_XGRX, 1);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_RST_EM, 1);
	ef4_writeo(efx, &reg, FR_AB_GLB_CTL);

	/* Poll (up to ~21 iterations of 10us) for all three reset
	 * bits to self-clear */
	count = 0;
	while (1) {
		ef4_reado(efx, &reg, FR_AB_GLB_CTL);
		if (!EF4_OWORD_FIELD(reg, FRF_AB_RST_XGTX) &&
		    !EF4_OWORD_FIELD(reg, FRF_AB_RST_XGRX) &&
		    !EF4_OWORD_FIELD(reg, FRF_AB_RST_EM)) {
			netif_dbg(efx, hw, efx->net_dev,
				  "Completed MAC reset after %d loops\n",
				  count);
			break;
		}
		if (count > 20) {
			netif_err(efx, hw, efx->net_dev, "MAC reset failed\n");
			break;
		}
		count++;
		udelay(10);
	}

	/* Ensure the correct MAC is selected before statistics
	 * are re-enabled by the caller */
	ef4_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);

	falcon_setup_xaui(efx);
}
1333
/* Drain the MAC TX FIFO by resetting the MACs.  Only applicable on
 * Falcon B0 with no loopback active, and skipped if a drain is
 * already in progress. */
static void falcon_drain_tx_fifo(struct ef4_nic *efx)
{
	ef4_oword_t reg;

	if ((ef4_nic_rev(efx) < EF4_REV_FALCON_B0) ||
	    (efx->loopback_mode != LOOPBACK_NONE))
		return;

	ef4_reado(efx, &reg, FR_AB_MAC_CTRL);
	/* There is no point in draining more than once */
	if (EF4_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN))
		return;

	falcon_reset_macs(efx);
}
1349
/* Isolate the MAC from the datapath on Falcon B0: stop RX ingress
 * from the MAC and drain the TX FIFO towards it.  No-op on pre-B0. */
static void falcon_deconfigure_mac_wrapper(struct ef4_nic *efx)
{
	ef4_oword_t reg;

	if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0)
		return;

	/* Isolate the MAC -> RX */
	ef4_reado(efx, &reg, FR_AZ_RX_CFG);
	EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 0);
	ef4_writeo(efx, &reg, FR_AZ_RX_CFG);

	/* Isolate TX -> MAC */
	falcon_drain_tx_fifo(efx);
}
1365
/* Program the MAC wrapper (MAC_CTRL and RX_CFG) from the current link
 * state: link speed, unicast promiscuity, TX drain/RX isolation while
 * a reset is pending or the link is down, and the multicast hash. */
static void falcon_reconfigure_mac_wrapper(struct ef4_nic *efx)
{
	struct ef4_link_state *link_state = &efx->link_state;
	ef4_oword_t reg;
	int link_speed, isolate;

	/* Isolate the datapath while any reset is pending */
	isolate = !!READ_ONCE(efx->reset_pending);

	/* Encode link speed for FRF_AB_MAC_SPEED */
	switch (link_state->speed) {
	case 10000: link_speed = 3; break;
	case 1000:  link_speed = 2; break;
	case 100:   link_speed = 1; break;
	default:    link_speed = 0; break;
	}

	/* MAC_LINK_STATUS controls MAC backpressure but doesn't work
	 * as advertised.  Disable to ensure packets are not
	 * indefinitely held and TX queue can be flushed at any point
	 * while the link is down. */
	EF4_POPULATE_OWORD_5(reg,
			     FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */,
			     FRF_AB_MAC_BCAD_ACPT, 1,
			     FRF_AB_MAC_UC_PROM, !efx->unicast_filter,
			     FRF_AB_MAC_LINK_STATUS, 1, /* always set */
			     FRF_AB_MAC_SPEED, link_speed);
	/* On B0, MAC backpressure can be disabled and packets get
	 * discarded. */
	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
		EF4_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN,
				    !link_state->up || isolate);
	}

	ef4_writeo(efx, &reg, FR_AB_MAC_CTRL);

	/* Restore the multicast hash registers. */
	falcon_push_multicast_hash(efx);

	ef4_reado(efx, &reg, FR_AZ_RX_CFG);
	/* Enable XOFF signal from RX FIFO (we enabled it during NIC
	 * initialisation but it may read back as 0) */
	EF4_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
	/* Unisolate the MAC -> RX */
	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0)
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, !isolate);
	ef4_writeo(efx, &reg, FR_AZ_RX_CFG);
}
1412
/* Kick off a DMA transfer of the MAC statistics into the host stats
 * buffer, and arm the timer that will harvest (or time out) the
 * transfer.  Must not be called while stats are disabled or while a
 * previous request is still pending. */
static void falcon_stats_request(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t reg;

	WARN_ON(nic_data->stats_pending);
	WARN_ON(nic_data->stats_disable_count);

	/* Clear the DMA-completion flag in the buffer before starting;
	 * the barrier orders the clear before the DMA doorbell write */
	FALCON_XMAC_STATS_DMA_FLAG(efx) = 0;
	nic_data->stats_pending = true;
	wmb(); /* ensure done flag is clear */

	/* Initiate DMA transfer of stats */
	EF4_POPULATE_OWORD_2(reg,
			     FRF_AB_MAC_STAT_DMA_CMD, 1,
			     FRF_AB_MAC_STAT_DMA_ADR,
			     efx->stats_buffer.dma_addr);
	ef4_writeo(efx, &reg, FR_AB_MAC_STAT_DMA);

	/* Harvest the stats (or report a timeout) in at most half a second */
	mod_timer(&nic_data->stats_timer, round_jiffies_up(jiffies + HZ / 2));
}
1434
/* Harvest a completed statistics DMA.  If the hardware has set the
 * completion flag, fold the DMA'd values into the software stats;
 * otherwise report a timeout.  No-op if no request is pending. */
static void falcon_stats_complete(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	if (!nic_data->stats_pending)
		return;

	nic_data->stats_pending = false;
	if (FALCON_XMAC_STATS_DMA_FLAG(efx)) {
		rmb(); /* read the done flag before the stats */
		ef4_nic_update_stats(falcon_stat_desc, FALCON_STAT_COUNT,
				     falcon_stat_mask, nic_data->stats,
				     efx->stats_buffer.addr, true);
	} else {
		netif_err(efx, hw, efx->net_dev,
			  "timed out waiting for statistics\n");
	}
}
1453
/* Statistics timer callback: under stats_lock, harvest the previous
 * DMA and, if statistics are still enabled, start the next one. */
static void falcon_stats_timer_func(struct timer_list *t)
{
	struct falcon_nic_data *nic_data = from_timer(nic_data, t,
						      stats_timer);
	struct ef4_nic *efx = nic_data->efx;

	spin_lock(&efx->stats_lock);

	falcon_stats_complete(efx);
	if (nic_data->stats_disable_count == 0)
		falcon_stats_request(efx);

	spin_unlock(&efx->stats_lock);
}
1468
1469static bool falcon_loopback_link_poll(struct ef4_nic *efx)
1470{
1471	struct ef4_link_state old_state = efx->link_state;
1472
1473	WARN_ON(!mutex_is_locked(&efx->mac_lock));
1474	WARN_ON(!LOOPBACK_INTERNAL(efx));
1475
1476	efx->link_state.fd = true;
1477	efx->link_state.fc = efx->wanted_fc;
1478	efx->link_state.up = true;
1479	efx->link_state.speed = 10000;
1480
1481	return !ef4_link_state_equal(&efx->link_state, &old_state);
1482}
1483
/* Fully reconfigure the port: poll the PHY, isolate and reset the
 * MACs, reconfigure PHY then MAC, restart statistics and notify the
 * kernel of any link change.  Always returns 0. */
static int falcon_reconfigure_port(struct ef4_nic *efx)
{
	int rc;

	WARN_ON(ef4_nic_rev(efx) > EF4_REV_FALCON_B0);

	/* Poll the PHY link state *before* reconfiguring it. This means we
	 * will pick up the correct speed (in loopback) to select the correct
	 * MAC.
	 */
	if (LOOPBACK_INTERNAL(efx))
		falcon_loopback_link_poll(efx);
	else
		efx->phy_op->poll(efx);

	/* Stats must be stopped across the MAC reset */
	falcon_stop_nic_stats(efx);
	falcon_deconfigure_mac_wrapper(efx);

	falcon_reset_macs(efx);

	efx->phy_op->reconfigure(efx);
	rc = falcon_reconfigure_xmac(efx);
	BUG_ON(rc);

	falcon_start_nic_stats(efx);

	/* Synchronise efx->link_state with the kernel */
	ef4_link_status_changed(efx);

	return 0;
}
1515
1516/* TX flow control may automatically turn itself off if the link
1517 * partner (intermittently) stops responding to pause frames. There
1518 * isn't any indication that this has happened, so the best we do is
1519 * leave it up to the user to spot this and fix it by cycling transmit
1520 * flow control on this end.
1521 */
1522
/* Falcon A1: recover TX flow control by scheduling an invisible reset
 * (A1 has no finer-grained recovery mechanism). */
static void falcon_a1_prepare_enable_fc_tx(struct ef4_nic *efx)
{
	/* Schedule a reset to recover */
	ef4_schedule_reset(efx, RESET_TYPE_INVISIBLE);
}
1528
/* Falcon B0: recover TX flow control in place by draining the TX FIFO
 * (which resets the EM block) and reconfiguring the XMAC, with
 * statistics stopped around the operation. */
static void falcon_b0_prepare_enable_fc_tx(struct ef4_nic *efx)
{
	/* Recover by resetting the EM block */
	falcon_stop_nic_stats(efx);
	falcon_drain_tx_fifo(efx);
	falcon_reconfigure_xmac(efx);
	falcon_start_nic_stats(efx);
}
1537
1538/**************************************************************************
1539 *
1540 * PHY access via GMII
1541 *
1542 **************************************************************************
1543 */
1544
/* Wait for a GMII/MDIO access to complete.
 * Polls the MD_STAT busy bit every 10us, up to 50ms (the datasheet
 * maximum).  Returns 0 on success, -EIO if the hardware reports a
 * line fault or bus error, or -ETIMEDOUT. */
static int falcon_gmii_wait(struct ef4_nic *efx)
{
	ef4_oword_t md_stat;
	int count;

	/* wait up to 50ms - taken max from datasheet */
	for (count = 0; count < 5000; count++) {
		ef4_reado(efx, &md_stat, FR_AB_MD_STAT);
		if (EF4_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) {
			/* Not busy; check for errors before declaring success */
			if (EF4_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 ||
			    EF4_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) {
				netif_err(efx, hw, efx->net_dev,
					  "error from GMII access "
					  EF4_OWORD_FMT"\n",
					  EF4_OWORD_VAL(md_stat));
				return -EIO;
			}
			return 0;
		}
		udelay(10);
	}
	netif_err(efx, hw, efx->net_dev, "timed out waiting for GMII\n");
	return -ETIMEDOUT;
}
1570
/* Write an MDIO register of a PHY connected to Falcon.
 * Clause-45 style access: program the register address, port and
 * device addresses, the data, then trigger the write and wait for
 * completion.  On timeout the operation is aborted via the MD_GC bit.
 * Returns 0 on success or a negative errno.  Serialised by
 * nic_data->mdio_lock. */
static int falcon_mdio_write(struct net_device *net_dev,
			     int prtad, int devad, u16 addr, u16 value)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t reg;
	int rc;

	netif_vdbg(efx, hw, efx->net_dev,
		   "writing MDIO %d register %d.%d with 0x%04x\n",
		    prtad, devad, addr, value);

	mutex_lock(&nic_data->mdio_lock);

	/* Check MDIO not currently being accessed */
	rc = falcon_gmii_wait(efx);
	if (rc)
		goto out;

	/* Write the address/ID register */
	EF4_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
	ef4_writeo(efx, &reg, FR_AB_MD_PHY_ADR);

	EF4_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
			     FRF_AB_MD_DEV_ADR, devad);
	ef4_writeo(efx, &reg, FR_AB_MD_ID);

	/* Write data */
	EF4_POPULATE_OWORD_1(reg, FRF_AB_MD_TXD, value);
	ef4_writeo(efx, &reg, FR_AB_MD_TXD);

	/* Trigger the write cycle */
	EF4_POPULATE_OWORD_2(reg,
			     FRF_AB_MD_WRC, 1,
			     FRF_AB_MD_GC, 0);
	ef4_writeo(efx, &reg, FR_AB_MD_CS);

	/* Wait for data to be written */
	rc = falcon_gmii_wait(efx);
	if (rc) {
		/* Abort the write operation */
		EF4_POPULATE_OWORD_2(reg,
				     FRF_AB_MD_WRC, 0,
				     FRF_AB_MD_GC, 1);
		ef4_writeo(efx, &reg, FR_AB_MD_CS);
		udelay(10);
	}

out:
	mutex_unlock(&nic_data->mdio_lock);
	return rc;
}
1623
/* Read an MDIO register of a PHY connected to Falcon.
 * Clause-45 style access: program the register address, port and
 * device addresses, trigger the read and wait for completion.  On
 * timeout the operation is aborted via the MD_GC bit.
 * Returns the (non-negative) register value on success or a negative
 * errno.  Serialised by nic_data->mdio_lock. */
static int falcon_mdio_read(struct net_device *net_dev,
			    int prtad, int devad, u16 addr)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t reg;
	int rc;

	mutex_lock(&nic_data->mdio_lock);

	/* Check MDIO not currently being accessed */
	rc = falcon_gmii_wait(efx);
	if (rc)
		goto out;

	EF4_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
	ef4_writeo(efx, &reg, FR_AB_MD_PHY_ADR);

	EF4_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
			     FRF_AB_MD_DEV_ADR, devad);
	ef4_writeo(efx, &reg, FR_AB_MD_ID);

	/* Request data to be read */
	EF4_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0);
	ef4_writeo(efx, &reg, FR_AB_MD_CS);

	/* Wait for data to become available */
	rc = falcon_gmii_wait(efx);
	if (rc == 0) {
		ef4_reado(efx, &reg, FR_AB_MD_RXD);
		rc = EF4_OWORD_FIELD(reg, FRF_AB_MD_RXD);
		netif_vdbg(efx, hw, efx->net_dev,
			   "read from MDIO %d register %d.%d, got %04x\n",
			   prtad, devad, addr, rc);
	} else {
		/* Abort the read operation */
		EF4_POPULATE_OWORD_2(reg,
				     FRF_AB_MD_RIC, 0,
				     FRF_AB_MD_GC, 1);
		ef4_writeo(efx, &reg, FR_AB_MD_CS);

		netif_dbg(efx, hw, efx->net_dev,
			  "read from MDIO %d register %d.%d, got error %d\n",
			  prtad, devad, addr, rc);
	}

out:
	mutex_unlock(&nic_data->mdio_lock);
	return rc;
}
1675
/* This call is responsible for hooking in the MAC and PHY operations:
 * select the PHY driver from efx->phy_type, wire up MDIO access,
 * probe the PHY, initialise flow-control defaults and allocate the
 * DMA buffer for MAC statistics.
 * Returns 0 on success or a negative errno. */
static int falcon_probe_port(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	switch (efx->phy_type) {
	case PHY_TYPE_SFX7101:
		efx->phy_op = &falcon_sfx7101_phy_ops;
		break;
	case PHY_TYPE_QT2022C2:
	case PHY_TYPE_QT2025C:
		efx->phy_op = &falcon_qt202x_phy_ops;
		break;
	case PHY_TYPE_TXC43128:
		efx->phy_op = &falcon_txc_phy_ops;
		break;
	default:
		netif_err(efx, probe, efx->net_dev, "Unknown PHY type %d\n",
			  efx->phy_type);
		return -ENODEV;
	}

	/* Fill out MDIO structure and loopback modes */
	mutex_init(&nic_data->mdio_lock);
	efx->mdio.mdio_read = falcon_mdio_read;
	efx->mdio.mdio_write = falcon_mdio_write;
	rc = efx->phy_op->probe(efx);
	if (rc != 0)
		return rc;

	/* Initial assumption */
	efx->link_state.speed = 10000;
	efx->link_state.fd = true;

	/* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0)
		efx->wanted_fc = EF4_FC_RX | EF4_FC_TX;
	else
		efx->wanted_fc = EF4_FC_RX;
	if (efx->mdio.mmds & MDIO_DEVS_AN)
		efx->wanted_fc |= EF4_FC_AUTO;

	/* Allocate buffer for stats */
	rc = ef4_nic_alloc_buffer(efx, &efx->stats_buffer,
				  FALCON_MAC_STATS_SIZE, GFP_KERNEL);
	if (rc)
		return rc;
	netif_dbg(efx, probe, efx->net_dev,
		  "stats buffer at %llx (virt %p phys %llx)\n",
		  (u64)efx->stats_buffer.dma_addr,
		  efx->stats_buffer.addr,
		  (u64)virt_to_phys(efx->stats_buffer.addr));

	return 0;
}
1732
/* Undo falcon_probe_port(): remove the PHY driver and free the MAC
 * statistics DMA buffer. */
static void falcon_remove_port(struct ef4_nic *efx)
{
	efx->phy_op->remove(efx);
	ef4_nic_free_buffer(efx, &efx->stats_buffer);
}
1738
/* Global events are basically PHY events.
 * Handle a global event on @channel: PHY interrupts are ignored, a B0
 * XMAC management interrupt requests an XMAC poll, and an RX recovery
 * event schedules a reset.  Returns true if the event was recognised
 * and consumed, false for the caller to handle. */
static bool
falcon_handle_global_event(struct ef4_channel *channel, ef4_qword_t *event)
{
	struct ef4_nic *efx = channel->efx;
	struct falcon_nic_data *nic_data = efx->nic_data;

	if (EF4_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
	    EF4_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
	    EF4_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR))
		/* Ignored */
		return true;

	if ((ef4_nic_rev(efx) == EF4_REV_FALCON_B0) &&
	    EF4_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
		nic_data->xmac_poll_required = true;
		return true;
	}

	/* The RX recovery event bit is in a different position on A1 vs B0 */
	if (ef4_nic_rev(efx) <= EF4_REV_FALCON_A1 ?
	    EF4_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
	    EF4_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen global RX_RESET event. Resetting.\n",
			  channel->channel);

		atomic_inc(&efx->rx_reset);
		ef4_schedule_reset(efx, EF4_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
		return true;
	}

	return false;
}
1773
1774/**************************************************************************
1775 *
1776 * Falcon test code
1777 *
1778 **************************************************************************/
1779
/* Read and validate the board configuration from NVRAM (SPI flash if
 * present, otherwise SPI EEPROM): check the magic number, structure
 * version and checksum.  The checksummed range depends on the
 * structure version (v2/v3: the nvconfig struct only; v4+: the whole
 * region up to FALCON_NVCONFIG_END).
 * If @nvconfig_out is non-NULL, the validated config is copied there.
 * Returns 0 on success or a negative errno. */
static int
falcon_read_nvram(struct ef4_nic *efx, struct falcon_nvconfig *nvconfig_out)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	struct falcon_nvconfig *nvconfig;
	struct falcon_spi_device *spi;
	void *region;
	int rc, magic_num, struct_ver;
	__le16 *word, *limit;
	u32 csum;

	if (falcon_spi_present(&nic_data->spi_flash))
		spi = &nic_data->spi_flash;
	else if (falcon_spi_present(&nic_data->spi_eeprom))
		spi = &nic_data->spi_eeprom;
	else
		return -EINVAL;

	region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
	if (!region)
		return -ENOMEM;
	nvconfig = region + FALCON_NVCONFIG_OFFSET;

	mutex_lock(&nic_data->spi_lock);
	rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region);
	mutex_unlock(&nic_data->spi_lock);
	if (rc) {
		netif_err(efx, hw, efx->net_dev, "Failed to read %s\n",
			  falcon_spi_present(&nic_data->spi_flash) ?
			  "flash" : "EEPROM");
		rc = -EIO;
		goto out;
	}

	magic_num = le16_to_cpu(nvconfig->board_magic_num);
	struct_ver = le16_to_cpu(nvconfig->board_struct_ver);

	rc = -EINVAL;
	if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) {
		netif_err(efx, hw, efx->net_dev,
			  "NVRAM bad magic 0x%x\n", magic_num);
		goto out;
	}
	if (struct_ver < 2) {
		netif_err(efx, hw, efx->net_dev,
			  "NVRAM has ancient version 0x%x\n", struct_ver);
		goto out;
	} else if (struct_ver < 4) {
		word = &nvconfig->board_magic_num;
		limit = (__le16 *) (nvconfig + 1);
	} else {
		word = region;
		limit = region + FALCON_NVCONFIG_END;
	}
	/* 16-bit one's-complement-style sum: valid if all bits set */
	for (csum = 0; word < limit; ++word)
		csum += le16_to_cpu(*word);

	if (~csum & 0xffff) {
		netif_err(efx, hw, efx->net_dev,
			  "NVRAM has incorrect checksum\n");
		goto out;
	}

	rc = 0;
	if (nvconfig_out)
		memcpy(nvconfig_out, nvconfig, sizeof(*nvconfig));

 out:
	kfree(region);
	return rc;
}
1851
/* Self-test hook: validate the NVRAM contents without keeping a copy. */
static int falcon_test_nvram(struct ef4_nic *efx)
{
	return falcon_read_nvram(efx, NULL);
}
1856
/* Registers exercised by the B0 register self-test.  Each entry pairs
 * a register address with a 128-bit mask; NOTE(review): the mask
 * presumably selects the writable bits safe to toggle during the test
 * — confirm against ef4_farch_test_registers(). */
static const struct ef4_farch_register_test falcon_b0_register_tests[] = {
	{ FR_AZ_ADR_REGION,
	  EF4_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
	{ FR_AZ_RX_CFG,
	  EF4_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
	{ FR_AZ_TX_CFG,
	  EF4_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_TX_RESERVED,
	  EF4_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
	{ FR_AB_MAC_CTRL,
	  EF4_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_SRM_TX_DC_CFG,
	  EF4_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_RX_DC_CFG,
	  EF4_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_RX_DC_PF_WM,
	  EF4_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_BZ_DP_CTRL,
	  EF4_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_GM_CFG2,
	  EF4_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_GMF_CFG0,
	  EF4_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_GLB_CFG,
	  EF4_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_TX_CFG,
	  EF4_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_RX_CFG,
	  EF4_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_RX_PARAM,
	  EF4_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_FC,
	  EF4_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_ADR_LO,
	  EF4_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XX_SD_CTL,
	  EF4_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
};
1895
/* Offline chip self-test for B0: run the register read/write test while
 * the port is quiesced.  Moves the port into a loopback mode first (the
 * XMAC registers need the 312MHz clock from the PHY), tears the device
 * down around the register test, then resets and brings it back up.
 * tests->registers is set to 1 on pass or -1 on failure.
 */
static int
falcon_b0_test_chip(struct ef4_nic *efx, struct ef4_self_tests *tests)
{
	enum reset_type reset_method = RESET_TYPE_INVISIBLE;
	int rc, rc2;

	mutex_lock(&efx->mac_lock);
	if (efx->loopback_modes) {
		/* We need the 312 clock from the PHY to test the XMAC
		 * registers, so move into XGMII loopback if available */
		if (efx->loopback_modes & (1 << LOOPBACK_XGMII))
			efx->loopback_mode = LOOPBACK_XGMII;
		else
			efx->loopback_mode = __ffs(efx->loopback_modes);
	}
	__ef4_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	ef4_reset_down(efx, reset_method);

	tests->registers =
		ef4_farch_test_registers(efx, falcon_b0_register_tests,
					 ARRAY_SIZE(falcon_b0_register_tests))
		? -1 : 1;

	/* The register test clobbers hardware state; reset before
	 * bringing the device back up.  Report the first failure. */
	rc = falcon_reset_hw(efx, reset_method);
	rc2 = ef4_reset_up(efx, reset_method, rc == 0);
	return rc ? rc : rc2;
}
1925
1926/**************************************************************************
1927 *
1928 * Device reset
1929 *
1930 **************************************************************************
1931 */
1932
1933static enum reset_type falcon_map_reset_reason(enum reset_type reason)
1934{
1935	switch (reason) {
1936	case RESET_TYPE_RX_RECOVERY:
1937	case RESET_TYPE_DMA_ERROR:
1938	case RESET_TYPE_TX_SKIP:
1939		/* These can occasionally occur due to hardware bugs.
1940		 * We try to reset without disrupting the link.
1941		 */
1942		return RESET_TYPE_INVISIBLE;
1943	default:
1944		return RESET_TYPE_ALL;
1945	}
1946}
1947
1948static int falcon_map_reset_flags(u32 *flags)
1949{
1950	enum {
1951		FALCON_RESET_INVISIBLE = (ETH_RESET_DMA | ETH_RESET_FILTER |
1952					  ETH_RESET_OFFLOAD | ETH_RESET_MAC),
1953		FALCON_RESET_ALL = FALCON_RESET_INVISIBLE | ETH_RESET_PHY,
1954		FALCON_RESET_WORLD = FALCON_RESET_ALL | ETH_RESET_IRQ,
1955	};
1956
1957	if ((*flags & FALCON_RESET_WORLD) == FALCON_RESET_WORLD) {
1958		*flags &= ~FALCON_RESET_WORLD;
1959		return RESET_TYPE_WORLD;
1960	}
1961
1962	if ((*flags & FALCON_RESET_ALL) == FALCON_RESET_ALL) {
1963		*flags &= ~FALCON_RESET_ALL;
1964		return RESET_TYPE_ALL;
1965	}
1966
1967	if ((*flags & FALCON_RESET_INVISIBLE) == FALCON_RESET_INVISIBLE) {
1968		*flags &= ~FALCON_RESET_INVISIBLE;
1969		return RESET_TYPE_INVISIBLE;
1970	}
1971
1972	return -EINVAL;
1973}
1974
/* Resets NIC to known state.  This routine must be called in process
 * context and is allowed to sleep.
 *
 * For RESET_TYPE_WORLD the PCI config space of both functions is saved
 * before the reset and restored afterwards.  For lesser resets the PCIe
 * core and EEPROM/flash interface are excluded from the soft reset, and
 * the PHY is additionally excluded for RESET_TYPE_INVISIBLE.
 * Caller must hold nic_data->spi_lock (see falcon_reset_hw()).
 */
static int __falcon_reset_hw(struct ef4_nic *efx, enum reset_type method)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t glb_ctl_reg_ker;
	int rc;

	netif_dbg(efx, hw, efx->net_dev, "performing %s hardware reset\n",
		  RESET_TYPE(method));

	/* Initiate device reset */
	if (method == RESET_TYPE_WORLD) {
		rc = pci_save_state(efx->pci_dev);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to backup PCI state of primary "
				  "function prior to hardware reset\n");
			goto fail1;
		}
		if (ef4_nic_is_dual_func(efx)) {
			rc = pci_save_state(nic_data->pci_dev2);
			if (rc) {
				netif_err(efx, drv, efx->net_dev,
					  "failed to backup PCI state of "
					  "secondary function prior to "
					  "hardware reset\n");
				goto fail2;
			}
		}

		EF4_POPULATE_OWORD_2(glb_ctl_reg_ker,
				     FRF_AB_EXT_PHY_RST_DUR,
				     FFE_AB_EXT_PHY_RST_DUR_10240US,
				     FRF_AB_SWRST, 1);
	} else {
		EF4_POPULATE_OWORD_7(glb_ctl_reg_ker,
				     /* exclude PHY from "invisible" reset */
				     FRF_AB_EXT_PHY_RST_CTL,
				     method == RESET_TYPE_INVISIBLE,
				     /* exclude EEPROM/flash and PCIe */
				     FRF_AB_PCIE_CORE_RST_CTL, 1,
				     FRF_AB_PCIE_NSTKY_RST_CTL, 1,
				     FRF_AB_PCIE_SD_RST_CTL, 1,
				     FRF_AB_EE_RST_CTL, 1,
				     FRF_AB_EXT_PHY_RST_DUR,
				     FFE_AB_EXT_PHY_RST_DUR_10240US,
				     FRF_AB_SWRST, 1);
	}
	ef4_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);

	/* Give the reset ~50ms to complete before checking */
	netif_dbg(efx, hw, efx->net_dev, "waiting for hardware reset\n");
	schedule_timeout_uninterruptible(HZ / 20);

	/* Restore PCI configuration if needed */
	if (method == RESET_TYPE_WORLD) {
		if (ef4_nic_is_dual_func(efx))
			pci_restore_state(nic_data->pci_dev2);
		pci_restore_state(efx->pci_dev);
		netif_dbg(efx, drv, efx->net_dev,
			  "successfully restored PCI config\n");
	}

	/* Assert that reset complete: hardware clears SWRST when done */
	ef4_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
	if (EF4_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) {
		rc = -ETIMEDOUT;
		netif_err(efx, hw, efx->net_dev,
			  "timed out waiting for hardware reset\n");
		goto fail3;
	}
	netif_dbg(efx, hw, efx->net_dev, "hardware reset complete\n");

	return 0;

	/* pci_save_state() and pci_restore_state() MUST be called in pairs */
fail2:
	pci_restore_state(efx->pci_dev);
fail1:
fail3:
	return rc;
}
2057
2058static int falcon_reset_hw(struct ef4_nic *efx, enum reset_type method)
2059{
2060	struct falcon_nic_data *nic_data = efx->nic_data;
2061	int rc;
2062
2063	mutex_lock(&nic_data->spi_lock);
2064	rc = __falcon_reset_hw(efx, method);
2065	mutex_unlock(&nic_data->spi_lock);
2066
2067	return rc;
2068}
2069
/* Periodic hardware monitor: poll the board sensors and the link state.
 * Called with efx->mac_lock held (asserted below).  On a sensor fault
 * the PHY is put into low-power mode; on a link change the MAC wrapper
 * is torn down, reset and reconfigured before the new state is
 * reported.
 */
static void falcon_monitor(struct ef4_nic *efx)
{
	bool link_changed;
	int rc;

	BUG_ON(!mutex_is_locked(&efx->mac_lock));

	rc = falcon_board(efx)->type->monitor(efx);
	if (rc) {
		netif_err(efx, hw, efx->net_dev,
			  "Board sensor %s; shutting down PHY\n",
			  (rc == -ERANGE) ? "reported fault" : "failed");
		efx->phy_mode |= PHY_MODE_LOW_POWER;
		rc = __ef4_reconfigure_port(efx);
		WARN_ON(rc);
	}

	/* In internal loopback the PHY link state is irrelevant; poll
	 * the loopback state instead */
	if (LOOPBACK_INTERNAL(efx))
		link_changed = falcon_loopback_link_poll(efx);
	else
		link_changed = efx->phy_op->poll(efx);

	if (link_changed) {
		/* Stats DMA must be quiesced while the MAC is reset */
		falcon_stop_nic_stats(efx);
		falcon_deconfigure_mac_wrapper(efx);

		falcon_reset_macs(efx);
		rc = falcon_reconfigure_xmac(efx);
		BUG_ON(rc);

		falcon_start_nic_stats(efx);

		ef4_link_status_changed(efx);
	}

	falcon_poll_xmac(efx);
}
2107
/* Zeroes out the SRAM contents.  This routine must be called in
 * process context and is allowed to sleep.
 *
 * Wakes the SRAM via GPIO1, kicks off the hardware SRAM initialisation
 * and polls until the hardware clears SRM_INIT_EN, sleeping ~20ms
 * between polls.  Returns 0 on success or -ETIMEDOUT after ~0.4s.
 */
static int falcon_reset_sram(struct ef4_nic *efx)
{
	ef4_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker;
	int count;

	/* Set the SRAM wake/sleep GPIO appropriately. */
	ef4_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
	EF4_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1);
	EF4_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1);
	ef4_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);

	/* Initiate SRAM reset */
	EF4_POPULATE_OWORD_2(srm_cfg_reg_ker,
			     FRF_AZ_SRM_INIT_EN, 1,
			     FRF_AZ_SRM_NB_SZ, 0);
	ef4_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);

	/* Wait for SRAM reset to complete */
	count = 0;
	do {
		netif_dbg(efx, hw, efx->net_dev,
			  "waiting for SRAM reset (attempt %d)...\n", count);

		/* SRAM reset is slow; expect around 16ms */
		schedule_timeout_uninterruptible(HZ / 50);

		/* Check for reset complete */
		ef4_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
		if (!EF4_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) {
			netif_dbg(efx, hw, efx->net_dev,
				  "SRAM reset complete\n");

			return 0;
		}
	} while (++count < 20);	/* wait up to 0.4 sec */

	netif_err(efx, hw, efx->net_dev, "timed out waiting for SRAM reset\n");
	return -ETIMEDOUT;
}
2150
2151static void falcon_spi_device_init(struct ef4_nic *efx,
2152				  struct falcon_spi_device *spi_device,
2153				  unsigned int device_id, u32 device_type)
2154{
2155	if (device_type != 0) {
2156		spi_device->device_id = device_id;
2157		spi_device->size =
2158			1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE);
2159		spi_device->addr_len =
2160			SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN);
2161		spi_device->munge_address = (spi_device->size == 1 << 9 &&
2162					     spi_device->addr_len == 1);
2163		spi_device->erase_command =
2164			SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ERASE_CMD);
2165		spi_device->erase_size =
2166			1 << SPI_DEV_TYPE_FIELD(device_type,
2167						SPI_DEV_TYPE_ERASE_SIZE);
2168		spi_device->block_size =
2169			1 << SPI_DEV_TYPE_FIELD(device_type,
2170						SPI_DEV_TYPE_BLOCK_SIZE);
2171	} else {
2172		spi_device->size = 0;
2173	}
2174}
2175
/* Extract non-volatile configuration: read and validate the NVRAM,
 * then pull the PHY type/address, SPI device descriptors (struct
 * version >= 3 only), permanent MAC address and board revision out of
 * it.  Returns 0 on success or a negative errno.
 */
static int falcon_probe_nvconfig(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	struct falcon_nvconfig *nvconfig;
	int rc;

	nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
	if (!nvconfig)
		return -ENOMEM;

	rc = falcon_read_nvram(efx, nvconfig);
	if (rc)
		goto out;

	efx->phy_type = nvconfig->board_v2.port0_phy_type;
	efx->mdio.prtad = nvconfig->board_v2.port0_phy_addr;

	if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
		falcon_spi_device_init(
			efx, &nic_data->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
			le32_to_cpu(nvconfig->board_v3
				    .spi_device_type[FFE_AB_SPI_DEVICE_FLASH]));
		falcon_spi_device_init(
			efx, &nic_data->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
			le32_to_cpu(nvconfig->board_v3
				    .spi_device_type[FFE_AB_SPI_DEVICE_EEPROM]));
	}

	/* Read the MAC addresses */
	ether_addr_copy(efx->net_dev->perm_addr, nvconfig->mac_address[0]);

	netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n",
		  efx->phy_type, efx->mdio.prtad);

	rc = falcon_probe_board(efx,
				le16_to_cpu(nvconfig->board_v2.board_revision));
out:
	kfree(nvconfig);
	return rc;
}
2217
2218static int falcon_dimension_resources(struct ef4_nic *efx)
2219{
2220	efx->rx_dc_base = 0x20000;
2221	efx->tx_dc_base = 0x26000;
2222	return 0;
2223}
2224
/* Probe all SPI devices on the NIC.  Determines which SPI device the
 * NIC booted from (flash or EEPROM) using the GPIO3 power-up strap and
 * the flash-present bit, and initialises the corresponding device with
 * default parameters.  If the NIC booted from internal ASIC settings,
 * VPD is disabled and safe SPI clock dividers are programmed instead.
 * Also initialises nic_data->spi_lock.
 */
static void falcon_probe_spi_devices(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
	int boot_dev;

	ef4_reado(efx, &gpio_ctl, FR_AB_GPIO_CTL);
	ef4_reado(efx, &nic_stat, FR_AB_NIC_STAT);
	ef4_reado(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);

	if (EF4_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) {
		boot_dev = (EF4_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ?
			    FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM);
		netif_dbg(efx, probe, efx->net_dev, "Booted from %s\n",
			  boot_dev == FFE_AB_SPI_DEVICE_FLASH ?
			  "flash" : "EEPROM");
	} else {
		/* Disable VPD and set clock dividers to safe
		 * values for initial programming. */
		boot_dev = -1;
		netif_dbg(efx, probe, efx->net_dev,
			  "Booted from internal ASIC settings;"
			  " setting SPI config\n");
		EF4_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0,
				     /* 125 MHz / 7 ~= 20 MHz */
				     FRF_AB_EE_SF_CLOCK_DIV, 7,
				     /* 125 MHz / 63 ~= 2 MHz */
				     FRF_AB_EE_EE_CLOCK_DIV, 63);
		ef4_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
	}

	mutex_init(&nic_data->spi_lock);

	if (boot_dev == FFE_AB_SPI_DEVICE_FLASH)
		falcon_spi_device_init(efx, &nic_data->spi_flash,
				       FFE_AB_SPI_DEVICE_FLASH,
				       default_flash_type);
	if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM)
		falcon_spi_device_init(efx, &nic_data->spi_eeprom,
				       FFE_AB_SPI_DEVICE_EEPROM,
				       large_eeprom_type);
}
2268
/* Falcon A1 maps a fixed 128KB (0x20000) of register space. */
static unsigned int falcon_a1_mem_map_size(struct ef4_nic *efx)
{
	return 128 * 1024;
}
2273
2274static unsigned int falcon_b0_mem_map_size(struct ef4_nic *efx)
2275{
2276	/* Map everything up to and including the RSS indirection table.
2277	 * The PCI core takes care of mapping the MSI-X tables.
2278	 */
2279	return FR_BZ_RX_INDIRECTION_TBL +
2280		FR_BZ_RX_INDIRECTION_TBL_STEP * FR_BZ_RX_INDIRECTION_TBL_ROWS;
2281}
2282
/* Probe the Falcon NIC: allocate per-NIC state, verify the silicon
 * revision/straps, locate the secondary PCI function (rev A1 only),
 * reset the hardware, allocate the interrupt status buffer, read the
 * non-volatile configuration and register the board's bit-banged I2C
 * adapter.  On failure, resources are released in reverse order via
 * the fail* labels.
 */
static int falcon_probe_nic(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data;
	struct falcon_board *board;
	int rc;

	efx->primary = efx; /* only one usable function per controller */

	/* Allocate storage for hardware specific data */
	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	efx->nic_data = nic_data;
	nic_data->efx = efx;

	rc = -ENODEV;

	if (ef4_farch_fpga_ver(efx) != 0) {
		netif_err(efx, probe, efx->net_dev,
			  "Falcon FPGA not supported\n");
		goto fail1;
	}

	if (ef4_nic_rev(efx) <= EF4_REV_FALCON_A1) {
		ef4_oword_t nic_stat;
		struct pci_dev *dev;
		u8 pci_rev = efx->pci_dev->revision;

		/* Rev A0 silicon (revision 0 or 0xff) is unusable */
		if ((pci_rev == 0xff) || (pci_rev == 0)) {
			netif_err(efx, probe, efx->net_dev,
				  "Falcon rev A0 not supported\n");
			goto fail1;
		}
		ef4_reado(efx, &nic_stat, FR_AB_NIC_STAT);
		if (EF4_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) {
			netif_err(efx, probe, efx->net_dev,
				  "Falcon rev A1 1G not supported\n");
			goto fail1;
		}
		if (EF4_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) {
			netif_err(efx, probe, efx->net_dev,
				  "Falcon rev A1 PCI-X not supported\n");
			goto fail1;
		}

		/* Rev A1 is dual-function: find the sibling function at
		 * devfn + 1 on the same bus.  pci_get_device() drops the
		 * reference on the starting device each iteration, so
		 * take an extra reference on our own device first. */
		dev = pci_dev_get(efx->pci_dev);
		while ((dev = pci_get_device(PCI_VENDOR_ID_SOLARFLARE,
					     PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1,
					     dev))) {
			if (dev->bus == efx->pci_dev->bus &&
			    dev->devfn == efx->pci_dev->devfn + 1) {
				nic_data->pci_dev2 = dev;
				break;
			}
		}
		if (!nic_data->pci_dev2) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to find secondary function\n");
			rc = -ENODEV;
			goto fail2;
		}
	}

	/* Now we can reset the NIC */
	rc = __falcon_reset_hw(efx, RESET_TYPE_ALL);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
		goto fail3;
	}

	/* Allocate memory for INT_KER */
	rc = ef4_nic_alloc_buffer(efx, &efx->irq_status, sizeof(ef4_oword_t),
				  GFP_KERNEL);
	if (rc)
		goto fail4;
	BUG_ON(efx->irq_status.dma_addr & 0x0f);

	netif_dbg(efx, probe, efx->net_dev,
		  "INT_KER at %llx (virt %p phys %llx)\n",
		  (u64)efx->irq_status.dma_addr,
		  efx->irq_status.addr,
		  (u64)virt_to_phys(efx->irq_status.addr));

	falcon_probe_spi_devices(efx);

	/* Read in the non-volatile configuration */
	rc = falcon_probe_nvconfig(efx);
	if (rc) {
		if (rc == -EINVAL)
			netif_err(efx, probe, efx->net_dev, "NVRAM is invalid\n");
		goto fail5;
	}

	efx->max_channels = (ef4_nic_rev(efx) <= EF4_REV_FALCON_A1 ? 4 :
			     EF4_MAX_CHANNELS);
	efx->max_tx_channels = efx->max_channels;
	efx->timer_quantum_ns = 4968; /* 621 cycles */
	efx->timer_max_ns = efx->type->timer_period_max *
			    efx->timer_quantum_ns;

	/* Initialise I2C adapter */
	board = falcon_board(efx);
	board->i2c_adap.owner = THIS_MODULE;
	board->i2c_data = falcon_i2c_bit_operations;
	board->i2c_data.data = efx;
	board->i2c_adap.algo_data = &board->i2c_data;
	board->i2c_adap.dev.parent = &efx->pci_dev->dev;
	strlcpy(board->i2c_adap.name, "SFC4000 GPIO",
		sizeof(board->i2c_adap.name));
	rc = i2c_bit_add_bus(&board->i2c_adap);
	if (rc)
		goto fail5;

	rc = falcon_board(efx)->type->init(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to initialise board\n");
		goto fail6;
	}

	/* Stats start disabled; falcon_start_nic_stats() enables them */
	nic_data->stats_disable_count = 1;
	timer_setup(&nic_data->stats_timer, falcon_stats_timer_func, 0);

	return 0;

 fail6:
	i2c_del_adapter(&board->i2c_adap);
	memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
 fail5:
	ef4_nic_free_buffer(efx, &efx->irq_status);
 fail4:
 fail3:
	if (nic_data->pci_dev2) {
		pci_dev_put(nic_data->pci_dev2);
		nic_data->pci_dev2 = NULL;
	}
 fail2:
 fail1:
	kfree(efx->nic_data);
	return rc;
}
2424
/* Program the RX_CFG register: buffer sizing, XON/XOFF flow-control
 * thresholds for the data and control FIFOs, and (on B0 and later)
 * RX hash insertion.  The field layout differs between rev A1 and
 * rev B0+, hence the two branches.
 */
static void falcon_init_rx_cfg(struct ef4_nic *efx)
{
	/* RX control FIFO thresholds (32 entries) */
	const unsigned ctrl_xon_thr = 20;
	const unsigned ctrl_xoff_thr = 25;
	ef4_oword_t reg;

	ef4_reado(efx, &reg, FR_AZ_RX_CFG);
	if (ef4_nic_rev(efx) <= EF4_REV_FALCON_A1) {
		/* Data FIFO size is 5.5K.  The RX DMA engine only
		 * supports scattering for user-mode queues, but will
		 * split DMA writes at intervals of RX_USR_BUF_SIZE
		 * (32-byte units) even for kernel-mode queues.  We
		 * set it to be so large that that never happens.
		 */
		EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
		EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
				    (3 * 4096) >> 5);
		EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8);
		EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8);
		EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
		EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr);
	} else {
		/* Data FIFO size is 80K; register fields moved */
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
				    EF4_RX_USR_BUF_SIZE >> 5);
		/* Send XON and XOFF at ~3 * max MTU away from empty/full */
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8);
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8);
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);

		/* Enable hash insertion. This is broken for the
		 * 'Falcon' hash so also select Toeplitz TCP/IPv4 and
		 * IPv4 hashes. */
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_INSRT_HDR, 1);
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_ALG, 1);
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_IP_HASH, 1);
	}
	/* Always enable XOFF signal from RX FIFO.  We enable
	 * or disable transmission of pause frames at the MAC. */
	EF4_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
	ef4_writeo(efx, &reg, FR_AZ_RX_CFG);
}
2471
/* This call performs hardware-specific global initialisation, such as
 * defining the descriptor cache sizes and number of RSS channels.
 * It does not set up any buffers, descriptor rings or event queues.
 *
 * Selects on-chip SRAM and zeroes it, applies several hardware
 * workarounds, configures RX/TX behaviour and (on B0+) pushes the
 * initial RSS configuration.  Returns 0 on success or the error from
 * falcon_reset_sram().
 */
static int falcon_init_nic(struct ef4_nic *efx)
{
	ef4_oword_t temp;
	int rc;

	/* Use on-chip SRAM */
	ef4_reado(efx, &temp, FR_AB_NIC_STAT);
	EF4_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1);
	ef4_writeo(efx, &temp, FR_AB_NIC_STAT);

	rc = falcon_reset_sram(efx);
	if (rc)
		return rc;

	/* Clear the parity enables on the TX data fifos as
	 * they produce false parity errors because of timing issues
	 */
	if (EF4_WORKAROUND_5129(efx)) {
		ef4_reado(efx, &temp, FR_AZ_CSR_SPARE);
		EF4_SET_OWORD_FIELD(temp, FRF_AB_MEM_PERR_EN_TX_DATA, 0);
		ef4_writeo(efx, &temp, FR_AZ_CSR_SPARE);
	}

	/* Limit filter search depth as a hardware workaround */
	if (EF4_WORKAROUND_7244(efx)) {
		ef4_reado(efx, &temp, FR_BZ_RX_FILTER_CTL);
		EF4_SET_OWORD_FIELD(temp, FRF_BZ_UDP_FULL_SRCH_LIMIT, 8);
		EF4_SET_OWORD_FIELD(temp, FRF_BZ_UDP_WILD_SRCH_LIMIT, 8);
		EF4_SET_OWORD_FIELD(temp, FRF_BZ_TCP_FULL_SRCH_LIMIT, 8);
		EF4_SET_OWORD_FIELD(temp, FRF_BZ_TCP_WILD_SRCH_LIMIT, 8);
		ef4_writeo(efx, &temp, FR_BZ_RX_FILTER_CTL);
	}

	/* XXX This is documented only for Falcon A0/A1 */
	/* Setup RX.  Wait for descriptor is broken and must
	 * be disabled.  RXDP recovery shouldn't be needed, but is.
	 */
	ef4_reado(efx, &temp, FR_AA_RX_SELF_RST);
	EF4_SET_OWORD_FIELD(temp, FRF_AA_RX_NODESC_WAIT_DIS, 1);
	EF4_SET_OWORD_FIELD(temp, FRF_AA_RX_SELF_RST_EN, 1);
	if (EF4_WORKAROUND_5583(efx))
		EF4_SET_OWORD_FIELD(temp, FRF_AA_RX_ISCSI_DIS, 1);
	ef4_writeo(efx, &temp, FR_AA_RX_SELF_RST);

	/* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
	 * descriptors (which is bad).
	 */
	ef4_reado(efx, &temp, FR_AZ_TX_CFG);
	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
	ef4_writeo(efx, &temp, FR_AZ_TX_CFG);

	falcon_init_rx_cfg(efx);

	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
		falcon_b0_rx_push_rss_config(efx, false, efx->rx_indir_table);

		/* Set destination of both TX and RX Flush events */
		EF4_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
		ef4_writeo(efx, &temp, FR_BZ_DP_CTRL);
	}

	ef4_farch_init_common(efx);

	return 0;
}
2540
/* Tear down NIC-level state created by falcon_probe_nic(): board,
 * I2C adapter, interrupt status buffer, then a final hardware reset
 * before the secondary PCI function reference and the private state
 * are released.
 */
static void falcon_remove_nic(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	struct falcon_board *board = falcon_board(efx);

	board->type->fini(efx);

	/* Remove I2C adapter and clear it in preparation for a retry */
	i2c_del_adapter(&board->i2c_adap);
	memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));

	ef4_nic_free_buffer(efx, &efx->irq_status);

	__falcon_reset_hw(efx, RESET_TYPE_ALL);

	/* Release the second function after the reset */
	if (nic_data->pci_dev2) {
		pci_dev_put(nic_data->pci_dev2);
		nic_data->pci_dev2 = NULL;
	}

	/* Tear down the private nic state */
	kfree(efx->nic_data);
	efx->nic_data = NULL;
}
2566
2567static size_t falcon_describe_nic_stats(struct ef4_nic *efx, u8 *names)
2568{
2569	return ef4_nic_describe_stats(falcon_stat_desc, FALCON_STAT_COUNT,
2570				      falcon_stat_mask, names);
2571}
2572
/* Refresh the cached NIC statistics and optionally copy them out.
 * While stats are enabled, accumulates the no-descriptor drop counter,
 * folds in the latest DMAed XMAC stats if a transfer has completed,
 * and recomputes derived/software stats.  @full_stats, if non-NULL,
 * receives the raw FALCON_STAT_* array; @core_stats, if non-NULL, is
 * filled with the rtnl_link_stats64 summary.  Returns the number of
 * stats maintained.
 */
static size_t falcon_update_nic_stats(struct ef4_nic *efx, u64 *full_stats,
				      struct rtnl_link_stats64 *core_stats)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	u64 *stats = nic_data->stats;
	ef4_oword_t cnt;

	if (!nic_data->stats_disable_count) {
		ef4_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
		stats[FALCON_STAT_rx_nodesc_drop_cnt] +=
			EF4_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);

		if (nic_data->stats_pending &&
		    FALCON_XMAC_STATS_DMA_FLAG(efx)) {
			nic_data->stats_pending = false;
			rmb(); /* read the done flag before the stats */
			ef4_nic_update_stats(
				falcon_stat_desc, FALCON_STAT_COUNT,
				falcon_stat_mask,
				stats, efx->stats_buffer.addr, true);
		}

		/* Update derived statistic */
		ef4_update_diff_stat(&stats[FALCON_STAT_rx_bad_bytes],
				     stats[FALCON_STAT_rx_bytes] -
				     stats[FALCON_STAT_rx_good_bytes] -
				     stats[FALCON_STAT_rx_control] * 64);
		ef4_update_sw_stats(efx, stats);
	}

	if (full_stats)
		memcpy(full_stats, stats, sizeof(u64) * FALCON_STAT_COUNT);

	if (core_stats) {
		core_stats->rx_packets = stats[FALCON_STAT_rx_packets];
		core_stats->tx_packets = stats[FALCON_STAT_tx_packets];
		core_stats->rx_bytes = stats[FALCON_STAT_rx_bytes];
		core_stats->tx_bytes = stats[FALCON_STAT_tx_bytes];
		core_stats->rx_dropped = stats[FALCON_STAT_rx_nodesc_drop_cnt] +
					 stats[GENERIC_STAT_rx_nodesc_trunc] +
					 stats[GENERIC_STAT_rx_noskb_drops];
		core_stats->multicast = stats[FALCON_STAT_rx_multicast];
		core_stats->rx_length_errors =
			stats[FALCON_STAT_rx_gtjumbo] +
			stats[FALCON_STAT_rx_length_error];
		core_stats->rx_crc_errors = stats[FALCON_STAT_rx_bad];
		core_stats->rx_frame_errors = stats[FALCON_STAT_rx_align_error];
		core_stats->rx_fifo_errors = stats[FALCON_STAT_rx_overflow];

		core_stats->rx_errors = (core_stats->rx_length_errors +
					 core_stats->rx_crc_errors +
					 core_stats->rx_frame_errors +
					 stats[FALCON_STAT_rx_symbol_error]);
	}

	return FALCON_STAT_COUNT;
}
2630
2631void falcon_start_nic_stats(struct ef4_nic *efx)
2632{
2633	struct falcon_nic_data *nic_data = efx->nic_data;
2634
2635	spin_lock_bh(&efx->stats_lock);
2636	if (--nic_data->stats_disable_count == 0)
2637		falcon_stats_request(efx);
2638	spin_unlock_bh(&efx->stats_lock);
2639}
2640
/* We don't actually pull stats on falcon.  Wait 10ms so that
 * they arrive when we call this just after start_stats.
 */
static void falcon_pull_nic_stats(struct ef4_nic *efx)
{
	msleep(10);
}
2648
/* Take a reference on the stats-disable count (suppressing further
 * stats requests), stop the stats timer, wait briefly for any
 * in-flight stats DMA to finish, then mark the current request
 * complete.  May sleep; pairs with falcon_start_nic_stats().
 */
void falcon_stop_nic_stats(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	int i;

	might_sleep();

	spin_lock_bh(&efx->stats_lock);
	++nic_data->stats_disable_count;
	spin_unlock_bh(&efx->stats_lock);

	del_timer_sync(&nic_data->stats_timer);

	/* Wait enough time for the most recent transfer to
	 * complete. */
	for (i = 0; i < 4 && nic_data->stats_pending; i++) {
		if (FALCON_XMAC_STATS_DMA_FLAG(efx))
			break;
		msleep(1);
	}

	spin_lock_bh(&efx->stats_lock);
	falcon_stats_complete(efx);
	spin_unlock_bh(&efx->stats_lock);
}
2674
2675static void falcon_set_id_led(struct ef4_nic *efx, enum ef4_led_mode mode)
2676{
2677	falcon_board(efx)->type->set_id_led(efx, mode);
2678}
2679
2680/**************************************************************************
2681 *
2682 * Wake on LAN
2683 *
2684 **************************************************************************
2685 */
2686
2687static void falcon_get_wol(struct ef4_nic *efx, struct ethtool_wolinfo *wol)
2688{
2689	wol->supported = 0;
2690	wol->wolopts = 0;
2691	memset(&wol->sopass, 0, sizeof(wol->sopass));
2692}
2693
2694static int falcon_set_wol(struct ef4_nic *efx, u32 type)
2695{
2696	if (type != 0)
2697		return -EINVAL;
2698	return 0;
2699}
2700
2701/**************************************************************************
2702 *
2703 * Revision-dependent attributes used by efx.c and nic.c
2704 *
2705 **************************************************************************
2706 */
2707
/* Method table for Falcon rev A1.  Unlike B0 there is no .test_chip,
 * and A1 uses the rev-A-specific IRQ acknowledge (falcon_irq_ack_a1)
 * and legacy interrupt handler (falcon_legacy_interrupt_a1).
 */
const struct ef4_nic_type falcon_a1_nic_type = {
	.mem_bar = EF4_MEM_BAR,
	.mem_map_size = falcon_a1_mem_map_size,
	.probe = falcon_probe_nic,
	.remove = falcon_remove_nic,
	.init = falcon_init_nic,
	.dimension_resources = falcon_dimension_resources,
	.fini = falcon_irq_ack_a1,
	.monitor = falcon_monitor,
	.map_reset_reason = falcon_map_reset_reason,
	.map_reset_flags = falcon_map_reset_flags,
	.reset = falcon_reset_hw,
	.probe_port = falcon_probe_port,
	.remove_port = falcon_remove_port,
	.handle_global_event = falcon_handle_global_event,
	.fini_dmaq = ef4_farch_fini_dmaq,
	.prepare_flush = falcon_prepare_flush,
	.finish_flush = ef4_port_dummy_op_void,
	.prepare_flr = ef4_port_dummy_op_void,
	.finish_flr = ef4_farch_finish_flr,
	.describe_stats = falcon_describe_nic_stats,
	.update_stats = falcon_update_nic_stats,
	.start_stats = falcon_start_nic_stats,
	.pull_stats = falcon_pull_nic_stats,
	.stop_stats = falcon_stop_nic_stats,
	.set_id_led = falcon_set_id_led,
	.push_irq_moderation = falcon_push_irq_moderation,
	.reconfigure_port = falcon_reconfigure_port,
	.prepare_enable_fc_tx = falcon_a1_prepare_enable_fc_tx,
	.reconfigure_mac = falcon_reconfigure_xmac,
	.check_mac_fault = falcon_xmac_check_fault,
	.get_wol = falcon_get_wol,
	.set_wol = falcon_set_wol,
	.resume_wol = ef4_port_dummy_op_void,
	.test_nvram = falcon_test_nvram,
	.irq_enable_master = ef4_farch_irq_enable_master,
	.irq_test_generate = ef4_farch_irq_test_generate,
	.irq_disable_non_ev = ef4_farch_irq_disable_master,
	.irq_handle_msi = ef4_farch_msi_interrupt,
	.irq_handle_legacy = falcon_legacy_interrupt_a1,
	.tx_probe = ef4_farch_tx_probe,
	.tx_init = ef4_farch_tx_init,
	.tx_remove = ef4_farch_tx_remove,
	.tx_write = ef4_farch_tx_write,
	.tx_limit_len = ef4_farch_tx_limit_len,
	.rx_push_rss_config = dummy_rx_push_rss_config,
	.rx_probe = ef4_farch_rx_probe,
	.rx_init = ef4_farch_rx_init,
	.rx_remove = ef4_farch_rx_remove,
	.rx_write = ef4_farch_rx_write,
	.rx_defer_refill = ef4_farch_rx_defer_refill,
	.ev_probe = ef4_farch_ev_probe,
	.ev_init = ef4_farch_ev_init,
	.ev_fini = ef4_farch_ev_fini,
	.ev_remove = ef4_farch_ev_remove,
	.ev_process = ef4_farch_ev_process,
	.ev_read_ack = ef4_farch_ev_read_ack,
	.ev_test_generate = ef4_farch_ev_test_generate,

	/* We don't expose the filter table on Falcon A1 as it is not
	 * mapped into function 0, but these implementations still
	 * work with a degenerate case of all tables set to size 0.
	 */
	.filter_table_probe = ef4_farch_filter_table_probe,
	.filter_table_restore = ef4_farch_filter_table_restore,
	.filter_table_remove = ef4_farch_filter_table_remove,
	.filter_insert = ef4_farch_filter_insert,
	.filter_remove_safe = ef4_farch_filter_remove_safe,
	.filter_get_safe = ef4_farch_filter_get_safe,
	.filter_clear_rx = ef4_farch_filter_clear_rx,
	.filter_count_rx_used = ef4_farch_filter_count_rx_used,
	.filter_get_rx_id_limit = ef4_farch_filter_get_rx_id_limit,
	.filter_get_rx_ids = ef4_farch_filter_get_rx_ids,

#ifdef CONFIG_SFC_FALCON_MTD
	.mtd_probe = falcon_mtd_probe,
	.mtd_rename = falcon_mtd_rename,
	.mtd_read = falcon_mtd_read,
	.mtd_erase = falcon_mtd_erase,
	.mtd_write = falcon_mtd_write,
	.mtd_sync = falcon_mtd_sync,
#endif

	.revision = EF4_REV_FALCON_A1,
	.txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
	.rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER,
	.buf_tbl_base = FR_AA_BUF_FULL_TBL_KER,
	.evq_ptr_tbl_base = FR_AA_EVQ_PTR_TBL_KER,
	.evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER,
	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
	.rx_buffer_padding = 0x24,
	.can_rx_scatter = false,
	.max_interrupt_mode = EF4_INT_MODE_MSI,
	.timer_period_max =  1 << FRF_AB_TC_TIMER_VAL_WIDTH,
	.offload_features = NETIF_F_IP_CSUM,
};
2804
/* Method table and hardware parameters for SFC4000 rev B0 (Falcon B0)
 * boards.  Mostly shared farch (Falcon architecture) implementations,
 * with Falcon-specific hooks for probe/reset/MAC/stats handling.
 */
const struct ef4_nic_type falcon_b0_nic_type = {
	/* Device lifecycle: probe/init/reset/monitor */
	.mem_bar = EF4_MEM_BAR,
	.mem_map_size = falcon_b0_mem_map_size,
	.probe = falcon_probe_nic,
	.remove = falcon_remove_nic,
	.init = falcon_init_nic,
	.dimension_resources = falcon_dimension_resources,
	.fini = ef4_port_dummy_op_void,		/* no-op: nothing to tear down here */
	.monitor = falcon_monitor,
	.map_reset_reason = falcon_map_reset_reason,
	.map_reset_flags = falcon_map_reset_flags,
	.reset = falcon_reset_hw,
	/* Port and flush handling */
	.probe_port = falcon_probe_port,
	.remove_port = falcon_remove_port,
	.handle_global_event = falcon_handle_global_event,
	.fini_dmaq = ef4_farch_fini_dmaq,
	.prepare_flush = falcon_prepare_flush,
	.finish_flush = ef4_port_dummy_op_void,	/* no-op on this revision */
	.prepare_flr = ef4_port_dummy_op_void,
	.finish_flr = ef4_farch_finish_flr,
	/* Statistics collection */
	.describe_stats = falcon_describe_nic_stats,
	.update_stats = falcon_update_nic_stats,
	.start_stats = falcon_start_nic_stats,
	.pull_stats = falcon_pull_nic_stats,
	.stop_stats = falcon_stop_nic_stats,
	/* MAC/PHY control; B0 uses the XMAC (10G) reconfigure path */
	.set_id_led = falcon_set_id_led,
	.push_irq_moderation = falcon_push_irq_moderation,
	.reconfigure_port = falcon_reconfigure_port,
	.prepare_enable_fc_tx = falcon_b0_prepare_enable_fc_tx,
	.reconfigure_mac = falcon_reconfigure_xmac,
	.check_mac_fault = falcon_xmac_check_fault,
	/* Wake-on-LAN and self-test */
	.get_wol = falcon_get_wol,
	.set_wol = falcon_set_wol,
	.resume_wol = ef4_port_dummy_op_void,
	.test_chip = falcon_b0_test_chip,
	.test_nvram = falcon_test_nvram,
	/* Interrupt handling (shared farch implementations) */
	.irq_enable_master = ef4_farch_irq_enable_master,
	.irq_test_generate = ef4_farch_irq_test_generate,
	.irq_disable_non_ev = ef4_farch_irq_disable_master,
	.irq_handle_msi = ef4_farch_msi_interrupt,
	.irq_handle_legacy = ef4_farch_legacy_interrupt,
	/* TX queue operations */
	.tx_probe = ef4_farch_tx_probe,
	.tx_init = ef4_farch_tx_init,
	.tx_remove = ef4_farch_tx_remove,
	.tx_write = ef4_farch_tx_write,
	.tx_limit_len = ef4_farch_tx_limit_len,
	/* RX queue operations; unlike A1, B0 has a real RSS push hook */
	.rx_push_rss_config = falcon_b0_rx_push_rss_config,
	.rx_probe = ef4_farch_rx_probe,
	.rx_init = ef4_farch_rx_init,
	.rx_remove = ef4_farch_rx_remove,
	.rx_write = ef4_farch_rx_write,
	.rx_defer_refill = ef4_farch_rx_defer_refill,
	/* Event queue operations */
	.ev_probe = ef4_farch_ev_probe,
	.ev_init = ef4_farch_ev_init,
	.ev_fini = ef4_farch_ev_fini,
	.ev_remove = ef4_farch_ev_remove,
	.ev_process = ef4_farch_ev_process,
	.ev_read_ack = ef4_farch_ev_read_ack,
	.ev_test_generate = ef4_farch_ev_test_generate,
	/* RX filter table (fully functional on B0, cf. the degenerate A1 case) */
	.filter_table_probe = ef4_farch_filter_table_probe,
	.filter_table_restore = ef4_farch_filter_table_restore,
	.filter_table_remove = ef4_farch_filter_table_remove,
	.filter_update_rx_scatter = ef4_farch_filter_update_rx_scatter,
	.filter_insert = ef4_farch_filter_insert,
	.filter_remove_safe = ef4_farch_filter_remove_safe,
	.filter_get_safe = ef4_farch_filter_get_safe,
	.filter_clear_rx = ef4_farch_filter_clear_rx,
	.filter_count_rx_used = ef4_farch_filter_count_rx_used,
	.filter_get_rx_id_limit = ef4_farch_filter_get_rx_id_limit,
	.filter_get_rx_ids = ef4_farch_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
	/* Accelerated receive flow steering hooks (B0 only in this file) */
	.filter_rfs_insert = ef4_farch_filter_rfs_insert,
	.filter_rfs_expire_one = ef4_farch_filter_rfs_expire_one,
#endif
#ifdef CONFIG_SFC_FALCON_MTD
	/* On-board flash/EEPROM exposed as MTD partitions */
	.mtd_probe = falcon_mtd_probe,
	.mtd_rename = falcon_mtd_rename,
	.mtd_read = falcon_mtd_read,
	.mtd_erase = falcon_mtd_erase,
	.mtd_write = falcon_mtd_write,
	.mtd_sync = falcon_mtd_sync,
#endif

	/* Hardware constants: B0 uses the BZ register layout (full-function
	 * tables rather than the A1 _KER kernel-only aliases).
	 */
	.revision = EF4_REV_FALCON_B0,
	.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
	.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
	.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
	.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
	.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
	/* B0 prefixes each RX buffer with metadata (incl. RX hash) instead of
	 * the A1-style padding, enabling RXHASH offload and scatter.
	 */
	.rx_prefix_size = FS_BZ_RX_PREFIX_SIZE,
	.rx_hash_offset = FS_BZ_RX_PREFIX_HASH_OFST,
	.rx_buffer_padding = 0,
	.can_rx_scatter = true,
	.max_interrupt_mode = EF4_INT_MODE_MSIX,
	.timer_period_max =  1 << FRF_AB_TC_TIMER_VAL_WIDTH,
	.offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
	.max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
};