   1// SPDX-License-Identifier: GPL-2.0-only
   2/****************************************************************************
   3 * Driver for Solarflare network controllers and boards
   4 * Copyright 2005-2006 Fen Systems Ltd.
   5 * Copyright 2005-2013 Solarflare Communications Inc.
   6 */
   7
   8#include <linux/module.h>
   9#include <linux/pci.h>
  10#include <linux/netdevice.h>
  11#include <linux/etherdevice.h>
  12#include <linux/delay.h>
  13#include <linux/notifier.h>
  14#include <linux/ip.h>
  15#include <linux/tcp.h>
  16#include <linux/in.h>
  17#include <linux/ethtool.h>
  18#include <linux/topology.h>
  19#include <linux/gfp.h>
  20#include <linux/interrupt.h>
  21#include "net_driver.h"
  22#include "efx.h"
  23#include "nic.h"
  24#include "selftest.h"
  25
  26#include "workarounds.h"
  27
  28/**************************************************************************
  29 *
  30 * Type name strings
  31 *
  32 **************************************************************************
  33 */
  34
  35/* Loopback mode names (see LOOPBACK_MODE()) */
  36const unsigned int ef4_loopback_mode_max = LOOPBACK_MAX;
  37const char *const ef4_loopback_mode_names[] = {
  38	[LOOPBACK_NONE]		= "NONE",
  39	[LOOPBACK_DATA]		= "DATAPATH",
  40	[LOOPBACK_GMAC]		= "GMAC",
  41	[LOOPBACK_XGMII]	= "XGMII",
  42	[LOOPBACK_XGXS]		= "XGXS",
  43	[LOOPBACK_XAUI]		= "XAUI",
  44	[LOOPBACK_GMII]		= "GMII",
  45	[LOOPBACK_SGMII]	= "SGMII",
  46	[LOOPBACK_XGBR]		= "XGBR",
  47	[LOOPBACK_XFI]		= "XFI",
  48	[LOOPBACK_XAUI_FAR]	= "XAUI_FAR",
  49	[LOOPBACK_GMII_FAR]	= "GMII_FAR",
  50	[LOOPBACK_SGMII_FAR]	= "SGMII_FAR",
  51	[LOOPBACK_XFI_FAR]	= "XFI_FAR",
  52	[LOOPBACK_GPHY]		= "GPHY",
  53	[LOOPBACK_PHYXS]	= "PHYXS",
  54	[LOOPBACK_PCS]		= "PCS",
  55	[LOOPBACK_PMAPMD]	= "PMA/PMD",
  56	[LOOPBACK_XPORT]	= "XPORT",
  57	[LOOPBACK_XGMII_WS]	= "XGMII_WS",
  58	[LOOPBACK_XAUI_WS]	= "XAUI_WS",
  59	[LOOPBACK_XAUI_WS_FAR]  = "XAUI_WS_FAR",
  60	[LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR",
  61	[LOOPBACK_GMII_WS]	= "GMII_WS",
  62	[LOOPBACK_XFI_WS]	= "XFI_WS",
  63	[LOOPBACK_XFI_WS_FAR]	= "XFI_WS_FAR",
  64	[LOOPBACK_PHYXS_WS]	= "PHYXS_WS",
  65};
  66
  67const unsigned int ef4_reset_type_max = RESET_TYPE_MAX;
  68const char *const ef4_reset_type_names[] = {
  69	[RESET_TYPE_INVISIBLE]          = "INVISIBLE",
  70	[RESET_TYPE_ALL]                = "ALL",
  71	[RESET_TYPE_RECOVER_OR_ALL]     = "RECOVER_OR_ALL",
  72	[RESET_TYPE_WORLD]              = "WORLD",
  73	[RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
  74	[RESET_TYPE_DATAPATH]           = "DATAPATH",
  75	[RESET_TYPE_DISABLE]            = "DISABLE",
  76	[RESET_TYPE_TX_WATCHDOG]        = "TX_WATCHDOG",
  77	[RESET_TYPE_INT_ERROR]          = "INT_ERROR",
  78	[RESET_TYPE_RX_RECOVERY]        = "RX_RECOVERY",
  79	[RESET_TYPE_DMA_ERROR]          = "DMA_ERROR",
  80	[RESET_TYPE_TX_SKIP]            = "TX_SKIP",
  81};
  82
  83/* Reset workqueue. If any NIC has a hardware failure then a reset will be
  84 * queued onto this work queue. This is not a per-nic work queue, because
  85 * ef4_reset_work() acquires the rtnl lock, so resets are naturally serialised.
  86 */
  87static struct workqueue_struct *reset_workqueue;
  88
  89/* How often and how many times to poll for a reset while waiting for a
  90 * BIST that another function started to complete.
  91 */
  92#define BIST_WAIT_DELAY_MS	100
  93#define BIST_WAIT_DELAY_COUNT	100
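/* Editorial note: taken together, these constants bound the wait at
 * BIST_WAIT_DELAY_COUNT * BIST_WAIT_DELAY_MS = 100 * 100 ms, i.e. up
 * to 10 seconds for another function's BIST to complete.
 */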
  94
  95/**************************************************************************
  96 *
  97 * Configurable values
  98 *
  99 *************************************************************************/
 100
 101/*
 102 * Use separate channels for TX and RX events
 103 *
 104 * Set this to 1 to use separate channels for TX and RX. It allows us
 105 * to control interrupt affinity separately for TX and RX.
 106 *
 107 * This is only used in MSI-X interrupt mode
 108 */
 109bool ef4_separate_tx_channels;
 110module_param(ef4_separate_tx_channels, bool, 0444);
 111MODULE_PARM_DESC(ef4_separate_tx_channels,
 112		 "Use separate channels for TX and RX");
 113
 114/* This is the time (in jiffies) between invocations of the hardware
 115 * monitor.
 116 * On Falcon-based NICs, this will:
 117 * - Check the on-board hardware monitor;
 118 * - Poll the link state and reconfigure the hardware as necessary.
 119 * On Siena-based NICs for power systems with EEH support, this will give EEH a
 120 * chance to start.
 121 */
 122static unsigned int ef4_monitor_interval = 1 * HZ;
 123
 124/* Initial interrupt moderation settings.  They can be modified after
 125 * module load with ethtool.
 126 *
 127 * The default for RX should strike a balance between increasing the
 128 * round-trip latency and reducing overhead.
 129 */
 130static unsigned int rx_irq_mod_usec = 60;
 131
 132/* Initial interrupt moderation settings.  They can be modified after
 133 * module load with ethtool.
 134 *
 135 * This default is chosen to ensure that a 10G link does not go idle
 136 * while a TX queue is stopped after it has become full.  A queue is
 137 * restarted when it drops below half full.  The time this takes (assuming
 138 * worst case 3 descriptors per packet and 1024 descriptors) is
 139 *   512 / 3 * 1.2 = 205 usec.
 140 */
 141static unsigned int tx_irq_mod_usec = 150;
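/* Editorial derivation of the 205 usec figure above (assumed link
 * parameters): the queue is woken at half of the 1024-entry ring, so
 * 512 descriptors must drain; at the worst case of 3 descriptors per
 * packet that is ~170 packets, and at 10Gb/s a full-size ~1500-byte
 * frame takes ~1.2 usec on the wire, giving 512 / 3 * 1.2 ~= 205 usec.
 * The 150 usec default is therefore comfortably below that bound.
 */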
 142
 143/* This is the first interrupt mode to try out of:
 144 * 0 => MSI-X
 145 * 1 => MSI
 146 * 2 => legacy
 147 */
 148static unsigned int interrupt_mode;
 149
 150/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 151 * i.e. the number of CPUs among which we may distribute simultaneous
 152 * interrupt handling.
 153 *
 154 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 155 * The default (0) means to assign an interrupt to each core.
 156 */
 157static unsigned int rss_cpus;
 158module_param(rss_cpus, uint, 0444);
 159MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
 160
 161static bool phy_flash_cfg;
 162module_param(phy_flash_cfg, bool, 0644);
 163MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");
 164
 165static unsigned irq_adapt_low_thresh = 8000;
 166module_param(irq_adapt_low_thresh, uint, 0644);
 167MODULE_PARM_DESC(irq_adapt_low_thresh,
 168		 "Threshold score for reducing IRQ moderation");
 169
 170static unsigned irq_adapt_high_thresh = 16000;
 171module_param(irq_adapt_high_thresh, uint, 0644);
 172MODULE_PARM_DESC(irq_adapt_high_thresh,
 173		 "Threshold score for increasing IRQ moderation");
 174
 175static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
 176			 NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
 177			 NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
 178			 NETIF_MSG_TX_ERR | NETIF_MSG_HW);
 179module_param(debug, uint, 0);
 180MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
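/* Example use of the parameters above (editorial sketch; the module
 * name depends on the build, sfc-falcon is assumed here):
 *
 *   modprobe sfc-falcon ef4_separate_tx_channels=1 rss_cpus=4 debug=0x20f7
 *
 * 0x20f7 is what the default mask above evaluates to with the standard
 * NETIF_MSG_* bit values (DRV|PROBE|LINK|IFDOWN|IFUP|RX_ERR|TX_ERR|HW).
 */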
 181
 182/**************************************************************************
 183 *
 184 * Utility functions and prototypes
 185 *
 186 *************************************************************************/
 187
 188static int ef4_soft_enable_interrupts(struct ef4_nic *efx);
 189static void ef4_soft_disable_interrupts(struct ef4_nic *efx);
 190static void ef4_remove_channel(struct ef4_channel *channel);
 191static void ef4_remove_channels(struct ef4_nic *efx);
 192static const struct ef4_channel_type ef4_default_channel_type;
 193static void ef4_remove_port(struct ef4_nic *efx);
 194static void ef4_init_napi_channel(struct ef4_channel *channel);
 195static void ef4_fini_napi(struct ef4_nic *efx);
 196static void ef4_fini_napi_channel(struct ef4_channel *channel);
 197static void ef4_fini_struct(struct ef4_nic *efx);
 198static void ef4_start_all(struct ef4_nic *efx);
 199static void ef4_stop_all(struct ef4_nic *efx);
 200
 201#define EF4_ASSERT_RESET_SERIALISED(efx)		\
 202	do {						\
 203		if ((efx->state == STATE_READY) ||	\
 204		    (efx->state == STATE_RECOVERY) ||	\
 205		    (efx->state == STATE_DISABLED))	\
 206			ASSERT_RTNL();			\
 207	} while (0)
 208
 209static int ef4_check_disabled(struct ef4_nic *efx)
 210{
 211	if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
 212		netif_err(efx, drv, efx->net_dev,
 213			  "device is disabled due to earlier errors\n");
 214		return -EIO;
 215	}
 216	return 0;
 217}
 218
 219/**************************************************************************
 220 *
 221 * Event queue processing
 222 *
 223 *************************************************************************/
 224
 225/* Process channel's event queue
 226 *
 227 * This function is responsible for processing the event queue of a
 228 * single channel.  The caller must guarantee that this function will
 229 * never be concurrently called more than once on the same channel,
 230 * though different channels may be being processed concurrently.
 231 */
 232static int ef4_process_channel(struct ef4_channel *channel, int budget)
 233{
 234	struct ef4_tx_queue *tx_queue;
 235	int spent;
 236
 237	if (unlikely(!channel->enabled))
 238		return 0;
 239
 240	ef4_for_each_channel_tx_queue(tx_queue, channel) {
 241		tx_queue->pkts_compl = 0;
 242		tx_queue->bytes_compl = 0;
 243	}
 244
 245	spent = ef4_nic_process_eventq(channel, budget);
 246	if (spent && ef4_channel_has_rx_queue(channel)) {
 247		struct ef4_rx_queue *rx_queue =
 248			ef4_channel_get_rx_queue(channel);
 249
 250		ef4_rx_flush_packet(channel);
 251		ef4_fast_push_rx_descriptors(rx_queue, true);
 252	}
 253
 254	/* Update BQL */
 255	ef4_for_each_channel_tx_queue(tx_queue, channel) {
 256		if (tx_queue->bytes_compl) {
 257			netdev_tx_completed_queue(tx_queue->core_txq,
 258				tx_queue->pkts_compl, tx_queue->bytes_compl);
 259		}
 260	}
 261
 262	return spent;
 263}
 264
 265/* NAPI poll handler
 266 *
 267 * NAPI guarantees serialisation of polls of the same device, which
 268 * provides the guarantee required by ef4_process_channel().
 269 */
 270static void ef4_update_irq_mod(struct ef4_nic *efx, struct ef4_channel *channel)
 271{
 272	int step = efx->irq_mod_step_us;
 273
 274	if (channel->irq_mod_score < irq_adapt_low_thresh) {
 275		if (channel->irq_moderation_us > step) {
 276			channel->irq_moderation_us -= step;
 277			efx->type->push_irq_moderation(channel);
 278		}
 279	} else if (channel->irq_mod_score > irq_adapt_high_thresh) {
 280		if (channel->irq_moderation_us <
 281		    efx->irq_rx_moderation_us) {
 282			channel->irq_moderation_us += step;
 283			efx->type->push_irq_moderation(channel);
 284		}
 285	}
 286
 287	channel->irq_count = 0;
 288	channel->irq_mod_score = 0;
 289}
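/* Editorial worked example: with the default thresholds above, a
 * channel whose score over the 1000-interrupt window checked in
 * ef4_poll() below stays under 8000 has its moderation reduced by one
 * step, while a score over 16000 increases it, capped at the
 * configured irq_rx_moderation_us.
 */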
 290
 291static int ef4_poll(struct napi_struct *napi, int budget)
 292{
 293	struct ef4_channel *channel =
 294		container_of(napi, struct ef4_channel, napi_str);
 295	struct ef4_nic *efx = channel->efx;
 296	int spent;
 297
 298	netif_vdbg(efx, intr, efx->net_dev,
 299		   "channel %d NAPI poll executing on CPU %d\n",
 300		   channel->channel, raw_smp_processor_id());
 301
 302	spent = ef4_process_channel(channel, budget);
 303
 304	if (spent < budget) {
 305		if (ef4_channel_has_rx_queue(channel) &&
 306		    efx->irq_rx_adaptive &&
 307		    unlikely(++channel->irq_count == 1000)) {
 308			ef4_update_irq_mod(efx, channel);
 309		}
 310
 311		ef4_filter_rfs_expire(channel);
 312
 313		/* There is no race here; although napi_disable() will
 314		 * only wait for napi_complete(), this isn't a problem
 315		 * since ef4_nic_eventq_read_ack() will have no effect if
 316		 * interrupts have already been disabled.
 317		 */
 318		napi_complete_done(napi, spent);
 319		ef4_nic_eventq_read_ack(channel);
 320	}
 321
 322	return spent;
 323}
 324
 325/* Create event queue
 326 * Event queue memory allocations are done only once.  If the channel
 327 * is reset, the memory buffer will be reused; this guards against
 328 * errors during channel reset and also simplifies interrupt handling.
 329 */
 330static int ef4_probe_eventq(struct ef4_channel *channel)
 331{
 332	struct ef4_nic *efx = channel->efx;
 333	unsigned long entries;
 334
 335	netif_dbg(efx, probe, efx->net_dev,
 336		  "chan %d create event queue\n", channel->channel);
 337
 338	/* Build an event queue with room for one event per tx and rx buffer,
 339	 * plus some extra for link state events and MCDI completions. */
 340	entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
 341	EF4_BUG_ON_PARANOID(entries > EF4_MAX_EVQ_SIZE);
 342	channel->eventq_mask = max(entries, EF4_MIN_EVQ_SIZE) - 1;
 343
 344	return ef4_nic_probe_eventq(channel);
 345}
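/* Editorial worked example of the sizing above, assuming the default
 * 1024-entry RX and TX rings: roundup_pow_of_two(1024 + 1024 + 128)
 * = 4096, so eventq_mask = 4095 and the event queue cannot overflow
 * even if every RX and TX buffer completes at once.
 */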
 346
 347/* Prepare channel's event queue */
 348static int ef4_init_eventq(struct ef4_channel *channel)
 349{
 350	struct ef4_nic *efx = channel->efx;
 351	int rc;
 352
 353	EF4_WARN_ON_PARANOID(channel->eventq_init);
 354
 355	netif_dbg(efx, drv, efx->net_dev,
 356		  "chan %d init event queue\n", channel->channel);
 357
 358	rc = ef4_nic_init_eventq(channel);
 359	if (rc == 0) {
 360		efx->type->push_irq_moderation(channel);
 361		channel->eventq_read_ptr = 0;
 362		channel->eventq_init = true;
 363	}
 364	return rc;
 365}
 366
 367/* Enable event queue processing and NAPI */
 368void ef4_start_eventq(struct ef4_channel *channel)
 369{
 370	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
 371		  "chan %d start event queue\n", channel->channel);
 372
 373	/* Make sure the NAPI handler sees the enabled flag set */
 374	channel->enabled = true;
 375	smp_wmb();
 376
 377	napi_enable(&channel->napi_str);
 378	ef4_nic_eventq_read_ack(channel);
 379}
 380
 381/* Disable event queue processing and NAPI */
 382void ef4_stop_eventq(struct ef4_channel *channel)
 383{
 384	if (!channel->enabled)
 385		return;
 386
 387	napi_disable(&channel->napi_str);
 388	channel->enabled = false;
 389}
 390
 391static void ef4_fini_eventq(struct ef4_channel *channel)
 392{
 393	if (!channel->eventq_init)
 394		return;
 395
 396	netif_dbg(channel->efx, drv, channel->efx->net_dev,
 397		  "chan %d fini event queue\n", channel->channel);
 398
 399	ef4_nic_fini_eventq(channel);
 400	channel->eventq_init = false;
 401}
 402
 403static void ef4_remove_eventq(struct ef4_channel *channel)
 404{
 405	netif_dbg(channel->efx, drv, channel->efx->net_dev,
 406		  "chan %d remove event queue\n", channel->channel);
 407
 408	ef4_nic_remove_eventq(channel);
 409}
 410
 411/**************************************************************************
 412 *
 413 * Channel handling
 414 *
 415 *************************************************************************/
 416
 417/* Allocate and initialise a channel structure. */
 418static struct ef4_channel *
 419ef4_alloc_channel(struct ef4_nic *efx, int i, struct ef4_channel *old_channel)
 420{
 421	struct ef4_channel *channel;
 422	struct ef4_rx_queue *rx_queue;
 423	struct ef4_tx_queue *tx_queue;
 424	int j;
 425
 426	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
 427	if (!channel)
 428		return NULL;
 429
 430	channel->efx = efx;
 431	channel->channel = i;
 432	channel->type = &ef4_default_channel_type;
 433
 434	for (j = 0; j < EF4_TXQ_TYPES; j++) {
 435		tx_queue = &channel->tx_queue[j];
 436		tx_queue->efx = efx;
 437		tx_queue->queue = i * EF4_TXQ_TYPES + j;
 438		tx_queue->channel = channel;
 439	}
 440
 441	rx_queue = &channel->rx_queue;
 442	rx_queue->efx = efx;
 443	timer_setup(&rx_queue->slow_fill, ef4_rx_slow_fill, 0);
 444
 445	return channel;
 446}
 447
 448/* Allocate and initialise a channel structure, copying parameters
 449 * (but not resources) from an old channel structure.
 450 */
 451static struct ef4_channel *
 452ef4_copy_channel(const struct ef4_channel *old_channel)
 453{
 454	struct ef4_channel *channel;
 455	struct ef4_rx_queue *rx_queue;
 456	struct ef4_tx_queue *tx_queue;
 457	int j;
 458
 459	channel = kmalloc(sizeof(*channel), GFP_KERNEL);
 460	if (!channel)
 461		return NULL;
 462
 463	*channel = *old_channel;
 464
 465	channel->napi_dev = NULL;
 466	INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
 467	channel->napi_str.napi_id = 0;
 468	channel->napi_str.state = 0;
 469	memset(&channel->eventq, 0, sizeof(channel->eventq));
 470
 471	for (j = 0; j < EF4_TXQ_TYPES; j++) {
 472		tx_queue = &channel->tx_queue[j];
 473		if (tx_queue->channel)
 474			tx_queue->channel = channel;
 475		tx_queue->buffer = NULL;
 476		memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
 477	}
 478
 479	rx_queue = &channel->rx_queue;
 480	rx_queue->buffer = NULL;
 481	memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
 482	timer_setup(&rx_queue->slow_fill, ef4_rx_slow_fill, 0);
 483
 484	return channel;
 485}
 486
 487static int ef4_probe_channel(struct ef4_channel *channel)
 488{
 489	struct ef4_tx_queue *tx_queue;
 490	struct ef4_rx_queue *rx_queue;
 491	int rc;
 492
 493	netif_dbg(channel->efx, probe, channel->efx->net_dev,
 494		  "creating channel %d\n", channel->channel);
 495
 496	rc = channel->type->pre_probe(channel);
 497	if (rc)
 498		goto fail;
 499
 500	rc = ef4_probe_eventq(channel);
 501	if (rc)
 502		goto fail;
 503
 504	ef4_for_each_channel_tx_queue(tx_queue, channel) {
 505		rc = ef4_probe_tx_queue(tx_queue);
 506		if (rc)
 507			goto fail;
 508	}
 509
 510	ef4_for_each_channel_rx_queue(rx_queue, channel) {
 511		rc = ef4_probe_rx_queue(rx_queue);
 512		if (rc)
 513			goto fail;
 514	}
 515
 516	return 0;
 517
 518fail:
 519	ef4_remove_channel(channel);
 520	return rc;
 521}
 522
 523static void
 524ef4_get_channel_name(struct ef4_channel *channel, char *buf, size_t len)
 525{
 526	struct ef4_nic *efx = channel->efx;
 527	const char *type;
 528	int number;
 529
 530	number = channel->channel;
 531	if (efx->tx_channel_offset == 0) {
 532		type = "";
 533	} else if (channel->channel < efx->tx_channel_offset) {
 534		type = "-rx";
 535	} else {
 536		type = "-tx";
 537		number -= efx->tx_channel_offset;
 538	}
 539	snprintf(buf, len, "%s%s-%d", efx->name, type, number);
 540}
 541
 542static void ef4_set_channel_names(struct ef4_nic *efx)
 543{
 544	struct ef4_channel *channel;
 545
 546	ef4_for_each_channel(channel, efx)
 547		channel->type->get_name(channel,
 548					efx->msi_context[channel->channel].name,
 549					sizeof(efx->msi_context[0].name));
 550}
 551
 552static int ef4_probe_channels(struct ef4_nic *efx)
 553{
 554	struct ef4_channel *channel;
 555	int rc;
 556
 557	/* Restart special buffer allocation */
 558	efx->next_buffer_table = 0;
 559
 560	/* Probe channels in reverse, so that any 'extra' channels
 561	 * use the start of the buffer table. This allows the traffic
 562	 * channels to be resized without moving them or wasting the
 563	 * entries before them.
 564	 */
 565	ef4_for_each_channel_rev(channel, efx) {
 566		rc = ef4_probe_channel(channel);
 567		if (rc) {
 568			netif_err(efx, probe, efx->net_dev,
 569				  "failed to create channel %d\n",
 570				  channel->channel);
 571			goto fail;
 572		}
 573	}
 574	ef4_set_channel_names(efx);
 575
 576	return 0;
 577
 578fail:
 579	ef4_remove_channels(efx);
 580	return rc;
 581}
 582
 583/* Channels are shutdown and reinitialised whilst the NIC is running
 584 * to propagate configuration changes (mtu, checksum offload), or
 585 * to clear hardware error conditions
 586 */
 587static void ef4_start_datapath(struct ef4_nic *efx)
 588{
 589	netdev_features_t old_features = efx->net_dev->features;
 590	bool old_rx_scatter = efx->rx_scatter;
 591	struct ef4_tx_queue *tx_queue;
 592	struct ef4_rx_queue *rx_queue;
 593	struct ef4_channel *channel;
 594	size_t rx_buf_len;
 595
 596	/* Calculate the rx buffer allocation parameters required to
 597	 * support the current MTU, including padding for header
 598	 * alignment and overruns.
 599	 */
 600	efx->rx_dma_len = (efx->rx_prefix_size +
 601			   EF4_MAX_FRAME_LEN(efx->net_dev->mtu) +
 602			   efx->type->rx_buffer_padding);
 603	rx_buf_len = (sizeof(struct ef4_rx_page_state) +
 604		      efx->rx_ip_align + efx->rx_dma_len);
 605	if (rx_buf_len <= PAGE_SIZE) {
 606		efx->rx_scatter = efx->type->always_rx_scatter;
 607		efx->rx_buffer_order = 0;
 608	} else if (efx->type->can_rx_scatter) {
 609		BUILD_BUG_ON(EF4_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
 610		BUILD_BUG_ON(sizeof(struct ef4_rx_page_state) +
 611			     2 * ALIGN(NET_IP_ALIGN + EF4_RX_USR_BUF_SIZE,
 612				       EF4_RX_BUF_ALIGNMENT) >
 613			     PAGE_SIZE);
 614		efx->rx_scatter = true;
 615		efx->rx_dma_len = EF4_RX_USR_BUF_SIZE;
 616		efx->rx_buffer_order = 0;
 617	} else {
 618		efx->rx_scatter = false;
 619		efx->rx_buffer_order = get_order(rx_buf_len);
 620	}
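	/* Editorial worked example, with assumed values: for a 1500-byte
	 * MTU the frame plus RX prefix and NIC padding is well under a
	 * 4KiB page, so rx_buf_len fits in one page and the first branch
	 * (order-0 allocation, scatter only if the NIC always scatters)
	 * is taken; only large MTUs on NICs that cannot scatter fall
	 * through to the higher-order get_order() path.
	 */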
 621
 622	ef4_rx_config_page_split(efx);
 623	if (efx->rx_buffer_order)
 624		netif_dbg(efx, drv, efx->net_dev,
 625			  "RX buf len=%u; page order=%u batch=%u\n",
 626			  efx->rx_dma_len, efx->rx_buffer_order,
 627			  efx->rx_pages_per_batch);
 628	else
 629		netif_dbg(efx, drv, efx->net_dev,
 630			  "RX buf len=%u step=%u bpp=%u; page batch=%u\n",
 631			  efx->rx_dma_len, efx->rx_page_buf_step,
 632			  efx->rx_bufs_per_page, efx->rx_pages_per_batch);
 633
 634	/* Restore previously fixed features in hw_features and remove
 635	 * features which are fixed now
 636	 */
 637	efx->net_dev->hw_features |= efx->net_dev->features;
 638	efx->net_dev->hw_features &= ~efx->fixed_features;
 639	efx->net_dev->features |= efx->fixed_features;
 640	if (efx->net_dev->features != old_features)
 641		netdev_features_change(efx->net_dev);
 642
 643	/* RX filters may also have scatter-enabled flags */
 644	if (efx->rx_scatter != old_rx_scatter)
 645		efx->type->filter_update_rx_scatter(efx);
 646
 647	/* We must keep at least one descriptor in a TX ring empty.
 648	 * We could avoid this when the queue size does not exactly
 649	 * match the hardware ring size, but it's not that important.
 650	 * Therefore we stop the queue when one more skb might fill
 651	 * the ring completely.  We wake it when half way back to
 652	 * empty.
 653	 */
 654	efx->txq_stop_thresh = efx->txq_entries - ef4_tx_max_skb_descs(efx);
 655	efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
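	/* Editorial worked example, with assumed values: if txq_entries
	 * is 1024 and ef4_tx_max_skb_descs() returns, say, 18, the queue
	 * is stopped once its fill level reaches 1006 entries (one more
	 * maximal skb might not fit) and woken again when it drains back
	 * to 503.
	 */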
 656
 657	/* Initialise the channels */
 658	ef4_for_each_channel(channel, efx) {
 659		ef4_for_each_channel_tx_queue(tx_queue, channel) {
 660			ef4_init_tx_queue(tx_queue);
 661			atomic_inc(&efx->active_queues);
 662		}
 663
 664		ef4_for_each_channel_rx_queue(rx_queue, channel) {
 665			ef4_init_rx_queue(rx_queue);
 666			atomic_inc(&efx->active_queues);
 667			ef4_stop_eventq(channel);
 668			ef4_fast_push_rx_descriptors(rx_queue, false);
 669			ef4_start_eventq(channel);
 670		}
 671
 672		WARN_ON(channel->rx_pkt_n_frags);
 673	}
 674
 675	if (netif_device_present(efx->net_dev))
 676		netif_tx_wake_all_queues(efx->net_dev);
 677}
 678
 679static void ef4_stop_datapath(struct ef4_nic *efx)
 680{
 681	struct ef4_channel *channel;
 682	struct ef4_tx_queue *tx_queue;
 683	struct ef4_rx_queue *rx_queue;
 684	int rc;
 685
 686	EF4_ASSERT_RESET_SERIALISED(efx);
 687	BUG_ON(efx->port_enabled);
 688
 689	/* Stop RX refill */
 690	ef4_for_each_channel(channel, efx) {
 691		ef4_for_each_channel_rx_queue(rx_queue, channel)
 692			rx_queue->refill_enabled = false;
 693	}
 694
 695	ef4_for_each_channel(channel, efx) {
 696		/* RX packet processing is pipelined, so wait for the
 697		 * NAPI handler to complete.  At least event queue 0
 698		 * might be kept active by non-data events, so don't
 699		 * use napi_synchronize() but actually disable NAPI
 700		 * temporarily.
 701		 */
 702		if (ef4_channel_has_rx_queue(channel)) {
 703			ef4_stop_eventq(channel);
 704			ef4_start_eventq(channel);
 705		}
 706	}
 707
 708	rc = efx->type->fini_dmaq(efx);
 709	if (rc && EF4_WORKAROUND_7803(efx)) {
 710		/* Schedule a reset to recover from the flush failure. The
 711		 * descriptor caches reference memory we're about to free,
 712		 * but falcon_reconfigure_mac_wrapper() won't reconnect
 713		 * the MACs because of the pending reset.
 714		 */
 715		netif_err(efx, drv, efx->net_dev,
 716			  "Resetting to recover from flush failure\n");
 717		ef4_schedule_reset(efx, RESET_TYPE_ALL);
 718	} else if (rc) {
 719		netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
 720	} else {
 721		netif_dbg(efx, drv, efx->net_dev,
 722			  "successfully flushed all queues\n");
 723	}
 724
 725	ef4_for_each_channel(channel, efx) {
 726		ef4_for_each_channel_rx_queue(rx_queue, channel)
 727			ef4_fini_rx_queue(rx_queue);
 728		ef4_for_each_possible_channel_tx_queue(tx_queue, channel)
 729			ef4_fini_tx_queue(tx_queue);
 730	}
 731}
 732
 733static void ef4_remove_channel(struct ef4_channel *channel)
 734{
 735	struct ef4_tx_queue *tx_queue;
 736	struct ef4_rx_queue *rx_queue;
 737
 738	netif_dbg(channel->efx, drv, channel->efx->net_dev,
 739		  "destroy chan %d\n", channel->channel);
 740
 741	ef4_for_each_channel_rx_queue(rx_queue, channel)
 742		ef4_remove_rx_queue(rx_queue);
 743	ef4_for_each_possible_channel_tx_queue(tx_queue, channel)
 744		ef4_remove_tx_queue(tx_queue);
 745	ef4_remove_eventq(channel);
 746	channel->type->post_remove(channel);
 747}
 748
 749static void ef4_remove_channels(struct ef4_nic *efx)
 750{
 751	struct ef4_channel *channel;
 752
 753	ef4_for_each_channel(channel, efx)
 754		ef4_remove_channel(channel);
 755}
 756
 757int
 758ef4_realloc_channels(struct ef4_nic *efx, u32 rxq_entries, u32 txq_entries)
 759{
 760	struct ef4_channel *other_channel[EF4_MAX_CHANNELS], *channel;
 761	u32 old_rxq_entries, old_txq_entries;
 762	unsigned i, next_buffer_table = 0;
 763	int rc, rc2;
 764
 765	rc = ef4_check_disabled(efx);
 766	if (rc)
 767		return rc;
 768
 769	/* Not all channels should be reallocated. We must avoid
 770	 * reallocating their buffer table entries.
 771	 */
 772	ef4_for_each_channel(channel, efx) {
 773		struct ef4_rx_queue *rx_queue;
 774		struct ef4_tx_queue *tx_queue;
 775
 776		if (channel->type->copy)
 777			continue;
 778		next_buffer_table = max(next_buffer_table,
 779					channel->eventq.index +
 780					channel->eventq.entries);
 781		ef4_for_each_channel_rx_queue(rx_queue, channel)
 782			next_buffer_table = max(next_buffer_table,
 783						rx_queue->rxd.index +
 784						rx_queue->rxd.entries);
 785		ef4_for_each_channel_tx_queue(tx_queue, channel)
 786			next_buffer_table = max(next_buffer_table,
 787						tx_queue->txd.index +
 788						tx_queue->txd.entries);
 789	}
 790
 791	ef4_device_detach_sync(efx);
 792	ef4_stop_all(efx);
 793	ef4_soft_disable_interrupts(efx);
 794
 795	/* Clone channels (where possible) */
 796	memset(other_channel, 0, sizeof(other_channel));
 797	for (i = 0; i < efx->n_channels; i++) {
 798		channel = efx->channel[i];
 799		if (channel->type->copy)
 800			channel = channel->type->copy(channel);
 801		if (!channel) {
 802			rc = -ENOMEM;
 803			goto out;
 804		}
 805		other_channel[i] = channel;
 806	}
 807
 808	/* Swap entry counts and channel pointers */
 809	old_rxq_entries = efx->rxq_entries;
 810	old_txq_entries = efx->txq_entries;
 811	efx->rxq_entries = rxq_entries;
 812	efx->txq_entries = txq_entries;
 813	for (i = 0; i < efx->n_channels; i++) {
 814		swap(efx->channel[i], other_channel[i]);
 815	}
 816
 817	/* Restart buffer table allocation */
 818	efx->next_buffer_table = next_buffer_table;
 819
 820	for (i = 0; i < efx->n_channels; i++) {
 821		channel = efx->channel[i];
 822		if (!channel->type->copy)
 823			continue;
 824		rc = ef4_probe_channel(channel);
 825		if (rc)
 826			goto rollback;
 827		ef4_init_napi_channel(efx->channel[i]);
 828	}
 829
 830out:
 831	/* Destroy unused channel structures */
 832	for (i = 0; i < efx->n_channels; i++) {
 833		channel = other_channel[i];
 834		if (channel && channel->type->copy) {
 835			ef4_fini_napi_channel(channel);
 836			ef4_remove_channel(channel);
 837			kfree(channel);
 838		}
 839	}
 840
 841	rc2 = ef4_soft_enable_interrupts(efx);
 842	if (rc2) {
 843		rc = rc ? rc : rc2;
 844		netif_err(efx, drv, efx->net_dev,
 845			  "unable to restart interrupts on channel reallocation\n");
 846		ef4_schedule_reset(efx, RESET_TYPE_DISABLE);
 847	} else {
 848		ef4_start_all(efx);
 849		netif_device_attach(efx->net_dev);
 850	}
 851	return rc;
 852
 853rollback:
 854	/* Swap back */
 855	efx->rxq_entries = old_rxq_entries;
 856	efx->txq_entries = old_txq_entries;
 857	for (i = 0; i < efx->n_channels; i++) {
 858		swap(efx->channel[i], other_channel[i]);
 859	}
 860	goto out;
 861}
 862
 863void ef4_schedule_slow_fill(struct ef4_rx_queue *rx_queue)
 864{
 865	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
 866}
 867
 868static const struct ef4_channel_type ef4_default_channel_type = {
 869	.pre_probe		= ef4_channel_dummy_op_int,
 870	.post_remove		= ef4_channel_dummy_op_void,
 871	.get_name		= ef4_get_channel_name,
 872	.copy			= ef4_copy_channel,
 873	.keep_eventq		= false,
 874};
 875
 876int ef4_channel_dummy_op_int(struct ef4_channel *channel)
 877{
 878	return 0;
 879}
 880
 881void ef4_channel_dummy_op_void(struct ef4_channel *channel)
 882{
 883}
 884
 885/**************************************************************************
 886 *
 887 * Port handling
 888 *
 889 **************************************************************************/
 890
 891/* This ensures that the kernel is kept informed (via
 892 * netif_carrier_on/off) of the link status, and also keeps the
 893 * port's TX queue stopped while the link is down.
 894 */
 895void ef4_link_status_changed(struct ef4_nic *efx)
 896{
 897	struct ef4_link_state *link_state = &efx->link_state;
 898
 899	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
 900	 * that no events are triggered between unregister_netdev() and the
 901	 * driver unloading. A more general condition is that NETDEV_CHANGE
 902	 * can only be generated between NETDEV_UP and NETDEV_DOWN */
 903	if (!netif_running(efx->net_dev))
 904		return;
 905
 906	if (link_state->up != netif_carrier_ok(efx->net_dev)) {
 907		efx->n_link_state_changes++;
 908
 909		if (link_state->up)
 910			netif_carrier_on(efx->net_dev);
 911		else
 912			netif_carrier_off(efx->net_dev);
 913	}
 914
 915	/* Status message for kernel log */
 916	if (link_state->up)
 917		netif_info(efx, link, efx->net_dev,
 918			   "link up at %uMbps %s-duplex (MTU %d)\n",
 919			   link_state->speed, link_state->fd ? "full" : "half",
 920			   efx->net_dev->mtu);
 921	else
 922		netif_info(efx, link, efx->net_dev, "link down\n");
 923}
 924
 925void ef4_link_set_advertising(struct ef4_nic *efx, u32 advertising)
 926{
 927	efx->link_advertising = advertising;
 928	if (advertising) {
 929		if (advertising & ADVERTISED_Pause)
 930			efx->wanted_fc |= (EF4_FC_TX | EF4_FC_RX);
 931		else
 932			efx->wanted_fc &= ~(EF4_FC_TX | EF4_FC_RX);
 933		if (advertising & ADVERTISED_Asym_Pause)
 934			efx->wanted_fc ^= EF4_FC_TX;
 935	}
 936}
 937
 938void ef4_link_set_wanted_fc(struct ef4_nic *efx, u8 wanted_fc)
 939{
 940	efx->wanted_fc = wanted_fc;
 941	if (efx->link_advertising) {
 942		if (wanted_fc & EF4_FC_RX)
 943			efx->link_advertising |= (ADVERTISED_Pause |
 944						  ADVERTISED_Asym_Pause);
 945		else
 946			efx->link_advertising &= ~(ADVERTISED_Pause |
 947						   ADVERTISED_Asym_Pause);
 948		if (wanted_fc & EF4_FC_TX)
 949			efx->link_advertising ^= ADVERTISED_Asym_Pause;
 950	}
 951}
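/* Editorial summary of the mapping implemented by the two functions
 * above, derived from the code: in ef4_link_set_advertising(),
 *
 *   Pause  Asym_Pause  ->  wanted_fc
 *     0        0           none
 *     1        0           TX | RX
 *     1        1           RX only (the XOR clears TX)
 *     0        1           TX only (the XOR sets TX)
 *
 * ef4_link_set_wanted_fc() applies the inverse mapping, so the two
 * representations stay consistent whichever one is set first.
 */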
 952
 953static void ef4_fini_port(struct ef4_nic *efx);
 954
 955/* We assume that efx->type->reconfigure_mac will always try to sync RX
 956 * filters and therefore needs to read-lock the filter table against freeing
 957 */
 958void ef4_mac_reconfigure(struct ef4_nic *efx)
 959{
 960	down_read(&efx->filter_sem);
 961	efx->type->reconfigure_mac(efx);
 962	up_read(&efx->filter_sem);
 963}
 964
 965/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
 966 * the MAC appropriately. All other PHY configuration changes are pushed
 967 * through phy_op->set_link_ksettings(), and pushed asynchronously to the MAC
 968 * through ef4_monitor().
 969 *
 970 * Callers must hold the mac_lock
 971 */
 972int __ef4_reconfigure_port(struct ef4_nic *efx)
 973{
 974	enum ef4_phy_mode phy_mode;
 975	int rc;
 976
 977	WARN_ON(!mutex_is_locked(&efx->mac_lock));
 978
 979	/* Disable PHY transmit in mac level loopbacks */
 980	phy_mode = efx->phy_mode;
 981	if (LOOPBACK_INTERNAL(efx))
 982		efx->phy_mode |= PHY_MODE_TX_DISABLED;
 983	else
 984		efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
 985
 986	rc = efx->type->reconfigure_port(efx);
 987
 988	if (rc)
 989		efx->phy_mode = phy_mode;
 990
 991	return rc;
 992}
 993
 994/* Reinitialise the MAC to pick up new PHY settings, even if the port is
 995 * disabled. */
 996int ef4_reconfigure_port(struct ef4_nic *efx)
 997{
 998	int rc;
 999
1000	EF4_ASSERT_RESET_SERIALISED(efx);
1001
1002	mutex_lock(&efx->mac_lock);
1003	rc = __ef4_reconfigure_port(efx);
1004	mutex_unlock(&efx->mac_lock);
1005
1006	return rc;
1007}
1008
1009/* Asynchronous work item for changing MAC promiscuity and multicast
1010 * hash.  Avoid a drain/rx_ingress enable by reconfiguring the current
1011 * MAC directly. */
1012static void ef4_mac_work(struct work_struct *data)
1013{
1014	struct ef4_nic *efx = container_of(data, struct ef4_nic, mac_work);
1015
1016	mutex_lock(&efx->mac_lock);
1017	if (efx->port_enabled)
1018		ef4_mac_reconfigure(efx);
1019	mutex_unlock(&efx->mac_lock);
1020}
1021
1022static int ef4_probe_port(struct ef4_nic *efx)
1023{
1024	int rc;
1025
1026	netif_dbg(efx, probe, efx->net_dev, "create port\n");
1027
1028	if (phy_flash_cfg)
1029		efx->phy_mode = PHY_MODE_SPECIAL;
1030
1031	/* Connect up MAC/PHY operations table */
1032	rc = efx->type->probe_port(efx);
1033	if (rc)
1034		return rc;
1035
1036	/* Initialise MAC address to permanent address */
1037	eth_hw_addr_set(efx->net_dev, efx->net_dev->perm_addr);
1038
1039	return 0;
1040}
1041
1042static int ef4_init_port(struct ef4_nic *efx)
1043{
1044	int rc;
1045
1046	netif_dbg(efx, drv, efx->net_dev, "init port\n");
1047
1048	mutex_lock(&efx->mac_lock);
1049
1050	rc = efx->phy_op->init(efx);
1051	if (rc)
1052		goto fail1;
1053
1054	efx->port_initialized = true;
1055
1056	/* Reconfigure the MAC before creating dma queues (required for
1057	 * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
1058	ef4_mac_reconfigure(efx);
1059
1060	/* Ensure the PHY advertises the correct flow control settings */
1061	rc = efx->phy_op->reconfigure(efx);
1062	if (rc && rc != -EPERM)
1063		goto fail2;
1064
1065	mutex_unlock(&efx->mac_lock);
1066	return 0;
1067
1068fail2:
1069	efx->phy_op->fini(efx);
1070fail1:
1071	mutex_unlock(&efx->mac_lock);
1072	return rc;
1073}
1074
1075static void ef4_start_port(struct ef4_nic *efx)
1076{
1077	netif_dbg(efx, ifup, efx->net_dev, "start port\n");
1078	BUG_ON(efx->port_enabled);
1079
1080	mutex_lock(&efx->mac_lock);
1081	efx->port_enabled = true;
1082
1083	/* Ensure MAC ingress/egress is enabled */
1084	ef4_mac_reconfigure(efx);
1085
1086	mutex_unlock(&efx->mac_lock);
1087}
1088
1089/* Cancel work for MAC reconfiguration, periodic hardware monitoring
1090 * and the async self-test, wait for them to finish and prevent them
1091 * being scheduled again.  This doesn't cover online resets, which
1092 * should only be cancelled when removing the device.
1093 */
1094static void ef4_stop_port(struct ef4_nic *efx)
1095{
1096	netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");
1097
1098	EF4_ASSERT_RESET_SERIALISED(efx);
1099
1100	mutex_lock(&efx->mac_lock);
1101	efx->port_enabled = false;
1102	mutex_unlock(&efx->mac_lock);
1103
1104	/* Serialise against ef4_set_multicast_list() */
1105	netif_addr_lock_bh(efx->net_dev);
1106	netif_addr_unlock_bh(efx->net_dev);
1107
1108	cancel_delayed_work_sync(&efx->monitor_work);
1109	ef4_selftest_async_cancel(efx);
1110	cancel_work_sync(&efx->mac_work);
1111}
1112
1113static void ef4_fini_port(struct ef4_nic *efx)
1114{
1115	netif_dbg(efx, drv, efx->net_dev, "shut down port\n");
1116
1117	if (!efx->port_initialized)
1118		return;
1119
1120	efx->phy_op->fini(efx);
1121	efx->port_initialized = false;
1122
1123	efx->link_state.up = false;
1124	ef4_link_status_changed(efx);
1125}
1126
1127static void ef4_remove_port(struct ef4_nic *efx)
1128{
1129	netif_dbg(efx, drv, efx->net_dev, "destroying port\n");
1130
1131	efx->type->remove_port(efx);
1132}
1133
1134/**************************************************************************
1135 *
1136 * NIC handling
1137 *
1138 **************************************************************************/
1139
1140static LIST_HEAD(ef4_primary_list);
1141static LIST_HEAD(ef4_unassociated_list);
1142
1143static bool ef4_same_controller(struct ef4_nic *left, struct ef4_nic *right)
1144{
1145	return left->type == right->type &&
1146		left->vpd_sn && right->vpd_sn &&
1147		!strcmp(left->vpd_sn, right->vpd_sn);
1148}
1149
1150static void ef4_associate(struct ef4_nic *efx)
1151{
1152	struct ef4_nic *other, *next;
1153
1154	if (efx->primary == efx) {
1155		/* Adding primary function; look for secondaries */
1156
1157		netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n");
1158		list_add_tail(&efx->node, &ef4_primary_list);
1159
1160		list_for_each_entry_safe(other, next, &ef4_unassociated_list,
1161					 node) {
1162			if (ef4_same_controller(efx, other)) {
1163				list_del(&other->node);
1164				netif_dbg(other, probe, other->net_dev,
1165					  "moving to secondary list of %s %s\n",
1166					  pci_name(efx->pci_dev),
1167					  efx->net_dev->name);
1168				list_add_tail(&other->node,
1169					      &efx->secondary_list);
1170				other->primary = efx;
1171			}
1172		}
1173	} else {
1174		/* Adding secondary function; look for primary */
1175
1176		list_for_each_entry(other, &ef4_primary_list, node) {
1177			if (ef4_same_controller(efx, other)) {
1178				netif_dbg(efx, probe, efx->net_dev,
1179					  "adding to secondary list of %s %s\n",
1180					  pci_name(other->pci_dev),
1181					  other->net_dev->name);
1182				list_add_tail(&efx->node,
1183					      &other->secondary_list);
1184				efx->primary = other;
1185				return;
1186			}
1187		}
1188
1189		netif_dbg(efx, probe, efx->net_dev,
1190			  "adding to unassociated list\n");
1191		list_add_tail(&efx->node, &ef4_unassociated_list);
1192	}
1193}
1194
1195static void ef4_dissociate(struct ef4_nic *efx)
1196{
1197	struct ef4_nic *other, *next;
1198
1199	list_del(&efx->node);
1200	efx->primary = NULL;
1201
1202	list_for_each_entry_safe(other, next, &efx->secondary_list, node) {
1203		list_del(&other->node);
1204		netif_dbg(other, probe, other->net_dev,
1205			  "moving to unassociated list\n");
1206		list_add_tail(&other->node, &ef4_unassociated_list);
1207		other->primary = NULL;
1208	}
1209}
1210
1211/* This configures the PCI device to enable I/O and DMA. */
1212static int ef4_init_io(struct ef4_nic *efx)
1213{
1214	struct pci_dev *pci_dev = efx->pci_dev;
1215	dma_addr_t dma_mask = efx->type->max_dma_mask;
1216	unsigned int mem_map_size = efx->type->mem_map_size(efx);
1217	int rc, bar;
1218
1219	netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
1220
1221	bar = efx->type->mem_bar;
1222
1223	rc = pci_enable_device(pci_dev);
1224	if (rc) {
1225		netif_err(efx, probe, efx->net_dev,
1226			  "failed to enable PCI device\n");
1227		goto fail1;
1228	}
1229
1230	pci_set_master(pci_dev);
1231
1232	/* Set the PCI DMA mask.  Try all possibilities from our genuine mask
1233	 * down to 32 bits, because some architectures will allow 40 bit
 1234	 * masks even though they reject 46 bit masks.
1235	 */
1236	while (dma_mask > 0x7fffffffUL) {
1237		rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
1238		if (rc == 0)
1239			break;
1240		dma_mask >>= 1;
1241	}
1242	if (rc) {
1243		netif_err(efx, probe, efx->net_dev,
1244			  "could not find a suitable DMA mask\n");
1245		goto fail2;
1246	}
1247	netif_dbg(efx, probe, efx->net_dev,
1248		  "using DMA mask %llx\n", (unsigned long long) dma_mask);
1249
1250	efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
1251	rc = pci_request_region(pci_dev, bar, "sfc");
1252	if (rc) {
1253		netif_err(efx, probe, efx->net_dev,
1254			  "request for memory BAR failed\n");
1255		rc = -EIO;
1256		goto fail3;
1257	}
1258	efx->membase = ioremap(efx->membase_phys, mem_map_size);
1259	if (!efx->membase) {
1260		netif_err(efx, probe, efx->net_dev,
1261			  "could not map memory BAR at %llx+%x\n",
1262			  (unsigned long long)efx->membase_phys, mem_map_size);
1263		rc = -ENOMEM;
1264		goto fail4;
1265	}
1266	netif_dbg(efx, probe, efx->net_dev,
1267		  "memory BAR at %llx+%x (virtual %p)\n",
1268		  (unsigned long long)efx->membase_phys, mem_map_size,
1269		  efx->membase);
1270
1271	return 0;
1272
1273 fail4:
1274	pci_release_region(efx->pci_dev, bar);
1275 fail3:
1276	efx->membase_phys = 0;
1277 fail2:
1278	pci_disable_device(efx->pci_dev);
1279 fail1:
1280	return rc;
1281}
1282
1283static void ef4_fini_io(struct ef4_nic *efx)
1284{
1285	int bar;
1286
1287	netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
1288
1289	if (efx->membase) {
1290		iounmap(efx->membase);
1291		efx->membase = NULL;
1292	}
1293
1294	if (efx->membase_phys) {
1295		bar = efx->type->mem_bar;
1296		pci_release_region(efx->pci_dev, bar);
1297		efx->membase_phys = 0;
1298	}
1299
1300	/* Don't disable bus-mastering if VFs are assigned */
1301	if (!pci_vfs_assigned(efx->pci_dev))
1302		pci_disable_device(efx->pci_dev);
1303}
1304
1305void ef4_set_default_rx_indir_table(struct ef4_nic *efx)
1306{
1307	size_t i;
1308
1309	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
1310		efx->rx_indir_table[i] =
1311			ethtool_rxfh_indir_default(i, efx->rss_spread);
1312}
1313
1314static unsigned int ef4_wanted_parallelism(struct ef4_nic *efx)
1315{
1316	cpumask_var_t thread_mask;
1317	unsigned int count;
1318	int cpu;
1319
1320	if (rss_cpus) {
1321		count = rss_cpus;
1322	} else {
1323		if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
1324			netif_warn(efx, probe, efx->net_dev,
1325				   "RSS disabled due to allocation failure\n");
1326			return 1;
1327		}
1328
1329		count = 0;
1330		for_each_online_cpu(cpu) {
1331			if (!cpumask_test_cpu(cpu, thread_mask)) {
1332				++count;
1333				cpumask_or(thread_mask, thread_mask,
1334					   topology_sibling_cpumask(cpu));
1335			}
1336		}
1337
1338		free_cpumask_var(thread_mask);
1339	}
1340
1341	if (count > EF4_MAX_RX_QUEUES) {
1342		netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
1343			       "Reducing number of rx queues from %u to %u.\n",
1344			       count, EF4_MAX_RX_QUEUES);
1345		count = EF4_MAX_RX_QUEUES;
1346	}
1347
1348	return count;
1349}
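/* Editorial worked example: with rss_cpus unset on a hypothetical
 * 4-core/8-thread system, only the first online CPU of each sibling
 * set increments count, so this returns 4 and RSS targets one
 * interrupt per physical core.
 */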
1350
1351/* Probe the number and type of interrupts we are able to obtain, and
1352 * the resulting numbers of channels and RX queues.
1353 */
1354static int ef4_probe_interrupts(struct ef4_nic *efx)
1355{
1356	unsigned int extra_channels = 0;
1357	unsigned int i, j;
1358	int rc;
1359
1360	for (i = 0; i < EF4_MAX_EXTRA_CHANNELS; i++)
1361		if (efx->extra_channel_type[i])
1362			++extra_channels;
1363
1364	if (efx->interrupt_mode == EF4_INT_MODE_MSIX) {
1365		struct msix_entry xentries[EF4_MAX_CHANNELS];
1366		unsigned int n_channels;
1367
1368		n_channels = ef4_wanted_parallelism(efx);
1369		if (ef4_separate_tx_channels)
1370			n_channels *= 2;
1371		n_channels += extra_channels;
1372		n_channels = min(n_channels, efx->max_channels);
1373
1374		for (i = 0; i < n_channels; i++)
1375			xentries[i].entry = i;
1376		rc = pci_enable_msix_range(efx->pci_dev,
1377					   xentries, 1, n_channels);
1378		if (rc < 0) {
1379			/* Fall back to single channel MSI */
1380			efx->interrupt_mode = EF4_INT_MODE_MSI;
1381			netif_err(efx, drv, efx->net_dev,
1382				  "could not enable MSI-X\n");
1383		} else if (rc < n_channels) {
1384			netif_err(efx, drv, efx->net_dev,
1385				  "WARNING: Insufficient MSI-X vectors"
1386				  " available (%d < %u).\n", rc, n_channels);
1387			netif_err(efx, drv, efx->net_dev,
1388				  "WARNING: Performance may be reduced.\n");
1389			n_channels = rc;
1390		}
1391
1392		if (rc > 0) {
1393			efx->n_channels = n_channels;
1394			if (n_channels > extra_channels)
1395				n_channels -= extra_channels;
1396			if (ef4_separate_tx_channels) {
1397				efx->n_tx_channels = min(max(n_channels / 2,
1398							     1U),
1399							 efx->max_tx_channels);
1400				efx->n_rx_channels = max(n_channels -
1401							 efx->n_tx_channels,
1402							 1U);
1403			} else {
1404				efx->n_tx_channels = min(n_channels,
1405							 efx->max_tx_channels);
1406				efx->n_rx_channels = n_channels;
1407			}
1408			for (i = 0; i < efx->n_channels; i++)
1409				ef4_get_channel(efx, i)->irq =
1410					xentries[i].vector;
1411		}
1412	}
1413
1414	/* Try single interrupt MSI */
1415	if (efx->interrupt_mode == EF4_INT_MODE_MSI) {
1416		efx->n_channels = 1;
1417		efx->n_rx_channels = 1;
1418		efx->n_tx_channels = 1;
1419		rc = pci_enable_msi(efx->pci_dev);
1420		if (rc == 0) {
1421			ef4_get_channel(efx, 0)->irq = efx->pci_dev->irq;
1422		} else {
1423			netif_err(efx, drv, efx->net_dev,
1424				  "could not enable MSI\n");
1425			efx->interrupt_mode = EF4_INT_MODE_LEGACY;
1426		}
1427	}
1428
1429	/* Assume legacy interrupts */
1430	if (efx->interrupt_mode == EF4_INT_MODE_LEGACY) {
1431		efx->n_channels = 1 + (ef4_separate_tx_channels ? 1 : 0);
1432		efx->n_rx_channels = 1;
1433		efx->n_tx_channels = 1;
1434		efx->legacy_irq = efx->pci_dev->irq;
1435	}
1436
1437	/* Assign extra channels if possible */
1438	j = efx->n_channels;
1439	for (i = 0; i < EF4_MAX_EXTRA_CHANNELS; i++) {
1440		if (!efx->extra_channel_type[i])
1441			continue;
1442		if (efx->interrupt_mode != EF4_INT_MODE_MSIX ||
1443		    efx->n_channels <= extra_channels) {
1444			efx->extra_channel_type[i]->handle_no_channel(efx);
1445		} else {
1446			--j;
1447			ef4_get_channel(efx, j)->type =
1448				efx->extra_channel_type[i];
1449		}
1450	}
1451
1452	efx->rss_spread = efx->n_rx_channels;
1453
1454	return 0;
1455}
1456
1457static int ef4_soft_enable_interrupts(struct ef4_nic *efx)
1458{
1459	struct ef4_channel *channel, *end_channel;
1460	int rc;
1461
1462	BUG_ON(efx->state == STATE_DISABLED);
1463
1464	efx->irq_soft_enabled = true;
1465	smp_wmb();
1466
1467	ef4_for_each_channel(channel, efx) {
1468		if (!channel->type->keep_eventq) {
1469			rc = ef4_init_eventq(channel);
1470			if (rc)
1471				goto fail;
1472		}
1473		ef4_start_eventq(channel);
1474	}
1475
1476	return 0;
1477fail:
1478	end_channel = channel;
1479	ef4_for_each_channel(channel, efx) {
1480		if (channel == end_channel)
1481			break;
1482		ef4_stop_eventq(channel);
1483		if (!channel->type->keep_eventq)
1484			ef4_fini_eventq(channel);
1485	}
1486
1487	return rc;
1488}
1489
1490static void ef4_soft_disable_interrupts(struct ef4_nic *efx)
1491{
1492	struct ef4_channel *channel;
1493
1494	if (efx->state == STATE_DISABLED)
1495		return;
1496
1497	efx->irq_soft_enabled = false;
1498	smp_wmb();
1499
1500	if (efx->legacy_irq)
1501		synchronize_irq(efx->legacy_irq);
1502
1503	ef4_for_each_channel(channel, efx) {
1504		if (channel->irq)
1505			synchronize_irq(channel->irq);
1506
1507		ef4_stop_eventq(channel);
1508		if (!channel->type->keep_eventq)
1509			ef4_fini_eventq(channel);
1510	}
1511}
1512
1513static int ef4_enable_interrupts(struct ef4_nic *efx)
1514{
1515	struct ef4_channel *channel, *end_channel;
1516	int rc;
1517
1518	BUG_ON(efx->state == STATE_DISABLED);
1519
1520	if (efx->eeh_disabled_legacy_irq) {
1521		enable_irq(efx->legacy_irq);
1522		efx->eeh_disabled_legacy_irq = false;
1523	}
1524
1525	efx->type->irq_enable_master(efx);
1526
1527	ef4_for_each_channel(channel, efx) {
1528		if (channel->type->keep_eventq) {
1529			rc = ef4_init_eventq(channel);
1530			if (rc)
1531				goto fail;
1532		}
1533	}
1534
1535	rc = ef4_soft_enable_interrupts(efx);
1536	if (rc)
1537		goto fail;
1538
1539	return 0;
1540
1541fail:
1542	end_channel = channel;
1543	ef4_for_each_channel(channel, efx) {
1544		if (channel == end_channel)
1545			break;
1546		if (channel->type->keep_eventq)
1547			ef4_fini_eventq(channel);
1548	}
1549
1550	efx->type->irq_disable_non_ev(efx);
1551
1552	return rc;
1553}
1554
1555static void ef4_disable_interrupts(struct ef4_nic *efx)
1556{
1557	struct ef4_channel *channel;
1558
1559	ef4_soft_disable_interrupts(efx);
1560
1561	ef4_for_each_channel(channel, efx) {
1562		if (channel->type->keep_eventq)
1563			ef4_fini_eventq(channel);
1564	}
1565
1566	efx->type->irq_disable_non_ev(efx);
1567}
1568
1569static void ef4_remove_interrupts(struct ef4_nic *efx)
1570{
1571	struct ef4_channel *channel;
1572
1573	/* Remove MSI/MSI-X interrupts */
1574	ef4_for_each_channel(channel, efx)
1575		channel->irq = 0;
1576	pci_disable_msi(efx->pci_dev);
1577	pci_disable_msix(efx->pci_dev);
1578
1579	/* Remove legacy interrupt */
1580	efx->legacy_irq = 0;
1581}
1582
1583static void ef4_set_channels(struct ef4_nic *efx)
1584{
1585	struct ef4_channel *channel;
1586	struct ef4_tx_queue *tx_queue;
1587
1588	efx->tx_channel_offset =
1589		ef4_separate_tx_channels ?
1590		efx->n_channels - efx->n_tx_channels : 0;
1591
1592	/* We need to mark which channels really have RX and TX
1593	 * queues, and adjust the TX queue numbers if we have separate
1594	 * RX-only and TX-only channels.
1595	 */
1596	ef4_for_each_channel(channel, efx) {
1597		if (channel->channel < efx->n_rx_channels)
1598			channel->rx_queue.core_index = channel->channel;
1599		else
1600			channel->rx_queue.core_index = -1;
1601
1602		ef4_for_each_channel_tx_queue(tx_queue, channel)
1603			tx_queue->queue -= (efx->tx_channel_offset *
1604					    EF4_TXQ_TYPES);
1605	}
1606}
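/* Editorial worked example, with assumed values: if
 * ef4_separate_tx_channels is set and n_channels = 8 with
 * n_tx_channels = 4, then tx_channel_offset = 4; the TX queues on
 * channel 4, numbered from 4 * EF4_TXQ_TYPES by ef4_alloc_channel(),
 * are shifted down so TX queue numbering restarts at 0 on the first
 * TX-capable channel.
 */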
1607
1608static int ef4_probe_nic(struct ef4_nic *efx)
1609{
1610	int rc;
1611
1612	netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");
1613
1614	/* Carry out hardware-type specific initialisation */
1615	rc = efx->type->probe(efx);
1616	if (rc)
1617		return rc;
1618
1619	do {
1620		if (!efx->max_channels || !efx->max_tx_channels) {
1621			netif_err(efx, drv, efx->net_dev,
1622				  "Insufficient resources to allocate"
1623				  " any channels\n");
1624			rc = -ENOSPC;
1625			goto fail1;
1626		}
1627
1628		/* Determine the number of channels and queues by trying
1629		 * to hook in MSI-X interrupts.
1630		 */
1631		rc = ef4_probe_interrupts(efx);
1632		if (rc)
1633			goto fail1;
1634
1635		ef4_set_channels(efx);
1636
1637		/* dimension_resources can fail with EAGAIN */
1638		rc = efx->type->dimension_resources(efx);
1639		if (rc != 0 && rc != -EAGAIN)
1640			goto fail2;
1641
1642		if (rc == -EAGAIN)
1643			/* try again with new max_channels */
1644			ef4_remove_interrupts(efx);
1645
1646	} while (rc == -EAGAIN);
1647
1648	if (efx->n_channels > 1)
1649		netdev_rss_key_fill(&efx->rx_hash_key,
1650				    sizeof(efx->rx_hash_key));
1651	ef4_set_default_rx_indir_table(efx);
1652
1653	netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
1654	netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
1655
1656	/* Initialise the interrupt moderation settings */
1657	efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000);
1658	ef4_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
1659				true);
1660
1661	return 0;
1662
1663fail2:
1664	ef4_remove_interrupts(efx);
1665fail1:
1666	efx->type->remove(efx);
1667	return rc;
1668}
1669
1670static void ef4_remove_nic(struct ef4_nic *efx)
1671{
1672	netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");
1673
1674	ef4_remove_interrupts(efx);
1675	efx->type->remove(efx);
1676}
1677
1678static int ef4_probe_filters(struct ef4_nic *efx)
1679{
1680	int rc;
1681
1682	spin_lock_init(&efx->filter_lock);
1683	init_rwsem(&efx->filter_sem);
1684	mutex_lock(&efx->mac_lock);
1685	down_write(&efx->filter_sem);
1686	rc = efx->type->filter_table_probe(efx);
1687	if (rc)
1688		goto out_unlock;
1689
1690#ifdef CONFIG_RFS_ACCEL
1691	if (efx->type->offload_features & NETIF_F_NTUPLE) {
1692		struct ef4_channel *channel;
1693		int i, success = 1;
1694
1695		ef4_for_each_channel(channel, efx) {
1696			channel->rps_flow_id =
1697				kcalloc(efx->type->max_rx_ip_filters,
1698					sizeof(*channel->rps_flow_id),
1699					GFP_KERNEL);
1700			if (!channel->rps_flow_id)
1701				success = 0;
1702			else
1703				for (i = 0;
1704				     i < efx->type->max_rx_ip_filters;
1705				     ++i)
1706					channel->rps_flow_id[i] =
1707						RPS_FLOW_ID_INVALID;
1708		}
1709
1710		if (!success) {
1711			ef4_for_each_channel(channel, efx)
1712				kfree(channel->rps_flow_id);
1713			efx->type->filter_table_remove(efx);
1714			rc = -ENOMEM;
1715			goto out_unlock;
1716		}
1717
1718		efx->rps_expire_index = efx->rps_expire_channel = 0;
1719	}
1720#endif
1721out_unlock:
1722	up_write(&efx->filter_sem);
1723	mutex_unlock(&efx->mac_lock);
1724	return rc;
1725}
1726
1727static void ef4_remove_filters(struct ef4_nic *efx)
1728{
1729#ifdef CONFIG_RFS_ACCEL
1730	struct ef4_channel *channel;
1731
1732	ef4_for_each_channel(channel, efx)
1733		kfree(channel->rps_flow_id);
1734#endif
1735	down_write(&efx->filter_sem);
1736	efx->type->filter_table_remove(efx);
1737	up_write(&efx->filter_sem);
1738}
1739
1740static void ef4_restore_filters(struct ef4_nic *efx)
1741{
1742	down_read(&efx->filter_sem);
1743	efx->type->filter_table_restore(efx);
1744	up_read(&efx->filter_sem);
1745}
1746
1747/**************************************************************************
1748 *
1749 * NIC startup/shutdown
1750 *
1751 *************************************************************************/
1752
1753static int ef4_probe_all(struct ef4_nic *efx)
1754{
1755	int rc;
1756
1757	rc = ef4_probe_nic(efx);
1758	if (rc) {
1759		netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
1760		goto fail1;
1761	}
1762
1763	rc = ef4_probe_port(efx);
1764	if (rc) {
1765		netif_err(efx, probe, efx->net_dev, "failed to create port\n");
1766		goto fail2;
1767	}
1768
1769	BUILD_BUG_ON(EF4_DEFAULT_DMAQ_SIZE < EF4_RXQ_MIN_ENT);
1770	if (WARN_ON(EF4_DEFAULT_DMAQ_SIZE < EF4_TXQ_MIN_ENT(efx))) {
1771		rc = -EINVAL;
1772		goto fail3;
1773	}
1774	efx->rxq_entries = efx->txq_entries = EF4_DEFAULT_DMAQ_SIZE;
1775
1776	rc = ef4_probe_filters(efx);
1777	if (rc) {
1778		netif_err(efx, probe, efx->net_dev,
1779			  "failed to create filter tables\n");
1780		goto fail4;
1781	}
1782
1783	rc = ef4_probe_channels(efx);
1784	if (rc)
1785		goto fail5;
1786
1787	return 0;
1788
1789 fail5:
1790	ef4_remove_filters(efx);
1791 fail4:
1792 fail3:
1793	ef4_remove_port(efx);
1794 fail2:
1795	ef4_remove_nic(efx);
1796 fail1:
1797	return rc;
1798}
1799
1800/* If the interface is supposed to be running but is not, start
1801 * the hardware and software data path, regular activity for the port
1802 * (MAC statistics, link polling, etc.) and schedule the port to be
1803 * reconfigured.  Interrupts must already be enabled.  This function
1804 * is safe to call multiple times, so long as the NIC is not disabled.
1805 * Requires the RTNL lock.
1806 */
1807static void ef4_start_all(struct ef4_nic *efx)
1808{
1809	EF4_ASSERT_RESET_SERIALISED(efx);
1810	BUG_ON(efx->state == STATE_DISABLED);
1811
1812	/* Check that it is appropriate to restart the interface. All
1813	 * of these flags are safe to read under just the rtnl lock */
1814	if (efx->port_enabled || !netif_running(efx->net_dev) ||
1815	    efx->reset_pending)
1816		return;
1817
1818	ef4_start_port(efx);
1819	ef4_start_datapath(efx);
1820
1821	/* Start the hardware monitor if there is one */
1822	if (efx->type->monitor != NULL)
1823		queue_delayed_work(efx->workqueue, &efx->monitor_work,
1824				   ef4_monitor_interval);
1825
1826	efx->type->start_stats(efx);
1827	efx->type->pull_stats(efx);
1828	spin_lock_bh(&efx->stats_lock);
1829	efx->type->update_stats(efx, NULL, NULL);
1830	spin_unlock_bh(&efx->stats_lock);
1831}
1832
1833/* Quiesce the hardware and software data path, and regular activity
1834 * for the port without bringing the link down.  Safe to call multiple
1835 * times with the NIC in almost any state, but interrupts should be
1836 * enabled.  Requires the RTNL lock.
1837 */
1838static void ef4_stop_all(struct ef4_nic *efx)
1839{
1840	EF4_ASSERT_RESET_SERIALISED(efx);
1841
1842	/* port_enabled can be read safely under the rtnl lock */
1843	if (!efx->port_enabled)
1844		return;
1845
1846	/* update stats before we go down so we can accurately count
1847	 * rx_nodesc_drops
1848	 */
1849	efx->type->pull_stats(efx);
1850	spin_lock_bh(&efx->stats_lock);
1851	efx->type->update_stats(efx, NULL, NULL);
1852	spin_unlock_bh(&efx->stats_lock);
1853	efx->type->stop_stats(efx);
1854	ef4_stop_port(efx);
1855
1856	/* Stop the kernel transmit interface.  This is only valid if
1857	 * the device is stopped or detached; otherwise the watchdog
1858	 * may fire immediately.
1859	 */
1860	WARN_ON(netif_running(efx->net_dev) &&
1861		netif_device_present(efx->net_dev));
1862	netif_tx_disable(efx->net_dev);
1863
1864	ef4_stop_datapath(efx);
1865}
1866
1867static void ef4_remove_all(struct ef4_nic *efx)
1868{
1869	ef4_remove_channels(efx);
1870	ef4_remove_filters(efx);
1871	ef4_remove_port(efx);
1872	ef4_remove_nic(efx);
1873}
1874
1875/**************************************************************************
1876 *
1877 * Interrupt moderation
1878 *
1879 **************************************************************************/
1880unsigned int ef4_usecs_to_ticks(struct ef4_nic *efx, unsigned int usecs)
1881{
1882	if (usecs == 0)
1883		return 0;
1884	if (usecs * 1000 < efx->timer_quantum_ns)
1885		return 1; /* never round down to 0 */
1886	return usecs * 1000 / efx->timer_quantum_ns;
1887}
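/*
 * Editor's illustration (not part of the driver): the conversion above
 * truncates, except that any non-zero request is clamped up to one
 * tick.  A standalone sketch, using a hypothetical timer quantum of
 * 5000 ns (the real quantum is NIC-specific):
 */
#include <assert.h>

static unsigned int usecs_to_ticks_sketch(unsigned int usecs,
					  unsigned int quantum_ns)
{
	if (usecs == 0)
		return 0;
	if (usecs * 1000 < quantum_ns)
		return 1;			/* never round down to 0 */
	return usecs * 1000 / quantum_ns;
}

static void usecs_to_ticks_demo(void)
{
	assert(usecs_to_ticks_sketch(0, 5000) == 0);
	assert(usecs_to_ticks_sketch(3, 5000) == 1);	/* 3000 ns < quantum */
	assert(usecs_to_ticks_sketch(60, 5000) == 12);	/* 60000 / 5000 */
}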
1888
1889/* Set interrupt moderation parameters */
1890int ef4_init_irq_moderation(struct ef4_nic *efx, unsigned int tx_usecs,
1891			    unsigned int rx_usecs, bool rx_adaptive,
1892			    bool rx_may_override_tx)
1893{
1894	struct ef4_channel *channel;
1895	unsigned int timer_max_us;
1896
1897	EF4_ASSERT_RESET_SERIALISED(efx);
1898
1899	timer_max_us = efx->timer_max_ns / 1000;
1900
1901	if (tx_usecs > timer_max_us || rx_usecs > timer_max_us)
1902		return -EINVAL;
1903
1904	if (tx_usecs != rx_usecs && efx->tx_channel_offset == 0 &&
1905	    !rx_may_override_tx) {
1906		netif_err(efx, drv, efx->net_dev, "Channels are shared. "
1907			  "RX and TX IRQ moderation must be equal\n");
1908		return -EINVAL;
1909	}
1910
1911	efx->irq_rx_adaptive = rx_adaptive;
1912	efx->irq_rx_moderation_us = rx_usecs;
1913	ef4_for_each_channel(channel, efx) {
1914		if (ef4_channel_has_rx_queue(channel))
1915			channel->irq_moderation_us = rx_usecs;
1916		else if (ef4_channel_has_tx_queues(channel))
1917			channel->irq_moderation_us = tx_usecs;
1918	}
1919
1920	return 0;
1921}
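/*
 * Editor's sketch (illustrative, not the driver's ethtool handler):
 * a set-coalesce implementation would typically funnel into
 * ef4_init_irq_moderation() above.  The struct ethtool_coalesce field
 * names are real; the wrapper itself, and passing true for
 * rx_may_override_tx, are assumptions made for the example.
 */
static int ef4_set_coalesce_sketch(struct ef4_nic *efx,
				   struct ethtool_coalesce *ec)
{
	return ef4_init_irq_moderation(efx, ec->tx_coalesce_usecs,
				       ec->rx_coalesce_usecs,
				       ec->use_adaptive_rx_coalesce,
				       true /* RX wins on shared channels */);
}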
1922
1923void ef4_get_irq_moderation(struct ef4_nic *efx, unsigned int *tx_usecs,
1924			    unsigned int *rx_usecs, bool *rx_adaptive)
1925{
1926	*rx_adaptive = efx->irq_rx_adaptive;
1927	*rx_usecs = efx->irq_rx_moderation_us;
1928
1929	/* If channels are shared between RX and TX, so is IRQ
1930	 * moderation.  Otherwise, IRQ moderation is the same for all
1931	 * TX channels and is not adaptive.
1932	 */
1933	if (efx->tx_channel_offset == 0) {
1934		*tx_usecs = *rx_usecs;
1935	} else {
1936		struct ef4_channel *tx_channel;
1937
1938		tx_channel = efx->channel[efx->tx_channel_offset];
1939		*tx_usecs = tx_channel->irq_moderation_us;
1940	}
1941}
1942
1943/**************************************************************************
1944 *
1945 * Hardware monitor
1946 *
1947 **************************************************************************/
1948
1949/* Run periodically off the general workqueue */
1950static void ef4_monitor(struct work_struct *data)
1951{
1952	struct ef4_nic *efx = container_of(data, struct ef4_nic,
1953					   monitor_work.work);
1954
1955	netif_vdbg(efx, timer, efx->net_dev,
1956		   "hardware monitor executing on CPU %d\n",
1957		   raw_smp_processor_id());
1958	BUG_ON(efx->type->monitor == NULL);
1959
1960	/* If the mac_lock is already held then it is likely a port
1961	 * reconfiguration is already in place, which will likely do
1962	 * most of the work of monitor() anyway. */
1963	if (mutex_trylock(&efx->mac_lock)) {
1964		if (efx->port_enabled)
1965			efx->type->monitor(efx);
1966		mutex_unlock(&efx->mac_lock);
1967	}
1968
1969	queue_delayed_work(efx->workqueue, &efx->monitor_work,
1970			   ef4_monitor_interval);
1971}
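/*
 * Editor's illustration (not part of the driver): the trylock-and-skip
 * pattern above, in portable userspace form.  A periodic task that
 * cannot take the lock skips one cycle; since the work is rescheduled
 * unconditionally, nothing is lost.
 */
#include <pthread.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

static void periodic_task_sketch(void)
{
	if (pthread_mutex_trylock(&demo_lock) == 0) {
		/* ... do the periodic work ... */
		pthread_mutex_unlock(&demo_lock);
	}
	/* else: whoever holds the lock is doing equivalent work;
	 * try again on the next interval.
	 */
}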
1972
1973/**************************************************************************
1974 *
1975 * ioctls
1976 *
1977 *************************************************************************/
1978
1979/* Net device ioctl
1980 * Context: process, rtnl_lock() held.
1981 */
1982static int ef4_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
1983{
1984	struct ef4_nic *efx = netdev_priv(net_dev);
1985	struct mii_ioctl_data *data = if_mii(ifr);
1986
1987	/* Convert phy_id from older PRTAD/DEVAD format */
1988	if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
1989	    (data->phy_id & 0xfc00) == 0x0400)
1990		data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;
1991
1992	return mdio_mii_ioctl(&efx->mdio, data, cmd);
1993}
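/*
 * Editor's illustration (not part of the driver): the guard above
 * matches IDs with bit 10 set and bits 11-15 clear, and the XOR then
 * clears bit 10 and sets bit 15 in a single step, leaving the
 * PRTAD/DEVAD bits untouched.  This assumes MDIO_PHY_ID_C45 == 0x8000
 * as defined in <linux/mdio.h>.
 */
#include <assert.h>

static void phy_id_conversion_demo(void)
{
	unsigned int phy_id = 0x0405;		/* old format: bit 10 set */

	assert((phy_id & 0xfc00) == 0x0400);	/* guard matches */
	phy_id ^= 0x8000 | 0x0400;		/* MDIO_PHY_ID_C45 | 0x0400 */
	assert(phy_id == 0x8005);		/* C45 flag set, low bits kept */
}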
1994
1995/**************************************************************************
1996 *
1997 * NAPI interface
1998 *
1999 **************************************************************************/
2000
2001static void ef4_init_napi_channel(struct ef4_channel *channel)
2002{
2003	struct ef4_nic *efx = channel->efx;
2004
2005	channel->napi_dev = efx->net_dev;
2006	netif_napi_add(channel->napi_dev, &channel->napi_str, ef4_poll);
2007}
2008
2009static void ef4_init_napi(struct ef4_nic *efx)
2010{
2011	struct ef4_channel *channel;
2012
2013	ef4_for_each_channel(channel, efx)
2014		ef4_init_napi_channel(channel);
2015}
2016
2017static void ef4_fini_napi_channel(struct ef4_channel *channel)
2018{
2019	if (channel->napi_dev)
2020		netif_napi_del(&channel->napi_str);
2021
2022	channel->napi_dev = NULL;
2023}
2024
2025static void ef4_fini_napi(struct ef4_nic *efx)
2026{
2027	struct ef4_channel *channel;
2028
2029	ef4_for_each_channel(channel, efx)
2030		ef4_fini_napi_channel(channel);
2031}
2032
2033/**************************************************************************
2034 *
2035 * Kernel net device interface
2036 *
2037 *************************************************************************/
2038
2039/* Context: process, rtnl_lock() held. */
2040int ef4_net_open(struct net_device *net_dev)
2041{
2042	struct ef4_nic *efx = netdev_priv(net_dev);
2043	int rc;
2044
2045	netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
2046		  raw_smp_processor_id());
2047
2048	rc = ef4_check_disabled(efx);
2049	if (rc)
2050		return rc;
2051	if (efx->phy_mode & PHY_MODE_SPECIAL)
2052		return -EBUSY;
2053
2054	/* Notify the kernel of the link state polled during driver load,
2055	 * before the monitor starts running */
2056	ef4_link_status_changed(efx);
2057
2058	ef4_start_all(efx);
2059	ef4_selftest_async_start(efx);
2060	return 0;
2061}
2062
2063/* Context: process, rtnl_lock() held.
2064 * Note that the kernel will ignore our return code; this method
2065 * should really be void.
2066 */
2067int ef4_net_stop(struct net_device *net_dev)
2068{
2069	struct ef4_nic *efx = netdev_priv(net_dev);
2070
2071	netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
2072		  raw_smp_processor_id());
2073
2074	/* Stop the device and flush all the channels */
2075	ef4_stop_all(efx);
2076
2077	return 0;
2078}
2079
2080/* Context: process, rcu_read_lock or RTNL held, non-blocking. */
2081static void ef4_net_stats(struct net_device *net_dev,
2082			  struct rtnl_link_stats64 *stats)
2083{
2084	struct ef4_nic *efx = netdev_priv(net_dev);
2085
2086	spin_lock_bh(&efx->stats_lock);
2087	efx->type->update_stats(efx, NULL, stats);
2088	spin_unlock_bh(&efx->stats_lock);
2089}
2090
2091/* Context: netif_tx_lock held, BHs disabled. */
2092static void ef4_watchdog(struct net_device *net_dev, unsigned int txqueue)
2093{
2094	struct ef4_nic *efx = netdev_priv(net_dev);
2095
2096	netif_err(efx, tx_err, efx->net_dev,
2097		  "TX stuck with port_enabled=%d: resetting channels\n",
2098		  efx->port_enabled);
2099
2100	ef4_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
2101}
2102
2103
2104/* Context: process, rtnl_lock() held. */
2105static int ef4_change_mtu(struct net_device *net_dev, int new_mtu)
2106{
2107	struct ef4_nic *efx = netdev_priv(net_dev);
2108	int rc;
2109
2110	rc = ef4_check_disabled(efx);
2111	if (rc)
2112		return rc;
2113
2114	netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
2115
2116	ef4_device_detach_sync(efx);
2117	ef4_stop_all(efx);
2118
2119	mutex_lock(&efx->mac_lock);
2120	WRITE_ONCE(net_dev->mtu, new_mtu);
2121	ef4_mac_reconfigure(efx);
2122	mutex_unlock(&efx->mac_lock);
2123
2124	ef4_start_all(efx);
2125	netif_device_attach(efx->net_dev);
2126	return 0;
2127}
2128
2129static int ef4_set_mac_address(struct net_device *net_dev, void *data)
2130{
2131	struct ef4_nic *efx = netdev_priv(net_dev);
2132	struct sockaddr *addr = data;
2133	u8 *new_addr = addr->sa_data;
2134	u8 old_addr[ETH_ALEN];
2135	int rc;
2136
2137	if (!is_valid_ether_addr(new_addr)) {
2138		netif_err(efx, drv, efx->net_dev,
2139			  "invalid ethernet MAC address requested: %pM\n",
2140			  new_addr);
2141		return -EADDRNOTAVAIL;
2142	}
2143
2144	/* save old address */
2145	ether_addr_copy(old_addr, net_dev->dev_addr);
2146	eth_hw_addr_set(net_dev, new_addr);
2147	if (efx->type->set_mac_address) {
2148		rc = efx->type->set_mac_address(efx);
2149		if (rc) {
2150			eth_hw_addr_set(net_dev, old_addr);
2151			return rc;
2152		}
2153	}
2154
2155	/* Reconfigure the MAC */
2156	mutex_lock(&efx->mac_lock);
2157	ef4_mac_reconfigure(efx);
2158	mutex_unlock(&efx->mac_lock);
2159
2160	return 0;
2161}
2162
2163/* Context: netif_addr_lock held, BHs disabled. */
2164static void ef4_set_rx_mode(struct net_device *net_dev)
2165{
2166	struct ef4_nic *efx = netdev_priv(net_dev);
2167
2168	if (efx->port_enabled)
2169		queue_work(efx->workqueue, &efx->mac_work);
2170	/* Otherwise ef4_start_port() will do this */
2171}
2172
2173static int ef4_set_features(struct net_device *net_dev, netdev_features_t data)
2174{
2175	struct ef4_nic *efx = netdev_priv(net_dev);
2176	int rc;
2177
2178	/* If disabling RX n-tuple filtering, clear existing filters */
2179	if (net_dev->features & ~data & NETIF_F_NTUPLE) {
2180		rc = efx->type->filter_clear_rx(efx, EF4_FILTER_PRI_MANUAL);
2181		if (rc)
2182			return rc;
2183	}
2184
2185	/* If the RX VLAN filter has changed, update filters via mac_reconfigure */
2186	if ((net_dev->features ^ data) & NETIF_F_HW_VLAN_CTAG_FILTER) {
2187		/* ef4_set_rx_mode() will schedule MAC work to update filters
2188		 * when the new features are finally set in net_dev.
2189		 */
2190		ef4_set_rx_mode(net_dev);
2191	}
2192
2193	return 0;
2194}
2195
2196static const struct net_device_ops ef4_netdev_ops = {
2197	.ndo_open		= ef4_net_open,
2198	.ndo_stop		= ef4_net_stop,
2199	.ndo_get_stats64	= ef4_net_stats,
2200	.ndo_tx_timeout		= ef4_watchdog,
2201	.ndo_start_xmit		= ef4_hard_start_xmit,
2202	.ndo_validate_addr	= eth_validate_addr,
2203	.ndo_eth_ioctl		= ef4_ioctl,
2204	.ndo_change_mtu		= ef4_change_mtu,
2205	.ndo_set_mac_address	= ef4_set_mac_address,
2206	.ndo_set_rx_mode	= ef4_set_rx_mode,
2207	.ndo_set_features	= ef4_set_features,
2208	.ndo_setup_tc		= ef4_setup_tc,
2209#ifdef CONFIG_RFS_ACCEL
2210	.ndo_rx_flow_steer	= ef4_filter_rfs,
2211#endif
2212};
2213
2214static void ef4_update_name(struct ef4_nic *efx)
2215{
2216	strscpy(efx->name, efx->net_dev->name, sizeof(efx->name));
2217	ef4_mtd_rename(efx);
2218	ef4_set_channel_names(efx);
2219}
2220
2221static int ef4_netdev_event(struct notifier_block *this,
2222			    unsigned long event, void *ptr)
2223{
2224	struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
2225
2226	if ((net_dev->netdev_ops == &ef4_netdev_ops) &&
2227	    event == NETDEV_CHANGENAME)
2228		ef4_update_name(netdev_priv(net_dev));
2229
2230	return NOTIFY_DONE;
2231}
2232
2233static struct notifier_block ef4_netdev_notifier = {
2234	.notifier_call = ef4_netdev_event,
2235};
2236
2237static ssize_t
2238phy_type_show(struct device *dev, struct device_attribute *attr, char *buf)
2239{
2240	struct ef4_nic *efx = dev_get_drvdata(dev);
2241	return sprintf(buf, "%d\n", efx->phy_type);
2242}
2243static DEVICE_ATTR_RO(phy_type);
2244
2245static int ef4_register_netdev(struct ef4_nic *efx)
2246{
2247	struct net_device *net_dev = efx->net_dev;
2248	struct ef4_channel *channel;
2249	int rc;
2250
2251	net_dev->watchdog_timeo = 5 * HZ;
2252	net_dev->irq = efx->pci_dev->irq;
2253	net_dev->netdev_ops = &ef4_netdev_ops;
2254	net_dev->ethtool_ops = &ef4_ethtool_ops;
2255	netif_set_tso_max_segs(net_dev, EF4_TSO_MAX_SEGS);
2256	net_dev->min_mtu = EF4_MIN_MTU;
2257	net_dev->max_mtu = EF4_MAX_MTU;
2258
2259	rtnl_lock();
2260
2261	/* Enable resets to be scheduled and check whether any were
2262	 * already requested.  If so, the NIC is probably hosed so we
2263	 * abort.
2264	 */
2265	efx->state = STATE_READY;
2266	smp_mb(); /* ensure we change state before checking reset_pending */
2267	if (efx->reset_pending) {
2268		netif_err(efx, probe, efx->net_dev,
2269			  "aborting probe due to scheduled reset\n");
2270		rc = -EIO;
2271		goto fail_locked;
2272	}
2273
2274	rc = dev_alloc_name(net_dev, net_dev->name);
2275	if (rc < 0)
2276		goto fail_locked;
2277	ef4_update_name(efx);
2278
2279	/* Always start with carrier off; PHY events will detect the link */
2280	netif_carrier_off(net_dev);
2281
2282	rc = register_netdevice(net_dev);
2283	if (rc)
2284		goto fail_locked;
2285
2286	ef4_for_each_channel(channel, efx) {
2287		struct ef4_tx_queue *tx_queue;
2288		ef4_for_each_channel_tx_queue(tx_queue, channel)
2289			ef4_init_tx_queue_core_txq(tx_queue);
2290	}
2291
2292	ef4_associate(efx);
2293
2294	rtnl_unlock();
2295
2296	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
2297	if (rc) {
2298		netif_err(efx, drv, efx->net_dev,
2299			  "failed to init net dev attributes\n");
2300		goto fail_registered;
2301	}
2302	return 0;
2303
2304fail_registered:
2305	rtnl_lock();
2306	ef4_dissociate(efx);
2307	unregister_netdevice(net_dev);
2308fail_locked:
2309	efx->state = STATE_UNINIT;
2310	rtnl_unlock();
2311	netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
2312	return rc;
2313}
2314
2315static void ef4_unregister_netdev(struct ef4_nic *efx)
2316{
2317	if (!efx->net_dev)
2318		return;
2319
2320	BUG_ON(netdev_priv(efx->net_dev) != efx);
2321
2322	if (ef4_dev_registered(efx)) {
2323		strscpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
2324		device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
2325		unregister_netdev(efx->net_dev);
2326	}
2327}
2328
2329/**************************************************************************
2330 *
2331 * Device reset and suspend
2332 *
2333 **************************************************************************/
2334
2335/* Tears down the entire software state and most of the hardware state
2336 * before reset.  */
2337void ef4_reset_down(struct ef4_nic *efx, enum reset_type method)
2338{
2339	EF4_ASSERT_RESET_SERIALISED(efx);
2340
2341	ef4_stop_all(efx);
2342	ef4_disable_interrupts(efx);
2343
2344	mutex_lock(&efx->mac_lock);
2345	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
2346	    method != RESET_TYPE_DATAPATH)
2347		efx->phy_op->fini(efx);
2348	efx->type->fini(efx);
2349}
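/* Editor's note: efx->mac_lock is deliberately still held when
 * ef4_reset_down() returns; ef4_reset_up() drops it on both its
 * success and failure paths.
 */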
2350
2351/* This function will always ensure that the locks acquired in
2352 * ef4_reset_down() are released. A failure return code indicates
2353 * that we were unable to reinitialise the hardware, and the
2354 * driver should be disabled. If ok is false, then the rx and tx
2355 * engines are not restarted, pending a RESET_DISABLE. */
2356int ef4_reset_up(struct ef4_nic *efx, enum reset_type method, bool ok)
2357{
2358	int rc;
2359
2360	EF4_ASSERT_RESET_SERIALISED(efx);
2361
2362	/* Ensure that SRAM is initialised even if we're disabling the device */
2363	rc = efx->type->init(efx);
2364	if (rc) {
2365		netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
2366		goto fail;
2367	}
2368
2369	if (!ok)
2370		goto fail;
2371
2372	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
2373	    method != RESET_TYPE_DATAPATH) {
2374		rc = efx->phy_op->init(efx);
2375		if (rc)
2376			goto fail;
2377		rc = efx->phy_op->reconfigure(efx);
2378		if (rc && rc != -EPERM)
2379			netif_err(efx, drv, efx->net_dev,
2380				  "could not restore PHY settings\n");
2381	}
2382
2383	rc = ef4_enable_interrupts(efx);
2384	if (rc)
2385		goto fail;
2386
2387	down_read(&efx->filter_sem);
2388	ef4_restore_filters(efx);
2389	up_read(&efx->filter_sem);
2390
2391	mutex_unlock(&efx->mac_lock);
2392
2393	ef4_start_all(efx);
2394
2395	return 0;
2396
2397fail:
2398	efx->port_initialized = false;
2399
2400	mutex_unlock(&efx->mac_lock);
2401
2402	return rc;
2403}
2404
2405/* Reset the NIC using the specified method.  Note that the reset may
2406 * fail, in which case the card will be left in an unusable state.
2407 *
2408 * Caller must hold the rtnl_lock.
2409 */
2410int ef4_reset(struct ef4_nic *efx, enum reset_type method)
2411{
2412	int rc, rc2;
2413	bool disabled;
2414
2415	netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
2416		   RESET_TYPE(method));
2417
2418	ef4_device_detach_sync(efx);
2419	ef4_reset_down(efx, method);
2420
2421	rc = efx->type->reset(efx, method);
2422	if (rc) {
2423		netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
2424		goto out;
2425	}
2426
2427	/* Clear flags for the scopes we covered.  We assume the NIC and
2428	 * driver are now quiescent so that there is no race here.
2429	 */
2430	if (method < RESET_TYPE_MAX_METHOD)
2431		efx->reset_pending &= -(1 << (method + 1));
2432	else /* it doesn't fit into the well-ordered scope hierarchy */
2433		__clear_bit(method, &efx->reset_pending);
2434
2435	/* Reinitialise bus-mastering, which may have been turned off before
2436	 * the reset was scheduled. This is still appropriate, even in the
2437	 * RESET_TYPE_DISABLE case, since this driver generally assumes the hardware
2438	 * can respond to requests. */
2439	pci_set_master(efx->pci_dev);
2440
2441out:
2442	/* Leave device stopped if necessary */
2443	disabled = rc ||
2444		method == RESET_TYPE_DISABLE ||
2445		method == RESET_TYPE_RECOVER_OR_DISABLE;
2446	rc2 = ef4_reset_up(efx, method, !disabled);
2447	if (rc2) {
2448		disabled = true;
2449		if (!rc)
2450			rc = rc2;
2451	}
2452
2453	if (disabled) {
2454		dev_close(efx->net_dev);
2455		netif_err(efx, drv, efx->net_dev, "has been disabled\n");
2456		efx->state = STATE_DISABLED;
2457	} else {
2458		netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
2459		netif_device_attach(efx->net_dev);
2460	}
2461	return rc;
2462}
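/*
 * Editor's illustration (not part of the driver): the flag-clearing
 * step above relies on -(1 << (method + 1)) being the same as
 * ~((1 << (method + 1)) - 1), i.e. a mask with bits method+1 and
 * above set, so every scope up to and including 'method' is cleared.
 */
#include <assert.h>

static void reset_pending_mask_demo(void)
{
	unsigned long pending = (1UL << 1) | (1UL << 3) | (1UL << 5);
	unsigned int method = 3;

	pending &= -(1UL << (method + 1));	/* keep only bits > method */
	assert(pending == 1UL << 5);		/* bits 1 and 3 cleared */
}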
2463
2464/* Try recovery mechanisms.
2465 * For now only EEH is supported.
2466 * Returns 0 if the recovery mechanisms are unsuccessful.
2467 * Returns a non-zero value otherwise.
2468 */
2469int ef4_try_recovery(struct ef4_nic *efx)
2470{
2471#ifdef CONFIG_EEH
2472	/* A PCI error can occur and not be seen by EEH because nothing
2473	 * happens on the PCI bus. In this case the driver may fail and
2474	 * schedule a 'recover or reset', leading to this recovery handler.
2475	 * Manually call the eeh failure check function.
2476	 */
2477	struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev);
2478	if (eeh_dev_check_failure(eehdev)) {
2479		/* The EEH mechanisms will handle the error and reset the
2480		 * device if necessary.
2481		 */
2482		return 1;
2483	}
2484#endif
2485	return 0;
2486}
2487
2488/* The worker thread exists so that code that cannot sleep can
2489 * schedule a reset for later.
2490 */
2491static void ef4_reset_work(struct work_struct *data)
2492{
2493	struct ef4_nic *efx = container_of(data, struct ef4_nic, reset_work);
2494	unsigned long pending;
2495	enum reset_type method;
2496
2497	pending = READ_ONCE(efx->reset_pending);
2498	method = fls(pending) - 1;
2499
2500	if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
2501	     method == RESET_TYPE_RECOVER_OR_ALL) &&
2502	    ef4_try_recovery(efx))
2503		return;
2504
2505	if (!pending)
2506		return;
2507
2508	rtnl_lock();
2509
2510	/* We checked the state in ef4_schedule_reset() but it may
2511	 * have changed by now.  Now that we have the RTNL lock,
2512	 * it cannot change again.
2513	 */
2514	if (efx->state == STATE_READY)
2515		(void)ef4_reset(efx, method);
2516
2517	rtnl_unlock();
2518}
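/*
 * Editor's illustration (not part of the driver): fls(pending) - 1 in
 * ef4_reset_work() picks the highest-numbered pending reset, so the
 * widest scope wins when several are queued.  A portable stand-in for
 * the kernel's fls() is sketched here.
 */
#include <assert.h>

static int fls_sketch(unsigned long x)
{
	int bit = 0;

	while (x) {			/* 1-based index of highest set bit */
		x >>= 1;
		bit++;
	}
	return bit;
}

static void reset_method_demo(void)
{
	unsigned long pending = (1UL << 1) | (1UL << 3);

	assert(fls_sketch(pending) - 1 == 3);	/* bit 3 outranks bit 1 */
}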
2519
2520void ef4_schedule_reset(struct ef4_nic *efx, enum reset_type type)
2521{
2522	enum reset_type method;
2523
2524	if (efx->state == STATE_RECOVERY) {
2525		netif_dbg(efx, drv, efx->net_dev,
2526			  "recovering: skip scheduling %s reset\n",
2527			  RESET_TYPE(type));
2528		return;
2529	}
2530
2531	switch (type) {
2532	case RESET_TYPE_INVISIBLE:
2533	case RESET_TYPE_ALL:
2534	case RESET_TYPE_RECOVER_OR_ALL:
2535	case RESET_TYPE_WORLD:
2536	case RESET_TYPE_DISABLE:
2537	case RESET_TYPE_RECOVER_OR_DISABLE:
2538	case RESET_TYPE_DATAPATH:
2539		method = type;
2540		netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
2541			  RESET_TYPE(method));
2542		break;
2543	default:
2544		method = efx->type->map_reset_reason(type);
2545		netif_dbg(efx, drv, efx->net_dev,
2546			  "scheduling %s reset for %s\n",
2547			  RESET_TYPE(method), RESET_TYPE(type));
2548		break;
2549	}
2550
2551	set_bit(method, &efx->reset_pending);
2552	smp_mb(); /* ensure we change reset_pending before checking state */
2553
2554	/* If we're not READY then just leave the flags set as the cue
2555	 * to abort probing or reschedule the reset later.
2556	 */
2557	if (READ_ONCE(efx->state) != STATE_READY)
2558		return;
2559
2560	queue_work(reset_workqueue, &efx->reset_work);
2561}
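/* Editor's note: the set_bit()/smp_mb()/state check above pairs with
 * the state store/smp_mb()/reset_pending check in
 * ef4_register_netdev().  With full barriers on both sides, at least
 * one thread is guaranteed to observe the other's write, so a reset
 * requested while the device is being registered is either queued
 * here or caught by the registration path; it is never silently lost:
 *
 *	ef4_register_netdev()		ef4_schedule_reset()
 *	state = STATE_READY;		set_bit(method, &reset_pending);
 *	smp_mb();			smp_mb();
 *	if (reset_pending) abort;	if (state != STATE_READY) return;
 */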
2562
2563/**************************************************************************
2564 *
2565 * List of NICs we support
2566 *
2567 **************************************************************************/
2568
2569/* PCI device ID table */
2570static const struct pci_device_id ef4_pci_table[] = {
2571	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
2572		    PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0),
2573	 .driver_data = (unsigned long) &falcon_a1_nic_type},
2574	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
2575		    PCI_DEVICE_ID_SOLARFLARE_SFC4000B),
2576	 .driver_data = (unsigned long) &falcon_b0_nic_type},
2577	{0}			/* end of list */
2578};
2579
2580/**************************************************************************
2581 *
2582 * Dummy PHY/MAC operations
2583 *
2584 * Can be used for some unimplemented operations
2585 * Needed so all function pointers are valid and do not have to be tested
2586 * before use
2587 *
2588 **************************************************************************/
2589int ef4_port_dummy_op_int(struct ef4_nic *efx)
2590{
2591	return 0;
2592}
2593void ef4_port_dummy_op_void(struct ef4_nic *efx) {}
2594
2595static bool ef4_port_dummy_op_poll(struct ef4_nic *efx)
2596{
2597	return false;
2598}
2599
2600static const struct ef4_phy_operations ef4_dummy_phy_operations = {
2601	.init		 = ef4_port_dummy_op_int,
2602	.reconfigure	 = ef4_port_dummy_op_int,
2603	.poll		 = ef4_port_dummy_op_poll,
2604	.fini		 = ef4_port_dummy_op_void,
2605};
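/* Editor's note: this is the classic null-object pattern; because
 * efx->phy_op starts out pointing at these stubs (see
 * ef4_init_struct() below), callers can invoke e.g.
 * efx->phy_op->fini(efx) unconditionally, with no NULL checks.
 */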
2606
2607/**************************************************************************
2608 *
2609 * Data housekeeping
2610 *
2611 **************************************************************************/
2612
2613/* This zeroes out and then fills in the invariants in a struct
2614 * ef4_nic (including all sub-structures).
2615 */
2616static int ef4_init_struct(struct ef4_nic *efx,
2617			   struct pci_dev *pci_dev, struct net_device *net_dev)
2618{
2619	int i;
2620
2621	/* Initialise common structures */
2622	INIT_LIST_HEAD(&efx->node);
2623	INIT_LIST_HEAD(&efx->secondary_list);
2624	spin_lock_init(&efx->biu_lock);
2625#ifdef CONFIG_SFC_FALCON_MTD
2626	INIT_LIST_HEAD(&efx->mtd_list);
2627#endif
2628	INIT_WORK(&efx->reset_work, ef4_reset_work);
2629	INIT_DELAYED_WORK(&efx->monitor_work, ef4_monitor);
2630	INIT_DELAYED_WORK(&efx->selftest_work, ef4_selftest_async_work);
2631	efx->pci_dev = pci_dev;
2632	efx->msg_enable = debug;
2633	efx->state = STATE_UNINIT;
2634	strscpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
2635
2636	efx->net_dev = net_dev;
2637	efx->rx_prefix_size = efx->type->rx_prefix_size;
2638	efx->rx_ip_align =
2639		NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
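	/* Editor's note: with NET_IP_ALIGN == 2 and a hypothetical
	 * 16-byte hardware RX prefix, the DMA area is offset by
	 * (16 + 2) % 4 == 2 bytes; the packet, starting a further 16
	 * bytes in, then sits at offset 18 == 2 (mod 4), which is the
	 * alignment NET_IP_ALIGN asks for.
	 */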
2640	efx->rx_packet_hash_offset =
2641		efx->type->rx_hash_offset - efx->type->rx_prefix_size;
2642	efx->rx_packet_ts_offset =
2643		efx->type->rx_ts_offset - efx->type->rx_prefix_size;
2644	spin_lock_init(&efx->stats_lock);
2645	mutex_init(&efx->mac_lock);
2646	efx->phy_op = &ef4_dummy_phy_operations;
2647	efx->mdio.dev = net_dev;
2648	INIT_WORK(&efx->mac_work, ef4_mac_work);
2649	init_waitqueue_head(&efx->flush_wq);
2650
2651	for (i = 0; i < EF4_MAX_CHANNELS; i++) {
2652		efx->channel[i] = ef4_alloc_channel(efx, i, NULL);
2653		if (!efx->channel[i])
2654			goto fail;
2655		efx->msi_context[i].efx = efx;
2656		efx->msi_context[i].index = i;
2657	}
2658
2659	/* Higher numbered interrupt modes are less capable! */
2660	efx->interrupt_mode = max(efx->type->max_interrupt_mode,
2661				  interrupt_mode);
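	/* Editor's note: e.g. if the hardware supports at best MSI
	 * (max_interrupt_mode == 1) while MSI-X (0) was requested,
	 * max(1, 0) == 1 selects MSI: the less capable mode wins.
	 */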
2662
2663	/* Would be good to use the net_dev name, but we're too early */
2664	snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
2665		 pci_name(pci_dev));
2666	efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
2667	if (!efx->workqueue)
2668		goto fail;
2669
2670	return 0;
2671
2672fail:
2673	ef4_fini_struct(efx);
2674	return -ENOMEM;
2675}
2676
2677static void ef4_fini_struct(struct ef4_nic *efx)
2678{
2679	int i;
2680
2681	for (i = 0; i < EF4_MAX_CHANNELS; i++)
2682		kfree(efx->channel[i]);
2683
2684	kfree(efx->vpd_sn);
2685
2686	if (efx->workqueue) {
2687		destroy_workqueue(efx->workqueue);
2688		efx->workqueue = NULL;
2689	}
2690}
2691
2692void ef4_update_sw_stats(struct ef4_nic *efx, u64 *stats)
2693{
2694	u64 n_rx_nodesc_trunc = 0;
2695	struct ef4_channel *channel;
2696
2697	ef4_for_each_channel(channel, efx)
2698		n_rx_nodesc_trunc += channel->n_rx_nodesc_trunc;
2699	stats[GENERIC_STAT_rx_nodesc_trunc] = n_rx_nodesc_trunc;
2700	stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops);
2701}
2702
2703/**************************************************************************
2704 *
2705 * PCI interface
2706 *
2707 **************************************************************************/
2708
2709/* Main body of final NIC shutdown code
2710 * This is called only at module unload (or hotplug removal).
2711 */
2712static void ef4_pci_remove_main(struct ef4_nic *efx)
2713{
2714	/* Flush reset_work. It can no longer be scheduled since we
2715	 * are not READY.
2716	 */
2717	BUG_ON(efx->state == STATE_READY);
2718	cancel_work_sync(&efx->reset_work);
2719
2720	ef4_disable_interrupts(efx);
2721	ef4_nic_fini_interrupt(efx);
2722	ef4_fini_port(efx);
2723	efx->type->fini(efx);
2724	ef4_fini_napi(efx);
2725	ef4_remove_all(efx);
2726}
2727
2728/* Final NIC shutdown
2729 * This is called only at module unload (or hotplug removal).  A PF can call
2730 * this on its VFs to ensure they are unbound first.
2731 */
2732static void ef4_pci_remove(struct pci_dev *pci_dev)
2733{
2734	struct ef4_nic *efx;
2735
2736	efx = pci_get_drvdata(pci_dev);
2737	if (!efx)
2738		return;
2739
2740	/* Mark the NIC as fini, then stop the interface */
2741	rtnl_lock();
2742	ef4_dissociate(efx);
2743	dev_close(efx->net_dev);
2744	ef4_disable_interrupts(efx);
2745	efx->state = STATE_UNINIT;
2746	rtnl_unlock();
2747
2748	ef4_unregister_netdev(efx);
2749
2750	ef4_mtd_remove(efx);
2751
2752	ef4_pci_remove_main(efx);
2753
2754	ef4_fini_io(efx);
2755	netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
2756
2757	ef4_fini_struct(efx);
2758	free_netdev(efx->net_dev);
2759}
2760
2761/* NIC VPD information
2762 * Called during probe to display the part number of the installed NIC.
2763 */
2764static void ef4_probe_vpd_strings(struct ef4_nic *efx)
2765{
2766	struct pci_dev *dev = efx->pci_dev;
2767	unsigned int vpd_size, kw_len;
2768	u8 *vpd_data;
2769	int start;
2770
2771	vpd_data = pci_vpd_alloc(dev, &vpd_size);
2772	if (IS_ERR(vpd_data)) {
2773		pci_warn(dev, "Unable to read VPD\n");
2774		return;
2775	}
2776
2777	start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
2778					     PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
2779	if (start < 0)
2780		pci_warn(dev, "Part number not found or incomplete\n");
2781	else
2782		pci_info(dev, "Part Number : %.*s\n", kw_len, vpd_data + start);
2783
2784	start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
2785					     PCI_VPD_RO_KEYWORD_SERIALNO, &kw_len);
2786	if (start < 0)
2787		pci_warn(dev, "Serial number not found or incomplete\n");
2788	else
2789		efx->vpd_sn = kmemdup_nul(vpd_data + start, kw_len, GFP_KERNEL);
2790
2791	kfree(vpd_data);
2792}
2793
2794
2795/* Main body of NIC initialisation
2796 * This is called at module load (or hotplug insertion, theoretically).
2797 */
2798static int ef4_pci_probe_main(struct ef4_nic *efx)
2799{
2800	int rc;
2801
2802	/* Do start-of-day initialisation */
2803	rc = ef4_probe_all(efx);
2804	if (rc)
2805		goto fail1;
2806
2807	ef4_init_napi(efx);
2808
2809	rc = efx->type->init(efx);
2810	if (rc) {
2811		netif_err(efx, probe, efx->net_dev,
2812			  "failed to initialise NIC\n");
2813		goto fail3;
2814	}
2815
2816	rc = ef4_init_port(efx);
2817	if (rc) {
2818		netif_err(efx, probe, efx->net_dev,
2819			  "failed to initialise port\n");
2820		goto fail4;
2821	}
2822
2823	rc = ef4_nic_init_interrupt(efx);
2824	if (rc)
2825		goto fail5;
2826	rc = ef4_enable_interrupts(efx);
2827	if (rc)
2828		goto fail6;
2829
2830	return 0;
2831
2832 fail6:
2833	ef4_nic_fini_interrupt(efx);
2834 fail5:
2835	ef4_fini_port(efx);
2836 fail4:
2837	efx->type->fini(efx);
2838 fail3:
2839	ef4_fini_napi(efx);
2840	ef4_remove_all(efx);
2841 fail1:
2842	return rc;
2843}
2844
2845/* NIC initialisation
2846 *
2847 * This is called at module load (or hotplug insertion,
2848 * theoretically).  It sets up PCI mappings, resets the NIC,
2849 * sets up and registers the network devices with the kernel and hooks
2850 * the interrupt service routine.  It does not prepare the device for
2851 * transmission; this is left to the first time one of the network
2852 * interfaces is brought up (i.e. ef4_net_open).
2853 */
2854static int ef4_pci_probe(struct pci_dev *pci_dev,
2855			 const struct pci_device_id *entry)
2856{
2857	struct net_device *net_dev;
2858	struct ef4_nic *efx;
2859	int rc;
2860
2861	/* Allocate and initialise a struct net_device and struct ef4_nic */
2862	net_dev = alloc_etherdev_mqs(sizeof(*efx), EF4_MAX_CORE_TX_QUEUES,
2863				     EF4_MAX_RX_QUEUES);
2864	if (!net_dev)
2865		return -ENOMEM;
2866	efx = netdev_priv(net_dev);
2867	efx->type = (const struct ef4_nic_type *) entry->driver_data;
2868	efx->fixed_features |= NETIF_F_HIGHDMA;
2869
2870	pci_set_drvdata(pci_dev, efx);
2871	SET_NETDEV_DEV(net_dev, &pci_dev->dev);
2872	rc = ef4_init_struct(efx, pci_dev, net_dev);
2873	if (rc)
2874		goto fail1;
2875
2876	netif_info(efx, probe, efx->net_dev,
2877		   "Solarflare NIC detected\n");
2878
2879	ef4_probe_vpd_strings(efx);
2880
2881	/* Set up basic I/O (BAR mappings etc) */
2882	rc = ef4_init_io(efx);
2883	if (rc)
2884		goto fail2;
2885
2886	rc = ef4_pci_probe_main(efx);
2887	if (rc)
2888		goto fail3;
2889
2890	net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
2891			      NETIF_F_RXCSUM);
2892	/* Mask for features that also apply to VLAN devices */
2893	net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
2894				   NETIF_F_HIGHDMA | NETIF_F_RXCSUM);
2895
2896	net_dev->hw_features = net_dev->features & ~efx->fixed_features;
2897
2898	/* Disable VLAN filtering by default.  It may be enforced if
2899	 * the feature is fixed (i.e. VLAN filters are required to
2900	 * receive VLAN tagged packets due to vPort restrictions).
2901	 */
2902	net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
2903	net_dev->features |= efx->fixed_features;
2904
2905	rc = ef4_register_netdev(efx);
2906	if (rc)
2907		goto fail4;
2908
2909	netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
2910
2911	/* Try to create MTDs, but allow this to fail */
2912	rtnl_lock();
2913	rc = ef4_mtd_probe(efx);
2914	rtnl_unlock();
2915	if (rc && rc != -EPERM)
2916		netif_warn(efx, probe, efx->net_dev,
2917			   "failed to create MTDs (%d)\n", rc);
2918
2919	return 0;
2920
2921 fail4:
2922	ef4_pci_remove_main(efx);
2923 fail3:
2924	ef4_fini_io(efx);
2925 fail2:
2926	ef4_fini_struct(efx);
2927 fail1:
2928	WARN_ON(rc > 0);
2929	netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
2930	free_netdev(net_dev);
2931	return rc;
2932}
2933
2934static int ef4_pm_freeze(struct device *dev)
2935{
2936	struct ef4_nic *efx = dev_get_drvdata(dev);
2937
2938	rtnl_lock();
2939
2940	if (efx->state != STATE_DISABLED) {
2941		efx->state = STATE_UNINIT;
2942
2943		ef4_device_detach_sync(efx);
2944
2945		ef4_stop_all(efx);
2946		ef4_disable_interrupts(efx);
2947	}
2948
2949	rtnl_unlock();
2950
2951	return 0;
2952}
2953
2954static int ef4_pm_thaw(struct device *dev)
2955{
2956	int rc;
2957	struct ef4_nic *efx = dev_get_drvdata(dev);
2958
2959	rtnl_lock();
2960
2961	if (efx->state != STATE_DISABLED) {
2962		rc = ef4_enable_interrupts(efx);
2963		if (rc)
2964			goto fail;
2965
2966		mutex_lock(&efx->mac_lock);
2967		efx->phy_op->reconfigure(efx);
2968		mutex_unlock(&efx->mac_lock);
2969
2970		ef4_start_all(efx);
2971
2972		netif_device_attach(efx->net_dev);
2973
2974		efx->state = STATE_READY;
2975
2976		efx->type->resume_wol(efx);
2977	}
2978
2979	rtnl_unlock();
2980
2981	/* Reschedule any quenched resets scheduled during ef4_pm_freeze() */
2982	queue_work(reset_workqueue, &efx->reset_work);
2983
2984	return 0;
2985
2986fail:
2987	rtnl_unlock();
2988
2989	return rc;
2990}
2991
2992static int ef4_pm_poweroff(struct device *dev)
2993{
2994	struct pci_dev *pci_dev = to_pci_dev(dev);
2995	struct ef4_nic *efx = pci_get_drvdata(pci_dev);
2996
2997	efx->type->fini(efx);
2998
2999	efx->reset_pending = 0;
3000
3001	pci_save_state(pci_dev);
3002	return pci_set_power_state(pci_dev, PCI_D3hot);
3003}
3004
3005/* Used for both resume and restore */
3006static int ef4_pm_resume(struct device *dev)
3007{
3008	struct pci_dev *pci_dev = to_pci_dev(dev);
3009	struct ef4_nic *efx = pci_get_drvdata(pci_dev);
3010	int rc;
3011
3012	rc = pci_set_power_state(pci_dev, PCI_D0);
3013	if (rc)
3014		return rc;
3015	pci_restore_state(pci_dev);
3016	rc = pci_enable_device(pci_dev);
3017	if (rc)
3018		return rc;
3019	pci_set_master(efx->pci_dev);
3020	rc = efx->type->reset(efx, RESET_TYPE_ALL);
3021	if (rc)
3022		return rc;
3023	rc = efx->type->init(efx);
3024	if (rc)
3025		return rc;
3026	rc = ef4_pm_thaw(dev);
3027	return rc;
3028}
3029
3030static int ef4_pm_suspend(struct device *dev)
3031{
3032	int rc;
3033
3034	ef4_pm_freeze(dev);
3035	rc = ef4_pm_poweroff(dev);
3036	if (rc)
3037		ef4_pm_resume(dev);
3038	return rc;
3039}
3040
3041static const struct dev_pm_ops ef4_pm_ops = {
3042	.suspend	= ef4_pm_suspend,
3043	.resume		= ef4_pm_resume,
3044	.freeze		= ef4_pm_freeze,
3045	.thaw		= ef4_pm_thaw,
3046	.poweroff	= ef4_pm_poweroff,
3047	.restore	= ef4_pm_resume,
3048};
3049
3050/* A PCI error affecting this device was detected.
3051 * At this point MMIO and DMA may be disabled.
3052 * Stop the software path and request a slot reset.
3053 */
3054static pci_ers_result_t ef4_io_error_detected(struct pci_dev *pdev,
3055					      pci_channel_state_t state)
3056{
3057	pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
3058	struct ef4_nic *efx = pci_get_drvdata(pdev);
3059
3060	if (state == pci_channel_io_perm_failure)
3061		return PCI_ERS_RESULT_DISCONNECT;
3062
3063	rtnl_lock();
3064
3065	if (efx->state != STATE_DISABLED) {
3066		efx->state = STATE_RECOVERY;
3067		efx->reset_pending = 0;
3068
3069		ef4_device_detach_sync(efx);
3070
3071		ef4_stop_all(efx);
3072		ef4_disable_interrupts(efx);
3073
3074		status = PCI_ERS_RESULT_NEED_RESET;
3075	} else {
3076		/* If the interface is disabled we don't want to do anything
3077		 * with it.
3078		 */
3079		status = PCI_ERS_RESULT_RECOVERED;
3080	}
3081
3082	rtnl_unlock();
3083
3084	pci_disable_device(pdev);
3085
3086	return status;
3087}
3088
3089/* Fake a successful reset, which will be performed later in ef4_io_resume. */
3090static pci_ers_result_t ef4_io_slot_reset(struct pci_dev *pdev)
3091{
3092	struct ef4_nic *efx = pci_get_drvdata(pdev);
3093	pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
3094
3095	if (pci_enable_device(pdev)) {
3096		netif_err(efx, hw, efx->net_dev,
3097			  "Cannot re-enable PCI device after reset.\n");
3098		status =  PCI_ERS_RESULT_DISCONNECT;
3099	}
3100
3101	return status;
3102}
3103
3104/* Perform the actual reset and resume I/O operations. */
3105static void ef4_io_resume(struct pci_dev *pdev)
3106{
3107	struct ef4_nic *efx = pci_get_drvdata(pdev);
3108	int rc;
3109
3110	rtnl_lock();
3111
3112	if (efx->state == STATE_DISABLED)
3113		goto out;
3114
3115	rc = ef4_reset(efx, RESET_TYPE_ALL);
3116	if (rc) {
3117		netif_err(efx, hw, efx->net_dev,
3118			  "ef4_reset failed after PCI error (%d)\n", rc);
3119	} else {
3120		efx->state = STATE_READY;
3121		netif_dbg(efx, hw, efx->net_dev,
3122			  "Done resetting and resuming IO after PCI error.\n");
3123	}
3124
3125out:
3126	rtnl_unlock();
3127}
3128
3129/* For simplicity and reliability, we always require a slot reset and try to
3130 * reset the hardware when a pci error affecting the device is detected.
3131 * We leave both the link_reset and mmio_enabled callback unimplemented:
3132 * with our request for slot reset the mmio_enabled callback will never be
3133 * called, and the link_reset callback is not used by AER or EEH mechanisms.
3134 */
3135static const struct pci_error_handlers ef4_err_handlers = {
3136	.error_detected = ef4_io_error_detected,
3137	.slot_reset	= ef4_io_slot_reset,
3138	.resume		= ef4_io_resume,
3139};
3140
3141static struct pci_driver ef4_pci_driver = {
3142	.name		= KBUILD_MODNAME,
3143	.id_table	= ef4_pci_table,
3144	.probe		= ef4_pci_probe,
3145	.remove		= ef4_pci_remove,
3146	.driver.pm	= &ef4_pm_ops,
3147	.err_handler	= &ef4_err_handlers,
3148};
3149
3150/**************************************************************************
3151 *
3152 * Kernel module interface
3153 *
3154 *************************************************************************/
3155
3156module_param(interrupt_mode, uint, 0444);
3157MODULE_PARM_DESC(interrupt_mode,
3158		 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
3159
3160static int __init ef4_init_module(void)
3161{
3162	int rc;
3163
3164	printk(KERN_INFO "Solarflare Falcon driver v" EF4_DRIVER_VERSION "\n");
3165
3166	rc = register_netdevice_notifier(&ef4_netdev_notifier);
3167	if (rc)
3168		goto err_notifier;
3169
3170	reset_workqueue = create_singlethread_workqueue("sfc_reset");
3171	if (!reset_workqueue) {
3172		rc = -ENOMEM;
3173		goto err_reset;
3174	}
3175
3176	rc = pci_register_driver(&ef4_pci_driver);
3177	if (rc < 0)
3178		goto err_pci;
3179
3180	return 0;
3181
3182 err_pci:
3183	destroy_workqueue(reset_workqueue);
3184 err_reset:
3185	unregister_netdevice_notifier(&ef4_netdev_notifier);
3186 err_notifier:
3187	return rc;
3188}
3189
3190static void __exit ef4_exit_module(void)
3191{
3192	printk(KERN_INFO "Solarflare Falcon driver unloading\n");
3193
3194	pci_unregister_driver(&ef4_pci_driver);
3195	destroy_workqueue(reset_workqueue);
3196	unregister_netdevice_notifier(&ef4_netdev_notifier);
3197
3198}
3199
3200module_init(ef4_init_module);
3201module_exit(ef4_exit_module);
3202
3203MODULE_AUTHOR("Solarflare Communications and "
3204	      "Michael Brown <mbrown@fensystems.co.uk>");
3205MODULE_DESCRIPTION("Solarflare Falcon network driver");
3206MODULE_LICENSE("GPL");
3207MODULE_DEVICE_TABLE(pci, ef4_pci_table);
3208MODULE_VERSION(EF4_DRIVER_VERSION);
v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/****************************************************************************
   3 * Driver for Solarflare network controllers and boards
   4 * Copyright 2005-2006 Fen Systems Ltd.
   5 * Copyright 2005-2013 Solarflare Communications Inc.
   6 */
   7
   8#include <linux/module.h>
   9#include <linux/pci.h>
  10#include <linux/netdevice.h>
  11#include <linux/etherdevice.h>
  12#include <linux/delay.h>
  13#include <linux/notifier.h>
  14#include <linux/ip.h>
  15#include <linux/tcp.h>
  16#include <linux/in.h>
  17#include <linux/ethtool.h>
  18#include <linux/topology.h>
  19#include <linux/gfp.h>
  20#include <linux/aer.h>
  21#include <linux/interrupt.h>
  22#include "net_driver.h"
  23#include "efx.h"
  24#include "nic.h"
  25#include "selftest.h"
  26
  27#include "workarounds.h"
  28
  29/**************************************************************************
  30 *
  31 * Type name strings
  32 *
  33 **************************************************************************
  34 */
  35
  36/* Loopback mode names (see LOOPBACK_MODE()) */
  37const unsigned int ef4_loopback_mode_max = LOOPBACK_MAX;
  38const char *const ef4_loopback_mode_names[] = {
  39	[LOOPBACK_NONE]		= "NONE",
  40	[LOOPBACK_DATA]		= "DATAPATH",
  41	[LOOPBACK_GMAC]		= "GMAC",
  42	[LOOPBACK_XGMII]	= "XGMII",
  43	[LOOPBACK_XGXS]		= "XGXS",
  44	[LOOPBACK_XAUI]		= "XAUI",
  45	[LOOPBACK_GMII]		= "GMII",
  46	[LOOPBACK_SGMII]	= "SGMII",
  47	[LOOPBACK_XGBR]		= "XGBR",
  48	[LOOPBACK_XFI]		= "XFI",
  49	[LOOPBACK_XAUI_FAR]	= "XAUI_FAR",
  50	[LOOPBACK_GMII_FAR]	= "GMII_FAR",
  51	[LOOPBACK_SGMII_FAR]	= "SGMII_FAR",
  52	[LOOPBACK_XFI_FAR]	= "XFI_FAR",
  53	[LOOPBACK_GPHY]		= "GPHY",
  54	[LOOPBACK_PHYXS]	= "PHYXS",
  55	[LOOPBACK_PCS]		= "PCS",
  56	[LOOPBACK_PMAPMD]	= "PMA/PMD",
  57	[LOOPBACK_XPORT]	= "XPORT",
  58	[LOOPBACK_XGMII_WS]	= "XGMII_WS",
  59	[LOOPBACK_XAUI_WS]	= "XAUI_WS",
  60	[LOOPBACK_XAUI_WS_FAR]  = "XAUI_WS_FAR",
  61	[LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR",
  62	[LOOPBACK_GMII_WS]	= "GMII_WS",
  63	[LOOPBACK_XFI_WS]	= "XFI_WS",
  64	[LOOPBACK_XFI_WS_FAR]	= "XFI_WS_FAR",
  65	[LOOPBACK_PHYXS_WS]	= "PHYXS_WS",
  66};
  67
  68const unsigned int ef4_reset_type_max = RESET_TYPE_MAX;
  69const char *const ef4_reset_type_names[] = {
  70	[RESET_TYPE_INVISIBLE]          = "INVISIBLE",
  71	[RESET_TYPE_ALL]                = "ALL",
  72	[RESET_TYPE_RECOVER_OR_ALL]     = "RECOVER_OR_ALL",
  73	[RESET_TYPE_WORLD]              = "WORLD",
  74	[RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
  75	[RESET_TYPE_DATAPATH]           = "DATAPATH",
  76	[RESET_TYPE_DISABLE]            = "DISABLE",
  77	[RESET_TYPE_TX_WATCHDOG]        = "TX_WATCHDOG",
  78	[RESET_TYPE_INT_ERROR]          = "INT_ERROR",
  79	[RESET_TYPE_RX_RECOVERY]        = "RX_RECOVERY",
  80	[RESET_TYPE_DMA_ERROR]          = "DMA_ERROR",
  81	[RESET_TYPE_TX_SKIP]            = "TX_SKIP",
  82};
  83
  84/* Reset workqueue. If any NIC has a hardware failure then a reset will be
  85 * queued onto this work queue. This is not a per-nic work queue, because
  86 * ef4_reset_work() acquires the rtnl lock, so resets are naturally serialised.
  87 */
  88static struct workqueue_struct *reset_workqueue;
  89
  90/* How often and how many times to poll for a reset while waiting for a
  91 * BIST that another function started to complete.
  92 */
  93#define BIST_WAIT_DELAY_MS	100
  94#define BIST_WAIT_DELAY_COUNT	100
  95
  96/**************************************************************************
  97 *
  98 * Configurable values
  99 *
 100 *************************************************************************/
 101
 102/*
 103 * Use separate channels for TX and RX events
 104 *
 105 * Set this to 1 to use separate channels for TX and RX. It allows us
 106 * to control interrupt affinity separately for TX and RX.
 107 *
 108 * This is only used in MSI-X interrupt mode
 109 */
 110bool ef4_separate_tx_channels;
 111module_param(ef4_separate_tx_channels, bool, 0444);
 112MODULE_PARM_DESC(ef4_separate_tx_channels,
 113		 "Use separate channels for TX and RX");
 114
 115/* This is the time (in jiffies) between invocations of the hardware
 116 * monitor.
 117 * On Falcon-based NICs, this will:
 118 * - Check the on-board hardware monitor;
 119 * - Poll the link state and reconfigure the hardware as necessary.
 120 * On Siena-based NICs for power systems with EEH support, this will give EEH a
 121 * chance to start.
 122 */
 123static unsigned int ef4_monitor_interval = 1 * HZ;
 124
 125/* Initial interrupt moderation settings.  They can be modified after
 126 * module load with ethtool.
 127 *
 128 * The default for RX should strike a balance between increasing the
 129 * round-trip latency and reducing overhead.
 130 */
 131static unsigned int rx_irq_mod_usec = 60;
 132
 133/* Initial interrupt moderation settings.  They can be modified after
 134 * module load with ethtool.
 135 *
 136 * This default is chosen to ensure that a 10G link does not go idle
 137 * while a TX queue is stopped after it has become full.  A queue is
 138 * restarted when it drops below half full.  The time this takes (assuming
 139 * worst case 3 descriptors per packet and 1024 descriptors) is
 140 *   512 / 3 * 1.2 = 205 usec.
 141 */
 142static unsigned int tx_irq_mod_usec = 150;
 143
 144/* This is the first interrupt mode to try out of:
 145 * 0 => MSI-X
 146 * 1 => MSI
 147 * 2 => legacy
 148 */
 149static unsigned int interrupt_mode;
 150
 151/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 152 * i.e. the number of CPUs among which we may distribute simultaneous
 153 * interrupt handling.
 154 *
 155 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 156 * The default (0) means to assign an interrupt to each core.
 157 */
 158static unsigned int rss_cpus;
 159module_param(rss_cpus, uint, 0444);
 160MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
 161
 162static bool phy_flash_cfg;
 163module_param(phy_flash_cfg, bool, 0644);
 164MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");
 165
 166static unsigned irq_adapt_low_thresh = 8000;
 167module_param(irq_adapt_low_thresh, uint, 0644);
 168MODULE_PARM_DESC(irq_adapt_low_thresh,
 169		 "Threshold score for reducing IRQ moderation");
 170
 171static unsigned irq_adapt_high_thresh = 16000;
 172module_param(irq_adapt_high_thresh, uint, 0644);
 173MODULE_PARM_DESC(irq_adapt_high_thresh,
 174		 "Threshold score for increasing IRQ moderation");
 175
 176static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
 177			 NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
 178			 NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
 179			 NETIF_MSG_TX_ERR | NETIF_MSG_HW);
 180module_param(debug, uint, 0);
 181MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
 182
 183/**************************************************************************
 184 *
 185 * Utility functions and prototypes
 186 *
 187 *************************************************************************/
 188
 189static int ef4_soft_enable_interrupts(struct ef4_nic *efx);
 190static void ef4_soft_disable_interrupts(struct ef4_nic *efx);
 191static void ef4_remove_channel(struct ef4_channel *channel);
 192static void ef4_remove_channels(struct ef4_nic *efx);
 193static const struct ef4_channel_type ef4_default_channel_type;
 194static void ef4_remove_port(struct ef4_nic *efx);
 195static void ef4_init_napi_channel(struct ef4_channel *channel);
 196static void ef4_fini_napi(struct ef4_nic *efx);
 197static void ef4_fini_napi_channel(struct ef4_channel *channel);
 198static void ef4_fini_struct(struct ef4_nic *efx);
 199static void ef4_start_all(struct ef4_nic *efx);
 200static void ef4_stop_all(struct ef4_nic *efx);
 201
 202#define EF4_ASSERT_RESET_SERIALISED(efx)		\
 203	do {						\
 204		if ((efx->state == STATE_READY) ||	\
 205		    (efx->state == STATE_RECOVERY) ||	\
 206		    (efx->state == STATE_DISABLED))	\
 207			ASSERT_RTNL();			\
 208	} while (0)
 209
 210static int ef4_check_disabled(struct ef4_nic *efx)
 211{
 212	if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
 213		netif_err(efx, drv, efx->net_dev,
 214			  "device is disabled due to earlier errors\n");
 215		return -EIO;
 216	}
 217	return 0;
 218}
 219
 220/**************************************************************************
 221 *
 222 * Event queue processing
 223 *
 224 *************************************************************************/
 225
 226/* Process channel's event queue
 227 *
 228 * This function is responsible for processing the event queue of a
 229 * single channel.  The caller must guarantee that this function will
 230 * never be concurrently called more than once on the same channel,
 231 * though different channels may be being processed concurrently.
 232 */
 233static int ef4_process_channel(struct ef4_channel *channel, int budget)
 234{
 235	struct ef4_tx_queue *tx_queue;
 236	int spent;
 237
 238	if (unlikely(!channel->enabled))
 239		return 0;
 240
 241	ef4_for_each_channel_tx_queue(tx_queue, channel) {
 242		tx_queue->pkts_compl = 0;
 243		tx_queue->bytes_compl = 0;
 244	}
 245
 246	spent = ef4_nic_process_eventq(channel, budget);
 247	if (spent && ef4_channel_has_rx_queue(channel)) {
 248		struct ef4_rx_queue *rx_queue =
 249			ef4_channel_get_rx_queue(channel);
 250
 251		ef4_rx_flush_packet(channel);
 252		ef4_fast_push_rx_descriptors(rx_queue, true);
 253	}
 254
 255	/* Update BQL */
 256	ef4_for_each_channel_tx_queue(tx_queue, channel) {
 257		if (tx_queue->bytes_compl) {
 258			netdev_tx_completed_queue(tx_queue->core_txq,
 259				tx_queue->pkts_compl, tx_queue->bytes_compl);
 260		}
 261	}
 262
 263	return spent;
 264}
 265
 266/* NAPI poll handler
 267 *
 268 * NAPI guarantees serialisation of polls of the same device, which
 269 * provides the guarantee required by ef4_process_channel().
 270 */
 271static void ef4_update_irq_mod(struct ef4_nic *efx, struct ef4_channel *channel)
 272{
 273	int step = efx->irq_mod_step_us;
 274
 275	if (channel->irq_mod_score < irq_adapt_low_thresh) {
 276		if (channel->irq_moderation_us > step) {
 277			channel->irq_moderation_us -= step;
 278			efx->type->push_irq_moderation(channel);
 279		}
 280	} else if (channel->irq_mod_score > irq_adapt_high_thresh) {
 281		if (channel->irq_moderation_us <
 282		    efx->irq_rx_moderation_us) {
 283			channel->irq_moderation_us += step;
 284			efx->type->push_irq_moderation(channel);
 285		}
 286	}
 287
 288	channel->irq_count = 0;
 289	channel->irq_mod_score = 0;
 290}
 291
 292static int ef4_poll(struct napi_struct *napi, int budget)
 293{
 294	struct ef4_channel *channel =
 295		container_of(napi, struct ef4_channel, napi_str);
 296	struct ef4_nic *efx = channel->efx;
 297	int spent;
 298
 299	netif_vdbg(efx, intr, efx->net_dev,
 300		   "channel %d NAPI poll executing on CPU %d\n",
 301		   channel->channel, raw_smp_processor_id());
 302
 303	spent = ef4_process_channel(channel, budget);
 304
 305	if (spent < budget) {
 306		if (ef4_channel_has_rx_queue(channel) &&
 307		    efx->irq_rx_adaptive &&
 308		    unlikely(++channel->irq_count == 1000)) {
 309			ef4_update_irq_mod(efx, channel);
 310		}
 311
 312		ef4_filter_rfs_expire(channel);
 313
 314		/* There is no race here; although napi_disable() will
 315		 * only wait for napi_complete(), this isn't a problem
 316		 * since ef4_nic_eventq_read_ack() will have no effect if
 317		 * interrupts have already been disabled.
 318		 */
 319		napi_complete_done(napi, spent);
 320		ef4_nic_eventq_read_ack(channel);
 321	}
 322
 323	return spent;
 324}
 325
 326/* Create event queue
 327 * Event queue memory allocations are done only once.  If the channel
 328 * is reset, the memory buffer will be reused; this guards against
 329 * errors during channel reset and also simplifies interrupt handling.
 330 */
 331static int ef4_probe_eventq(struct ef4_channel *channel)
 332{
 333	struct ef4_nic *efx = channel->efx;
 334	unsigned long entries;
 335
 336	netif_dbg(efx, probe, efx->net_dev,
 337		  "chan %d create event queue\n", channel->channel);
 338
 339	/* Build an event queue with room for one event per tx and rx buffer,
 340	 * plus some extra for link state events and MCDI completions. */
 341	entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
 342	EF4_BUG_ON_PARANOID(entries > EF4_MAX_EVQ_SIZE);
 343	channel->eventq_mask = max(entries, EF4_MIN_EVQ_SIZE) - 1;
 344
 345	return ef4_nic_probe_eventq(channel);
 346}
 347
 348/* Prepare channel's event queue */
 349static int ef4_init_eventq(struct ef4_channel *channel)
 350{
 351	struct ef4_nic *efx = channel->efx;
 352	int rc;
 353
 354	EF4_WARN_ON_PARANOID(channel->eventq_init);
 355
 356	netif_dbg(efx, drv, efx->net_dev,
 357		  "chan %d init event queue\n", channel->channel);
 358
 359	rc = ef4_nic_init_eventq(channel);
 360	if (rc == 0) {
 361		efx->type->push_irq_moderation(channel);
 362		channel->eventq_read_ptr = 0;
 363		channel->eventq_init = true;
 364	}
 365	return rc;
 366}
 367
 368/* Enable event queue processing and NAPI */
 369void ef4_start_eventq(struct ef4_channel *channel)
 370{
 371	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
 372		  "chan %d start event queue\n", channel->channel);
 373
 374	/* Make sure the NAPI handler sees the enabled flag set */
 375	channel->enabled = true;
 376	smp_wmb();
 377
 378	napi_enable(&channel->napi_str);
 379	ef4_nic_eventq_read_ack(channel);
 380}
 381
 382/* Disable event queue processing and NAPI */
 383void ef4_stop_eventq(struct ef4_channel *channel)
 384{
 385	if (!channel->enabled)
 386		return;
 387
 388	napi_disable(&channel->napi_str);
 389	channel->enabled = false;
 390}
 391
 392static void ef4_fini_eventq(struct ef4_channel *channel)
 393{
 394	if (!channel->eventq_init)
 395		return;
 396
 397	netif_dbg(channel->efx, drv, channel->efx->net_dev,
 398		  "chan %d fini event queue\n", channel->channel);
 399
 400	ef4_nic_fini_eventq(channel);
 401	channel->eventq_init = false;
 402}
 403
 404static void ef4_remove_eventq(struct ef4_channel *channel)
 405{
 406	netif_dbg(channel->efx, drv, channel->efx->net_dev,
 407		  "chan %d remove event queue\n", channel->channel);
 408
 409	ef4_nic_remove_eventq(channel);
 410}
 411
 412/**************************************************************************
 413 *
 414 * Channel handling
 415 *
 416 *************************************************************************/
 417
 418/* Allocate and initialise a channel structure. */
 419static struct ef4_channel *
 420ef4_alloc_channel(struct ef4_nic *efx, int i, struct ef4_channel *old_channel)
 421{
 422	struct ef4_channel *channel;
 423	struct ef4_rx_queue *rx_queue;
 424	struct ef4_tx_queue *tx_queue;
 425	int j;
 426
 427	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
 428	if (!channel)
 429		return NULL;
 430
 431	channel->efx = efx;
 432	channel->channel = i;
 433	channel->type = &ef4_default_channel_type;
 434
 435	for (j = 0; j < EF4_TXQ_TYPES; j++) {
 436		tx_queue = &channel->tx_queue[j];
 437		tx_queue->efx = efx;
 438		tx_queue->queue = i * EF4_TXQ_TYPES + j;
 439		tx_queue->channel = channel;
 440	}
 441
 442	rx_queue = &channel->rx_queue;
 443	rx_queue->efx = efx;
 444	timer_setup(&rx_queue->slow_fill, ef4_rx_slow_fill, 0);
 445
 446	return channel;
 447}
 448
 449/* Allocate and initialise a channel structure, copying parameters
 450 * (but not resources) from an old channel structure.
 451 */
 452static struct ef4_channel *
 453ef4_copy_channel(const struct ef4_channel *old_channel)
 454{
 455	struct ef4_channel *channel;
 456	struct ef4_rx_queue *rx_queue;
 457	struct ef4_tx_queue *tx_queue;
 458	int j;
 459
 460	channel = kmalloc(sizeof(*channel), GFP_KERNEL);
 461	if (!channel)
 462		return NULL;
 463
 464	*channel = *old_channel;
 465
 466	channel->napi_dev = NULL;
 467	INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
 468	channel->napi_str.napi_id = 0;
 469	channel->napi_str.state = 0;
 470	memset(&channel->eventq, 0, sizeof(channel->eventq));
 471
 472	for (j = 0; j < EF4_TXQ_TYPES; j++) {
 473		tx_queue = &channel->tx_queue[j];
 474		if (tx_queue->channel)
 475			tx_queue->channel = channel;
 476		tx_queue->buffer = NULL;
 477		memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
 478	}
 479
 480	rx_queue = &channel->rx_queue;
 481	rx_queue->buffer = NULL;
 482	memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
 483	timer_setup(&rx_queue->slow_fill, ef4_rx_slow_fill, 0);
 484
 485	return channel;
 486}
 487
 488static int ef4_probe_channel(struct ef4_channel *channel)
 489{
 490	struct ef4_tx_queue *tx_queue;
 491	struct ef4_rx_queue *rx_queue;
 492	int rc;
 493
 494	netif_dbg(channel->efx, probe, channel->efx->net_dev,
 495		  "creating channel %d\n", channel->channel);
 496
 497	rc = channel->type->pre_probe(channel);
 498	if (rc)
 499		goto fail;
 500
 501	rc = ef4_probe_eventq(channel);
 502	if (rc)
 503		goto fail;
 504
 505	ef4_for_each_channel_tx_queue(tx_queue, channel) {
 506		rc = ef4_probe_tx_queue(tx_queue);
 507		if (rc)
 508			goto fail;
 509	}
 510
 511	ef4_for_each_channel_rx_queue(rx_queue, channel) {
 512		rc = ef4_probe_rx_queue(rx_queue);
 513		if (rc)
 514			goto fail;
 515	}
 516
 517	return 0;
 518
 519fail:
 520	ef4_remove_channel(channel);
 521	return rc;
 522}
 523
 524static void
 525ef4_get_channel_name(struct ef4_channel *channel, char *buf, size_t len)
 526{
 527	struct ef4_nic *efx = channel->efx;
 528	const char *type;
 529	int number;
 530
 531	number = channel->channel;
 532	if (efx->tx_channel_offset == 0) {
 533		type = "";
 534	} else if (channel->channel < efx->tx_channel_offset) {
 535		type = "-rx";
 536	} else {
 537		type = "-tx";
 538		number -= efx->tx_channel_offset;
 539	}
 540	snprintf(buf, len, "%s%s-%d", efx->name, type, number);
 541}
 542
 543static void ef4_set_channel_names(struct ef4_nic *efx)
 544{
 545	struct ef4_channel *channel;
 546
 547	ef4_for_each_channel(channel, efx)
 548		channel->type->get_name(channel,
 549					efx->msi_context[channel->channel].name,
 550					sizeof(efx->msi_context[0].name));
 551}
 552
 553static int ef4_probe_channels(struct ef4_nic *efx)
 554{
 555	struct ef4_channel *channel;
 556	int rc;
 557
 558	/* Restart special buffer allocation */
 559	efx->next_buffer_table = 0;
 560
 561	/* Probe channels in reverse, so that any 'extra' channels
 562	 * use the start of the buffer table. This allows the traffic
 563	 * channels to be resized without moving them or wasting the
 564	 * entries before them.
 565	 */
 566	ef4_for_each_channel_rev(channel, efx) {
 567		rc = ef4_probe_channel(channel);
 568		if (rc) {
 569			netif_err(efx, probe, efx->net_dev,
 570				  "failed to create channel %d\n",
 571				  channel->channel);
 572			goto fail;
 573		}
 574	}
 575	ef4_set_channel_names(efx);
 576
 577	return 0;
 578
 579fail:
 580	ef4_remove_channels(efx);
 581	return rc;
 582}
 583
  584/* Channels are shut down and reinitialised whilst the NIC is running
  585 * to propagate configuration changes (MTU, checksum offload), or
  586 * to clear hardware error conditions.
  587 */
 588static void ef4_start_datapath(struct ef4_nic *efx)
 589{
 590	netdev_features_t old_features = efx->net_dev->features;
 591	bool old_rx_scatter = efx->rx_scatter;
 592	struct ef4_tx_queue *tx_queue;
 593	struct ef4_rx_queue *rx_queue;
 594	struct ef4_channel *channel;
 595	size_t rx_buf_len;
 596
 597	/* Calculate the rx buffer allocation parameters required to
 598	 * support the current MTU, including padding for header
 599	 * alignment and overruns.
 600	 */
 601	efx->rx_dma_len = (efx->rx_prefix_size +
 602			   EF4_MAX_FRAME_LEN(efx->net_dev->mtu) +
 603			   efx->type->rx_buffer_padding);
 604	rx_buf_len = (sizeof(struct ef4_rx_page_state) +
 605		      efx->rx_ip_align + efx->rx_dma_len);
 606	if (rx_buf_len <= PAGE_SIZE) {
 607		efx->rx_scatter = efx->type->always_rx_scatter;
 608		efx->rx_buffer_order = 0;
 609	} else if (efx->type->can_rx_scatter) {
 610		BUILD_BUG_ON(EF4_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
 611		BUILD_BUG_ON(sizeof(struct ef4_rx_page_state) +
 612			     2 * ALIGN(NET_IP_ALIGN + EF4_RX_USR_BUF_SIZE,
 613				       EF4_RX_BUF_ALIGNMENT) >
 614			     PAGE_SIZE);
 615		efx->rx_scatter = true;
 616		efx->rx_dma_len = EF4_RX_USR_BUF_SIZE;
 617		efx->rx_buffer_order = 0;
 618	} else {
 619		efx->rx_scatter = false;
 620		efx->rx_buffer_order = get_order(rx_buf_len);
 621	}
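	/* Worked example (editorial; assumes 4KiB pages and a 1500-byte
	 * MTU): rx_dma_len is roughly prefix + EF4_MAX_FRAME_LEN(1500) +
	 * padding, comfortably below PAGE_SIZE, so the order-0 branch is
	 * taken. A 9000-byte MTU overflows one page: a scatter-capable NIC
	 * then uses EF4_RX_USR_BUF_SIZE fragments, and anything else falls
	 * back to a high-order allocation of get_order(rx_buf_len) pages.
	 */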
 622
 623	ef4_rx_config_page_split(efx);
 624	if (efx->rx_buffer_order)
 625		netif_dbg(efx, drv, efx->net_dev,
 626			  "RX buf len=%u; page order=%u batch=%u\n",
 627			  efx->rx_dma_len, efx->rx_buffer_order,
 628			  efx->rx_pages_per_batch);
 629	else
 630		netif_dbg(efx, drv, efx->net_dev,
 631			  "RX buf len=%u step=%u bpp=%u; page batch=%u\n",
 632			  efx->rx_dma_len, efx->rx_page_buf_step,
 633			  efx->rx_bufs_per_page, efx->rx_pages_per_batch);
 634
 635	/* Restore previously fixed features in hw_features and remove
 636	 * features which are fixed now
 637	 */
 638	efx->net_dev->hw_features |= efx->net_dev->features;
 639	efx->net_dev->hw_features &= ~efx->fixed_features;
 640	efx->net_dev->features |= efx->fixed_features;
 641	if (efx->net_dev->features != old_features)
 642		netdev_features_change(efx->net_dev);
 643
 644	/* RX filters may also have scatter-enabled flags */
 645	if (efx->rx_scatter != old_rx_scatter)
 646		efx->type->filter_update_rx_scatter(efx);
 647
 648	/* We must keep at least one descriptor in a TX ring empty.
 649	 * We could avoid this when the queue size does not exactly
 650	 * match the hardware ring size, but it's not that important.
 651	 * Therefore we stop the queue when one more skb might fill
 652	 * the ring completely.  We wake it when half way back to
 653	 * empty.
 654	 */
 655	efx->txq_stop_thresh = efx->txq_entries - ef4_tx_max_skb_descs(efx);
 656	efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
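	/* Worked example (editorial, hypothetical sizes): with txq_entries
	 * == 1024 and ef4_tx_max_skb_descs() == 18, txq_stop_thresh is 1006,
	 * so the queue stops once 1006 descriptors are in use (one more
	 * maximal skb could overrun the ring) and wakes again at 503 in use.
	 */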
 657
 658	/* Initialise the channels */
 659	ef4_for_each_channel(channel, efx) {
 660		ef4_for_each_channel_tx_queue(tx_queue, channel) {
 661			ef4_init_tx_queue(tx_queue);
 662			atomic_inc(&efx->active_queues);
 663		}
 664
 665		ef4_for_each_channel_rx_queue(rx_queue, channel) {
 666			ef4_init_rx_queue(rx_queue);
 667			atomic_inc(&efx->active_queues);
 668			ef4_stop_eventq(channel);
 669			ef4_fast_push_rx_descriptors(rx_queue, false);
 670			ef4_start_eventq(channel);
 671		}
 672
 673		WARN_ON(channel->rx_pkt_n_frags);
 674	}
 675
 676	if (netif_device_present(efx->net_dev))
 677		netif_tx_wake_all_queues(efx->net_dev);
 678}
 679
 680static void ef4_stop_datapath(struct ef4_nic *efx)
 681{
 682	struct ef4_channel *channel;
 683	struct ef4_tx_queue *tx_queue;
 684	struct ef4_rx_queue *rx_queue;
 685	int rc;
 686
 687	EF4_ASSERT_RESET_SERIALISED(efx);
 688	BUG_ON(efx->port_enabled);
 689
 690	/* Stop RX refill */
 691	ef4_for_each_channel(channel, efx) {
 692		ef4_for_each_channel_rx_queue(rx_queue, channel)
 693			rx_queue->refill_enabled = false;
 694	}
 695
 696	ef4_for_each_channel(channel, efx) {
 697		/* RX packet processing is pipelined, so wait for the
 698		 * NAPI handler to complete.  At least event queue 0
 699		 * might be kept active by non-data events, so don't
 700		 * use napi_synchronize() but actually disable NAPI
 701		 * temporarily.
 702		 */
 703		if (ef4_channel_has_rx_queue(channel)) {
 704			ef4_stop_eventq(channel);
 705			ef4_start_eventq(channel);
 706		}
 707	}
 708
 709	rc = efx->type->fini_dmaq(efx);
 710	if (rc && EF4_WORKAROUND_7803(efx)) {
 711		/* Schedule a reset to recover from the flush failure. The
 712		 * descriptor caches reference memory we're about to free,
 713		 * but falcon_reconfigure_mac_wrapper() won't reconnect
 714		 * the MACs because of the pending reset.
 715		 */
 716		netif_err(efx, drv, efx->net_dev,
 717			  "Resetting to recover from flush failure\n");
 718		ef4_schedule_reset(efx, RESET_TYPE_ALL);
 719	} else if (rc) {
 720		netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
 721	} else {
 722		netif_dbg(efx, drv, efx->net_dev,
 723			  "successfully flushed all queues\n");
 724	}
 725
 726	ef4_for_each_channel(channel, efx) {
 727		ef4_for_each_channel_rx_queue(rx_queue, channel)
 728			ef4_fini_rx_queue(rx_queue);
 729		ef4_for_each_possible_channel_tx_queue(tx_queue, channel)
 730			ef4_fini_tx_queue(tx_queue);
 731	}
 732}
 733
 734static void ef4_remove_channel(struct ef4_channel *channel)
 735{
 736	struct ef4_tx_queue *tx_queue;
 737	struct ef4_rx_queue *rx_queue;
 738
 739	netif_dbg(channel->efx, drv, channel->efx->net_dev,
 740		  "destroy chan %d\n", channel->channel);
 741
 742	ef4_for_each_channel_rx_queue(rx_queue, channel)
 743		ef4_remove_rx_queue(rx_queue);
 744	ef4_for_each_possible_channel_tx_queue(tx_queue, channel)
 745		ef4_remove_tx_queue(tx_queue);
 746	ef4_remove_eventq(channel);
 747	channel->type->post_remove(channel);
 748}
 749
 750static void ef4_remove_channels(struct ef4_nic *efx)
 751{
 752	struct ef4_channel *channel;
 753
 754	ef4_for_each_channel(channel, efx)
 755		ef4_remove_channel(channel);
 756}
 757
 758int
 759ef4_realloc_channels(struct ef4_nic *efx, u32 rxq_entries, u32 txq_entries)
 760{
 761	struct ef4_channel *other_channel[EF4_MAX_CHANNELS], *channel;
 762	u32 old_rxq_entries, old_txq_entries;
 763	unsigned i, next_buffer_table = 0;
 764	int rc, rc2;
 765
 766	rc = ef4_check_disabled(efx);
 767	if (rc)
 768		return rc;
 769
 770	/* Not all channels should be reallocated. We must avoid
 771	 * reallocating their buffer table entries.
 772	 */
 773	ef4_for_each_channel(channel, efx) {
 774		struct ef4_rx_queue *rx_queue;
 775		struct ef4_tx_queue *tx_queue;
 776
 777		if (channel->type->copy)
 778			continue;
 779		next_buffer_table = max(next_buffer_table,
 780					channel->eventq.index +
 781					channel->eventq.entries);
 782		ef4_for_each_channel_rx_queue(rx_queue, channel)
 783			next_buffer_table = max(next_buffer_table,
 784						rx_queue->rxd.index +
 785						rx_queue->rxd.entries);
 786		ef4_for_each_channel_tx_queue(tx_queue, channel)
 787			next_buffer_table = max(next_buffer_table,
 788						tx_queue->txd.index +
 789						tx_queue->txd.entries);
 790	}
 791
 792	ef4_device_detach_sync(efx);
 793	ef4_stop_all(efx);
 794	ef4_soft_disable_interrupts(efx);
 795
 796	/* Clone channels (where possible) */
 797	memset(other_channel, 0, sizeof(other_channel));
 798	for (i = 0; i < efx->n_channels; i++) {
 799		channel = efx->channel[i];
 800		if (channel->type->copy)
 801			channel = channel->type->copy(channel);
 802		if (!channel) {
 803			rc = -ENOMEM;
 804			goto out;
 805		}
 806		other_channel[i] = channel;
 807	}
 808
 809	/* Swap entry counts and channel pointers */
 810	old_rxq_entries = efx->rxq_entries;
 811	old_txq_entries = efx->txq_entries;
 812	efx->rxq_entries = rxq_entries;
 813	efx->txq_entries = txq_entries;
 814	for (i = 0; i < efx->n_channels; i++) {
 815		swap(efx->channel[i], other_channel[i]);
 816	}
 817
 818	/* Restart buffer table allocation */
 819	efx->next_buffer_table = next_buffer_table;
 820
 821	for (i = 0; i < efx->n_channels; i++) {
 822		channel = efx->channel[i];
 823		if (!channel->type->copy)
 824			continue;
 825		rc = ef4_probe_channel(channel);
 826		if (rc)
 827			goto rollback;
 828		ef4_init_napi_channel(efx->channel[i]);
 829	}
 830
 831out:
 832	/* Destroy unused channel structures */
 833	for (i = 0; i < efx->n_channels; i++) {
 834		channel = other_channel[i];
 835		if (channel && channel->type->copy) {
 836			ef4_fini_napi_channel(channel);
 837			ef4_remove_channel(channel);
 838			kfree(channel);
 839		}
 840	}
 841
 842	rc2 = ef4_soft_enable_interrupts(efx);
 843	if (rc2) {
 844		rc = rc ? rc : rc2;
 845		netif_err(efx, drv, efx->net_dev,
 846			  "unable to restart interrupts on channel reallocation\n");
 847		ef4_schedule_reset(efx, RESET_TYPE_DISABLE);
 848	} else {
 849		ef4_start_all(efx);
 850		netif_device_attach(efx->net_dev);
 851	}
 852	return rc;
 853
 854rollback:
 855	/* Swap back */
 856	efx->rxq_entries = old_rxq_entries;
 857	efx->txq_entries = old_txq_entries;
 858	for (i = 0; i < efx->n_channels; i++) {
 859		swap(efx->channel[i], other_channel[i]);
 860	}
 861	goto out;
 862}
 863
 864void ef4_schedule_slow_fill(struct ef4_rx_queue *rx_queue)
 865{
 866	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
 867}
 868
 869static const struct ef4_channel_type ef4_default_channel_type = {
 870	.pre_probe		= ef4_channel_dummy_op_int,
 871	.post_remove		= ef4_channel_dummy_op_void,
 872	.get_name		= ef4_get_channel_name,
 873	.copy			= ef4_copy_channel,
 874	.keep_eventq		= false,
 875};
 876
 877int ef4_channel_dummy_op_int(struct ef4_channel *channel)
 878{
 879	return 0;
 880}
 881
 882void ef4_channel_dummy_op_void(struct ef4_channel *channel)
 883{
 884}
 885
 886/**************************************************************************
 887 *
 888 * Port handling
 889 *
 890 **************************************************************************/
 891
 892/* This ensures that the kernel is kept informed (via
  893 * netif_carrier_on/off) of the link status, and also keeps the
  894 * port's TX queue stopped while the link is down.
 895 */
 896void ef4_link_status_changed(struct ef4_nic *efx)
 897{
 898	struct ef4_link_state *link_state = &efx->link_state;
 899
 900	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
 901	 * that no events are triggered between unregister_netdev() and the
 902	 * driver unloading. A more general condition is that NETDEV_CHANGE
 903	 * can only be generated between NETDEV_UP and NETDEV_DOWN */
 904	if (!netif_running(efx->net_dev))
 905		return;
 906
 907	if (link_state->up != netif_carrier_ok(efx->net_dev)) {
 908		efx->n_link_state_changes++;
 909
 910		if (link_state->up)
 911			netif_carrier_on(efx->net_dev);
 912		else
 913			netif_carrier_off(efx->net_dev);
 914	}
 915
 916	/* Status message for kernel log */
 917	if (link_state->up)
 918		netif_info(efx, link, efx->net_dev,
 919			   "link up at %uMbps %s-duplex (MTU %d)\n",
 920			   link_state->speed, link_state->fd ? "full" : "half",
 921			   efx->net_dev->mtu);
 922	else
 923		netif_info(efx, link, efx->net_dev, "link down\n");
 924}
 925
 926void ef4_link_set_advertising(struct ef4_nic *efx, u32 advertising)
 927{
 928	efx->link_advertising = advertising;
 929	if (advertising) {
 930		if (advertising & ADVERTISED_Pause)
 931			efx->wanted_fc |= (EF4_FC_TX | EF4_FC_RX);
 932		else
 933			efx->wanted_fc &= ~(EF4_FC_TX | EF4_FC_RX);
 934		if (advertising & ADVERTISED_Asym_Pause)
 935			efx->wanted_fc ^= EF4_FC_TX;
 936	}
 937}
 938
 939void ef4_link_set_wanted_fc(struct ef4_nic *efx, u8 wanted_fc)
 940{
 941	efx->wanted_fc = wanted_fc;
 942	if (efx->link_advertising) {
 943		if (wanted_fc & EF4_FC_RX)
 944			efx->link_advertising |= (ADVERTISED_Pause |
 945						  ADVERTISED_Asym_Pause);
 946		else
 947			efx->link_advertising &= ~(ADVERTISED_Pause |
 948						   ADVERTISED_Asym_Pause);
 949		if (wanted_fc & EF4_FC_TX)
 950			efx->link_advertising ^= ADVERTISED_Asym_Pause;
 951	}
 952}
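/* Editorial note on the XOR above: seeding from the RX setting and then
 * toggling ADVERTISED_Asym_Pause for TX reproduces the IEEE 802.3 pause
 * encoding: RX+TX advertises Pause; RX only advertises Pause and
 * Asym_Pause; TX only advertises Asym_Pause alone; with neither, both
 * bits stay clear.
 */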
 953
 954static void ef4_fini_port(struct ef4_nic *efx);
 955
 956/* We assume that efx->type->reconfigure_mac will always try to sync RX
 957 * filters and therefore needs to read-lock the filter table against freeing
 958 */
 959void ef4_mac_reconfigure(struct ef4_nic *efx)
 960{
 961	down_read(&efx->filter_sem);
 962	efx->type->reconfigure_mac(efx);
 963	up_read(&efx->filter_sem);
 964}
 965
 966/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
 967 * the MAC appropriately. All other PHY configuration changes are pushed
 968 * through phy_op->set_link_ksettings(), and pushed asynchronously to the MAC
 969 * through ef4_monitor().
 970 *
 971 * Callers must hold the mac_lock
 972 */
 973int __ef4_reconfigure_port(struct ef4_nic *efx)
 974{
 975	enum ef4_phy_mode phy_mode;
 976	int rc;
 977
 978	WARN_ON(!mutex_is_locked(&efx->mac_lock));
 979
 980	/* Disable PHY transmit in mac level loopbacks */
 981	phy_mode = efx->phy_mode;
 982	if (LOOPBACK_INTERNAL(efx))
 983		efx->phy_mode |= PHY_MODE_TX_DISABLED;
 984	else
 985		efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
 986
 987	rc = efx->type->reconfigure_port(efx);
 988
 989	if (rc)
 990		efx->phy_mode = phy_mode;
 991
 992	return rc;
 993}
 994
 995/* Reinitialise the MAC to pick up new PHY settings, even if the port is
 996 * disabled. */
 997int ef4_reconfigure_port(struct ef4_nic *efx)
 998{
 999	int rc;
1000
1001	EF4_ASSERT_RESET_SERIALISED(efx);
1002
1003	mutex_lock(&efx->mac_lock);
1004	rc = __ef4_reconfigure_port(efx);
1005	mutex_unlock(&efx->mac_lock);
1006
1007	return rc;
1008}
1009
1010/* Asynchronous work item for changing MAC promiscuity and multicast
1011 * hash.  Avoid a drain/rx_ingress enable by reconfiguring the current
1012 * MAC directly. */
1013static void ef4_mac_work(struct work_struct *data)
1014{
1015	struct ef4_nic *efx = container_of(data, struct ef4_nic, mac_work);
1016
1017	mutex_lock(&efx->mac_lock);
1018	if (efx->port_enabled)
1019		ef4_mac_reconfigure(efx);
1020	mutex_unlock(&efx->mac_lock);
1021}
1022
1023static int ef4_probe_port(struct ef4_nic *efx)
1024{
1025	int rc;
1026
1027	netif_dbg(efx, probe, efx->net_dev, "create port\n");
1028
1029	if (phy_flash_cfg)
1030		efx->phy_mode = PHY_MODE_SPECIAL;
1031
1032	/* Connect up MAC/PHY operations table */
1033	rc = efx->type->probe_port(efx);
1034	if (rc)
1035		return rc;
1036
1037	/* Initialise MAC address to permanent address */
1038	eth_hw_addr_set(efx->net_dev, efx->net_dev->perm_addr);
1039
1040	return 0;
1041}
1042
1043static int ef4_init_port(struct ef4_nic *efx)
1044{
1045	int rc;
1046
1047	netif_dbg(efx, drv, efx->net_dev, "init port\n");
1048
1049	mutex_lock(&efx->mac_lock);
1050
1051	rc = efx->phy_op->init(efx);
1052	if (rc)
1053		goto fail1;
1054
1055	efx->port_initialized = true;
1056
1057	/* Reconfigure the MAC before creating dma queues (required for
1058	 * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
1059	ef4_mac_reconfigure(efx);
1060
1061	/* Ensure the PHY advertises the correct flow control settings */
1062	rc = efx->phy_op->reconfigure(efx);
1063	if (rc && rc != -EPERM)
1064		goto fail2;
1065
1066	mutex_unlock(&efx->mac_lock);
1067	return 0;
1068
1069fail2:
1070	efx->phy_op->fini(efx);
1071fail1:
1072	mutex_unlock(&efx->mac_lock);
1073	return rc;
1074}
1075
1076static void ef4_start_port(struct ef4_nic *efx)
1077{
1078	netif_dbg(efx, ifup, efx->net_dev, "start port\n");
1079	BUG_ON(efx->port_enabled);
1080
1081	mutex_lock(&efx->mac_lock);
1082	efx->port_enabled = true;
1083
1084	/* Ensure MAC ingress/egress is enabled */
1085	ef4_mac_reconfigure(efx);
1086
1087	mutex_unlock(&efx->mac_lock);
1088}
1089
1090/* Cancel work for MAC reconfiguration, periodic hardware monitoring
1091 * and the async self-test, wait for them to finish and prevent them
1092 * being scheduled again.  This doesn't cover online resets, which
1093 * should only be cancelled when removing the device.
1094 */
1095static void ef4_stop_port(struct ef4_nic *efx)
1096{
1097	netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");
1098
1099	EF4_ASSERT_RESET_SERIALISED(efx);
1100
1101	mutex_lock(&efx->mac_lock);
1102	efx->port_enabled = false;
1103	mutex_unlock(&efx->mac_lock);
1104
1105	/* Serialise against ef4_set_multicast_list() */
1106	netif_addr_lock_bh(efx->net_dev);
1107	netif_addr_unlock_bh(efx->net_dev);
1108
1109	cancel_delayed_work_sync(&efx->monitor_work);
1110	ef4_selftest_async_cancel(efx);
1111	cancel_work_sync(&efx->mac_work);
1112}
1113
1114static void ef4_fini_port(struct ef4_nic *efx)
1115{
1116	netif_dbg(efx, drv, efx->net_dev, "shut down port\n");
1117
1118	if (!efx->port_initialized)
1119		return;
1120
1121	efx->phy_op->fini(efx);
1122	efx->port_initialized = false;
1123
1124	efx->link_state.up = false;
1125	ef4_link_status_changed(efx);
1126}
1127
1128static void ef4_remove_port(struct ef4_nic *efx)
1129{
1130	netif_dbg(efx, drv, efx->net_dev, "destroying port\n");
1131
1132	efx->type->remove_port(efx);
1133}
1134
1135/**************************************************************************
1136 *
1137 * NIC handling
1138 *
1139 **************************************************************************/
1140
1141static LIST_HEAD(ef4_primary_list);
1142static LIST_HEAD(ef4_unassociated_list);
1143
1144static bool ef4_same_controller(struct ef4_nic *left, struct ef4_nic *right)
1145{
1146	return left->type == right->type &&
1147		left->vpd_sn && right->vpd_sn &&
1148		!strcmp(left->vpd_sn, right->vpd_sn);
1149}
1150
1151static void ef4_associate(struct ef4_nic *efx)
1152{
1153	struct ef4_nic *other, *next;
1154
1155	if (efx->primary == efx) {
1156		/* Adding primary function; look for secondaries */
1157
1158		netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n");
1159		list_add_tail(&efx->node, &ef4_primary_list);
1160
1161		list_for_each_entry_safe(other, next, &ef4_unassociated_list,
1162					 node) {
1163			if (ef4_same_controller(efx, other)) {
1164				list_del(&other->node);
1165				netif_dbg(other, probe, other->net_dev,
1166					  "moving to secondary list of %s %s\n",
1167					  pci_name(efx->pci_dev),
1168					  efx->net_dev->name);
1169				list_add_tail(&other->node,
1170					      &efx->secondary_list);
1171				other->primary = efx;
1172			}
1173		}
1174	} else {
1175		/* Adding secondary function; look for primary */
1176
1177		list_for_each_entry(other, &ef4_primary_list, node) {
1178			if (ef4_same_controller(efx, other)) {
1179				netif_dbg(efx, probe, efx->net_dev,
1180					  "adding to secondary list of %s %s\n",
1181					  pci_name(other->pci_dev),
1182					  other->net_dev->name);
1183				list_add_tail(&efx->node,
1184					      &other->secondary_list);
1185				efx->primary = other;
1186				return;
1187			}
1188		}
1189
1190		netif_dbg(efx, probe, efx->net_dev,
1191			  "adding to unassociated list\n");
1192		list_add_tail(&efx->node, &ef4_unassociated_list);
1193	}
1194}
1195
1196static void ef4_dissociate(struct ef4_nic *efx)
1197{
1198	struct ef4_nic *other, *next;
1199
1200	list_del(&efx->node);
1201	efx->primary = NULL;
1202
1203	list_for_each_entry_safe(other, next, &efx->secondary_list, node) {
1204		list_del(&other->node);
1205		netif_dbg(other, probe, other->net_dev,
1206			  "moving to unassociated list\n");
1207		list_add_tail(&other->node, &ef4_unassociated_list);
1208		other->primary = NULL;
1209	}
1210}
1211
1212/* This configures the PCI device to enable I/O and DMA. */
1213static int ef4_init_io(struct ef4_nic *efx)
1214{
1215	struct pci_dev *pci_dev = efx->pci_dev;
1216	dma_addr_t dma_mask = efx->type->max_dma_mask;
1217	unsigned int mem_map_size = efx->type->mem_map_size(efx);
1218	int rc, bar;
1219
1220	netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
1221
1222	bar = efx->type->mem_bar;
1223
1224	rc = pci_enable_device(pci_dev);
1225	if (rc) {
1226		netif_err(efx, probe, efx->net_dev,
1227			  "failed to enable PCI device\n");
1228		goto fail1;
1229	}
1230
1231	pci_set_master(pci_dev);
1232
 1233	/* Set the PCI DMA mask.  Try all possibilities from our genuine mask
 1234	 * down to 32 bits, because some architectures will allow 40-bit
 1235	 * masks even though they reject 46-bit masks.
 1236	 */
1237	while (dma_mask > 0x7fffffffUL) {
1238		rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
1239		if (rc == 0)
1240			break;
1241		dma_mask >>= 1;
1242	}
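	/* Editorial illustration: starting from a 46-bit max_dma_mask the
	 * loop offers 46-, 45-, 44-, ... bit masks in turn and keeps the
	 * first one the platform accepts; the smallest mask attempted is
	 * the 32-bit 0xffffffff, after which it gives up with rc set.
	 */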
1243	if (rc) {
1244		netif_err(efx, probe, efx->net_dev,
1245			  "could not find a suitable DMA mask\n");
1246		goto fail2;
1247	}
1248	netif_dbg(efx, probe, efx->net_dev,
1249		  "using DMA mask %llx\n", (unsigned long long) dma_mask);
1250
1251	efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
1252	rc = pci_request_region(pci_dev, bar, "sfc");
1253	if (rc) {
1254		netif_err(efx, probe, efx->net_dev,
1255			  "request for memory BAR failed\n");
1256		rc = -EIO;
1257		goto fail3;
1258	}
1259	efx->membase = ioremap(efx->membase_phys, mem_map_size);
1260	if (!efx->membase) {
1261		netif_err(efx, probe, efx->net_dev,
1262			  "could not map memory BAR at %llx+%x\n",
1263			  (unsigned long long)efx->membase_phys, mem_map_size);
1264		rc = -ENOMEM;
1265		goto fail4;
1266	}
1267	netif_dbg(efx, probe, efx->net_dev,
1268		  "memory BAR at %llx+%x (virtual %p)\n",
1269		  (unsigned long long)efx->membase_phys, mem_map_size,
1270		  efx->membase);
1271
1272	return 0;
1273
1274 fail4:
1275	pci_release_region(efx->pci_dev, bar);
1276 fail3:
1277	efx->membase_phys = 0;
1278 fail2:
1279	pci_disable_device(efx->pci_dev);
1280 fail1:
1281	return rc;
1282}
1283
1284static void ef4_fini_io(struct ef4_nic *efx)
1285{
1286	int bar;
1287
1288	netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
1289
1290	if (efx->membase) {
1291		iounmap(efx->membase);
1292		efx->membase = NULL;
1293	}
1294
1295	if (efx->membase_phys) {
1296		bar = efx->type->mem_bar;
1297		pci_release_region(efx->pci_dev, bar);
1298		efx->membase_phys = 0;
1299	}
1300
1301	/* Don't disable bus-mastering if VFs are assigned */
1302	if (!pci_vfs_assigned(efx->pci_dev))
1303		pci_disable_device(efx->pci_dev);
1304}
1305
1306void ef4_set_default_rx_indir_table(struct ef4_nic *efx)
1307{
1308	size_t i;
1309
1310	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
1311		efx->rx_indir_table[i] =
1312			ethtool_rxfh_indir_default(i, efx->rss_spread);
1313}
1314
1315static unsigned int ef4_wanted_parallelism(struct ef4_nic *efx)
1316{
1317	cpumask_var_t thread_mask;
1318	unsigned int count;
1319	int cpu;
1320
1321	if (rss_cpus) {
1322		count = rss_cpus;
1323	} else {
1324		if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
1325			netif_warn(efx, probe, efx->net_dev,
1326				   "RSS disabled due to allocation failure\n");
1327			return 1;
1328		}
1329
1330		count = 0;
1331		for_each_online_cpu(cpu) {
1332			if (!cpumask_test_cpu(cpu, thread_mask)) {
1333				++count;
1334				cpumask_or(thread_mask, thread_mask,
1335					   topology_sibling_cpumask(cpu));
1336			}
1337		}
1338
1339		free_cpumask_var(thread_mask);
1340	}
1341
1342	if (count > EF4_MAX_RX_QUEUES) {
1343		netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
1344			       "Reducing number of rx queues from %u to %u.\n",
1345			       count, EF4_MAX_RX_QUEUES);
1346		count = EF4_MAX_RX_QUEUES;
1347	}
1348
1349	return count;
1350}
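/* Editorial illustration (hypothetical topology): on a box with 16 cores
 * and 2-way SMT (32 online CPUs), the first CPU of each core is counted
 * and its siblings are folded into thread_mask, giving count == 16: one
 * RX queue per physical core, subject to the EF4_MAX_RX_QUEUES cap.
 */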
1351
1352/* Probe the number and type of interrupts we are able to obtain, and
1353 * the resulting numbers of channels and RX queues.
1354 */
1355static int ef4_probe_interrupts(struct ef4_nic *efx)
1356{
1357	unsigned int extra_channels = 0;
1358	unsigned int i, j;
1359	int rc;
1360
1361	for (i = 0; i < EF4_MAX_EXTRA_CHANNELS; i++)
1362		if (efx->extra_channel_type[i])
1363			++extra_channels;
1364
1365	if (efx->interrupt_mode == EF4_INT_MODE_MSIX) {
1366		struct msix_entry xentries[EF4_MAX_CHANNELS];
1367		unsigned int n_channels;
1368
1369		n_channels = ef4_wanted_parallelism(efx);
1370		if (ef4_separate_tx_channels)
1371			n_channels *= 2;
1372		n_channels += extra_channels;
1373		n_channels = min(n_channels, efx->max_channels);
1374
1375		for (i = 0; i < n_channels; i++)
1376			xentries[i].entry = i;
1377		rc = pci_enable_msix_range(efx->pci_dev,
1378					   xentries, 1, n_channels);
1379		if (rc < 0) {
1380			/* Fall back to single channel MSI */
1381			efx->interrupt_mode = EF4_INT_MODE_MSI;
1382			netif_err(efx, drv, efx->net_dev,
1383				  "could not enable MSI-X\n");
1384		} else if (rc < n_channels) {
1385			netif_err(efx, drv, efx->net_dev,
1386				  "WARNING: Insufficient MSI-X vectors"
1387				  " available (%d < %u).\n", rc, n_channels);
1388			netif_err(efx, drv, efx->net_dev,
1389				  "WARNING: Performance may be reduced.\n");
1390			n_channels = rc;
1391		}
1392
1393		if (rc > 0) {
1394			efx->n_channels = n_channels;
1395			if (n_channels > extra_channels)
1396				n_channels -= extra_channels;
1397			if (ef4_separate_tx_channels) {
1398				efx->n_tx_channels = min(max(n_channels / 2,
1399							     1U),
1400							 efx->max_tx_channels);
1401				efx->n_rx_channels = max(n_channels -
1402							 efx->n_tx_channels,
1403							 1U);
1404			} else {
1405				efx->n_tx_channels = min(n_channels,
1406							 efx->max_tx_channels);
1407				efx->n_rx_channels = n_channels;
1408			}
1409			for (i = 0; i < efx->n_channels; i++)
1410				ef4_get_channel(efx, i)->irq =
1411					xentries[i].vector;
1412		}
1413	}
1414
1415	/* Try single interrupt MSI */
1416	if (efx->interrupt_mode == EF4_INT_MODE_MSI) {
1417		efx->n_channels = 1;
1418		efx->n_rx_channels = 1;
1419		efx->n_tx_channels = 1;
1420		rc = pci_enable_msi(efx->pci_dev);
1421		if (rc == 0) {
1422			ef4_get_channel(efx, 0)->irq = efx->pci_dev->irq;
1423		} else {
1424			netif_err(efx, drv, efx->net_dev,
1425				  "could not enable MSI\n");
1426			efx->interrupt_mode = EF4_INT_MODE_LEGACY;
1427		}
1428	}
1429
1430	/* Assume legacy interrupts */
1431	if (efx->interrupt_mode == EF4_INT_MODE_LEGACY) {
1432		efx->n_channels = 1 + (ef4_separate_tx_channels ? 1 : 0);
1433		efx->n_rx_channels = 1;
1434		efx->n_tx_channels = 1;
1435		efx->legacy_irq = efx->pci_dev->irq;
1436	}
1437
1438	/* Assign extra channels if possible */
1439	j = efx->n_channels;
1440	for (i = 0; i < EF4_MAX_EXTRA_CHANNELS; i++) {
1441		if (!efx->extra_channel_type[i])
1442			continue;
1443		if (efx->interrupt_mode != EF4_INT_MODE_MSIX ||
1444		    efx->n_channels <= extra_channels) {
1445			efx->extra_channel_type[i]->handle_no_channel(efx);
1446		} else {
1447			--j;
1448			ef4_get_channel(efx, j)->type =
1449				efx->extra_channel_type[i];
1450		}
1451	}
1452
1453	efx->rss_spread = efx->n_rx_channels;
1454
1455	return 0;
1456}
1457
1458static int ef4_soft_enable_interrupts(struct ef4_nic *efx)
1459{
1460	struct ef4_channel *channel, *end_channel;
1461	int rc;
1462
1463	BUG_ON(efx->state == STATE_DISABLED);
1464
1465	efx->irq_soft_enabled = true;
1466	smp_wmb();
1467
1468	ef4_for_each_channel(channel, efx) {
1469		if (!channel->type->keep_eventq) {
1470			rc = ef4_init_eventq(channel);
1471			if (rc)
1472				goto fail;
1473		}
1474		ef4_start_eventq(channel);
1475	}
1476
1477	return 0;
1478fail:
1479	end_channel = channel;
1480	ef4_for_each_channel(channel, efx) {
1481		if (channel == end_channel)
1482			break;
1483		ef4_stop_eventq(channel);
1484		if (!channel->type->keep_eventq)
1485			ef4_fini_eventq(channel);
1486	}
1487
1488	return rc;
1489}
1490
1491static void ef4_soft_disable_interrupts(struct ef4_nic *efx)
1492{
1493	struct ef4_channel *channel;
1494
1495	if (efx->state == STATE_DISABLED)
1496		return;
1497
1498	efx->irq_soft_enabled = false;
1499	smp_wmb();
1500
1501	if (efx->legacy_irq)
1502		synchronize_irq(efx->legacy_irq);
1503
1504	ef4_for_each_channel(channel, efx) {
1505		if (channel->irq)
1506			synchronize_irq(channel->irq);
1507
1508		ef4_stop_eventq(channel);
1509		if (!channel->type->keep_eventq)
1510			ef4_fini_eventq(channel);
1511	}
1512}
1513
1514static int ef4_enable_interrupts(struct ef4_nic *efx)
1515{
1516	struct ef4_channel *channel, *end_channel;
1517	int rc;
1518
1519	BUG_ON(efx->state == STATE_DISABLED);
1520
1521	if (efx->eeh_disabled_legacy_irq) {
1522		enable_irq(efx->legacy_irq);
1523		efx->eeh_disabled_legacy_irq = false;
1524	}
1525
1526	efx->type->irq_enable_master(efx);
1527
1528	ef4_for_each_channel(channel, efx) {
1529		if (channel->type->keep_eventq) {
1530			rc = ef4_init_eventq(channel);
1531			if (rc)
1532				goto fail;
1533		}
1534	}
1535
1536	rc = ef4_soft_enable_interrupts(efx);
1537	if (rc)
1538		goto fail;
1539
1540	return 0;
1541
1542fail:
1543	end_channel = channel;
1544	ef4_for_each_channel(channel, efx) {
1545		if (channel == end_channel)
1546			break;
1547		if (channel->type->keep_eventq)
1548			ef4_fini_eventq(channel);
1549	}
1550
1551	efx->type->irq_disable_non_ev(efx);
1552
1553	return rc;
1554}
1555
1556static void ef4_disable_interrupts(struct ef4_nic *efx)
1557{
1558	struct ef4_channel *channel;
1559
1560	ef4_soft_disable_interrupts(efx);
1561
1562	ef4_for_each_channel(channel, efx) {
1563		if (channel->type->keep_eventq)
1564			ef4_fini_eventq(channel);
1565	}
1566
1567	efx->type->irq_disable_non_ev(efx);
1568}
1569
1570static void ef4_remove_interrupts(struct ef4_nic *efx)
1571{
1572	struct ef4_channel *channel;
1573
1574	/* Remove MSI/MSI-X interrupts */
1575	ef4_for_each_channel(channel, efx)
1576		channel->irq = 0;
1577	pci_disable_msi(efx->pci_dev);
1578	pci_disable_msix(efx->pci_dev);
1579
1580	/* Remove legacy interrupt */
1581	efx->legacy_irq = 0;
1582}
1583
1584static void ef4_set_channels(struct ef4_nic *efx)
1585{
1586	struct ef4_channel *channel;
1587	struct ef4_tx_queue *tx_queue;
1588
1589	efx->tx_channel_offset =
1590		ef4_separate_tx_channels ?
1591		efx->n_channels - efx->n_tx_channels : 0;
1592
1593	/* We need to mark which channels really have RX and TX
1594	 * queues, and adjust the TX queue numbers if we have separate
1595	 * RX-only and TX-only channels.
1596	 */
1597	ef4_for_each_channel(channel, efx) {
1598		if (channel->channel < efx->n_rx_channels)
1599			channel->rx_queue.core_index = channel->channel;
1600		else
1601			channel->rx_queue.core_index = -1;
1602
1603		ef4_for_each_channel_tx_queue(tx_queue, channel)
1604			tx_queue->queue -= (efx->tx_channel_offset *
1605					    EF4_TXQ_TYPES);
1606	}
1607}
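/* Editorial illustration: with ef4_separate_tx_channels, n_channels == 8
 * and n_tx_channels == 4, tx_channel_offset is 4; channels 0-3 keep RX
 * queues (core_index == channel number) while the TX queue numbers on
 * channels 4-7 are shifted down by 4 * EF4_TXQ_TYPES so the TX-only
 * channels still present queues numbered from 0 to the core.
 */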
1608
1609static int ef4_probe_nic(struct ef4_nic *efx)
1610{
1611	int rc;
1612
1613	netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");
1614
1615	/* Carry out hardware-type specific initialisation */
1616	rc = efx->type->probe(efx);
1617	if (rc)
1618		return rc;
1619
1620	do {
1621		if (!efx->max_channels || !efx->max_tx_channels) {
1622			netif_err(efx, drv, efx->net_dev,
1623				  "Insufficient resources to allocate"
1624				  " any channels\n");
1625			rc = -ENOSPC;
1626			goto fail1;
1627		}
1628
1629		/* Determine the number of channels and queues by trying
1630		 * to hook in MSI-X interrupts.
1631		 */
1632		rc = ef4_probe_interrupts(efx);
1633		if (rc)
1634			goto fail1;
1635
1636		ef4_set_channels(efx);
1637
1638		/* dimension_resources can fail with EAGAIN */
1639		rc = efx->type->dimension_resources(efx);
1640		if (rc != 0 && rc != -EAGAIN)
1641			goto fail2;
1642
1643		if (rc == -EAGAIN)
1644			/* try again with new max_channels */
1645			ef4_remove_interrupts(efx);
1646
1647	} while (rc == -EAGAIN);
1648
1649	if (efx->n_channels > 1)
1650		netdev_rss_key_fill(&efx->rx_hash_key,
1651				    sizeof(efx->rx_hash_key));
1652	ef4_set_default_rx_indir_table(efx);
1653
1654	netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
1655	netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
1656
1657	/* Initialise the interrupt moderation settings */
1658	efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000);
1659	ef4_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
1660				true);
1661
1662	return 0;
1663
1664fail2:
1665	ef4_remove_interrupts(efx);
1666fail1:
1667	efx->type->remove(efx);
1668	return rc;
1669}
1670
1671static void ef4_remove_nic(struct ef4_nic *efx)
1672{
1673	netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");
1674
1675	ef4_remove_interrupts(efx);
1676	efx->type->remove(efx);
1677}
1678
1679static int ef4_probe_filters(struct ef4_nic *efx)
1680{
1681	int rc;
1682
1683	spin_lock_init(&efx->filter_lock);
1684	init_rwsem(&efx->filter_sem);
1685	mutex_lock(&efx->mac_lock);
1686	down_write(&efx->filter_sem);
1687	rc = efx->type->filter_table_probe(efx);
1688	if (rc)
1689		goto out_unlock;
1690
1691#ifdef CONFIG_RFS_ACCEL
1692	if (efx->type->offload_features & NETIF_F_NTUPLE) {
1693		struct ef4_channel *channel;
1694		int i, success = 1;
1695
1696		ef4_for_each_channel(channel, efx) {
1697			channel->rps_flow_id =
1698				kcalloc(efx->type->max_rx_ip_filters,
1699					sizeof(*channel->rps_flow_id),
1700					GFP_KERNEL);
1701			if (!channel->rps_flow_id)
1702				success = 0;
1703			else
1704				for (i = 0;
1705				     i < efx->type->max_rx_ip_filters;
1706				     ++i)
1707					channel->rps_flow_id[i] =
1708						RPS_FLOW_ID_INVALID;
1709		}
1710
1711		if (!success) {
1712			ef4_for_each_channel(channel, efx)
1713				kfree(channel->rps_flow_id);
1714			efx->type->filter_table_remove(efx);
1715			rc = -ENOMEM;
1716			goto out_unlock;
1717		}
1718
1719		efx->rps_expire_index = efx->rps_expire_channel = 0;
1720	}
1721#endif
1722out_unlock:
1723	up_write(&efx->filter_sem);
1724	mutex_unlock(&efx->mac_lock);
1725	return rc;
1726}
1727
1728static void ef4_remove_filters(struct ef4_nic *efx)
1729{
1730#ifdef CONFIG_RFS_ACCEL
1731	struct ef4_channel *channel;
1732
1733	ef4_for_each_channel(channel, efx)
1734		kfree(channel->rps_flow_id);
1735#endif
1736	down_write(&efx->filter_sem);
1737	efx->type->filter_table_remove(efx);
1738	up_write(&efx->filter_sem);
1739}
1740
1741static void ef4_restore_filters(struct ef4_nic *efx)
1742{
1743	down_read(&efx->filter_sem);
1744	efx->type->filter_table_restore(efx);
1745	up_read(&efx->filter_sem);
1746}
1747
1748/**************************************************************************
1749 *
1750 * NIC startup/shutdown
1751 *
1752 *************************************************************************/
1753
1754static int ef4_probe_all(struct ef4_nic *efx)
1755{
1756	int rc;
1757
1758	rc = ef4_probe_nic(efx);
1759	if (rc) {
1760		netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
1761		goto fail1;
1762	}
1763
1764	rc = ef4_probe_port(efx);
1765	if (rc) {
1766		netif_err(efx, probe, efx->net_dev, "failed to create port\n");
1767		goto fail2;
1768	}
1769
1770	BUILD_BUG_ON(EF4_DEFAULT_DMAQ_SIZE < EF4_RXQ_MIN_ENT);
1771	if (WARN_ON(EF4_DEFAULT_DMAQ_SIZE < EF4_TXQ_MIN_ENT(efx))) {
1772		rc = -EINVAL;
1773		goto fail3;
1774	}
1775	efx->rxq_entries = efx->txq_entries = EF4_DEFAULT_DMAQ_SIZE;
1776
1777	rc = ef4_probe_filters(efx);
1778	if (rc) {
1779		netif_err(efx, probe, efx->net_dev,
1780			  "failed to create filter tables\n");
1781		goto fail4;
1782	}
1783
1784	rc = ef4_probe_channels(efx);
1785	if (rc)
1786		goto fail5;
1787
1788	return 0;
1789
1790 fail5:
1791	ef4_remove_filters(efx);
1792 fail4:
1793 fail3:
1794	ef4_remove_port(efx);
1795 fail2:
1796	ef4_remove_nic(efx);
1797 fail1:
1798	return rc;
1799}
1800
1801/* If the interface is supposed to be running but is not, start
1802 * the hardware and software data path, regular activity for the port
1803 * (MAC statistics, link polling, etc.) and schedule the port to be
1804 * reconfigured.  Interrupts must already be enabled.  This function
1805 * is safe to call multiple times, so long as the NIC is not disabled.
1806 * Requires the RTNL lock.
1807 */
1808static void ef4_start_all(struct ef4_nic *efx)
1809{
1810	EF4_ASSERT_RESET_SERIALISED(efx);
1811	BUG_ON(efx->state == STATE_DISABLED);
1812
1813	/* Check that it is appropriate to restart the interface. All
1814	 * of these flags are safe to read under just the rtnl lock */
1815	if (efx->port_enabled || !netif_running(efx->net_dev) ||
1816	    efx->reset_pending)
1817		return;
1818
1819	ef4_start_port(efx);
1820	ef4_start_datapath(efx);
1821
1822	/* Start the hardware monitor if there is one */
1823	if (efx->type->monitor != NULL)
1824		queue_delayed_work(efx->workqueue, &efx->monitor_work,
1825				   ef4_monitor_interval);
1826
1827	efx->type->start_stats(efx);
1828	efx->type->pull_stats(efx);
1829	spin_lock_bh(&efx->stats_lock);
1830	efx->type->update_stats(efx, NULL, NULL);
1831	spin_unlock_bh(&efx->stats_lock);
1832}
1833
1834/* Quiesce the hardware and software data path, and regular activity
1835 * for the port without bringing the link down.  Safe to call multiple
1836 * times with the NIC in almost any state, but interrupts should be
1837 * enabled.  Requires the RTNL lock.
1838 */
1839static void ef4_stop_all(struct ef4_nic *efx)
1840{
1841	EF4_ASSERT_RESET_SERIALISED(efx);
1842
1843	/* port_enabled can be read safely under the rtnl lock */
1844	if (!efx->port_enabled)
1845		return;
1846
1847	/* update stats before we go down so we can accurately count
1848	 * rx_nodesc_drops
1849	 */
1850	efx->type->pull_stats(efx);
1851	spin_lock_bh(&efx->stats_lock);
1852	efx->type->update_stats(efx, NULL, NULL);
1853	spin_unlock_bh(&efx->stats_lock);
1854	efx->type->stop_stats(efx);
1855	ef4_stop_port(efx);
1856
1857	/* Stop the kernel transmit interface.  This is only valid if
1858	 * the device is stopped or detached; otherwise the watchdog
1859	 * may fire immediately.
1860	 */
1861	WARN_ON(netif_running(efx->net_dev) &&
1862		netif_device_present(efx->net_dev));
1863	netif_tx_disable(efx->net_dev);
1864
1865	ef4_stop_datapath(efx);
1866}
1867
1868static void ef4_remove_all(struct ef4_nic *efx)
1869{
1870	ef4_remove_channels(efx);
1871	ef4_remove_filters(efx);
1872	ef4_remove_port(efx);
1873	ef4_remove_nic(efx);
1874}
1875
1876/**************************************************************************
1877 *
1878 * Interrupt moderation
1879 *
1880 **************************************************************************/
1881unsigned int ef4_usecs_to_ticks(struct ef4_nic *efx, unsigned int usecs)
1882{
1883	if (usecs == 0)
1884		return 0;
1885	if (usecs * 1000 < efx->timer_quantum_ns)
1886		return 1; /* never round down to 0 */
1887	return usecs * 1000 / efx->timer_quantum_ns;
1888}
1889
1890unsigned int ef4_ticks_to_usecs(struct ef4_nic *efx, unsigned int ticks)
1891{
1892	/* We must round up when converting ticks to microseconds
1893	 * because we round down when converting the other way.
1894	 */
1895	return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000);
1896}
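/* Worked example (editorial; 4800ns is an assumed timer_quantum_ns):
 * 20us converts to 20000 / 4800 = 4 ticks (rounded down), and 4 ticks
 * converts back to DIV_ROUND_UP(4 * 4800, 1000) = 20us, so a non-zero
 * setting never collapses to zero on a round trip.
 */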
1897
1898/* Set interrupt moderation parameters */
1899int ef4_init_irq_moderation(struct ef4_nic *efx, unsigned int tx_usecs,
1900			    unsigned int rx_usecs, bool rx_adaptive,
1901			    bool rx_may_override_tx)
1902{
1903	struct ef4_channel *channel;
1904	unsigned int timer_max_us;
1905
1906	EF4_ASSERT_RESET_SERIALISED(efx);
1907
1908	timer_max_us = efx->timer_max_ns / 1000;
1909
1910	if (tx_usecs > timer_max_us || rx_usecs > timer_max_us)
1911		return -EINVAL;
1912
1913	if (tx_usecs != rx_usecs && efx->tx_channel_offset == 0 &&
1914	    !rx_may_override_tx) {
1915		netif_err(efx, drv, efx->net_dev, "Channels are shared. "
1916			  "RX and TX IRQ moderation must be equal\n");
1917		return -EINVAL;
1918	}
1919
1920	efx->irq_rx_adaptive = rx_adaptive;
1921	efx->irq_rx_moderation_us = rx_usecs;
1922	ef4_for_each_channel(channel, efx) {
1923		if (ef4_channel_has_rx_queue(channel))
1924			channel->irq_moderation_us = rx_usecs;
1925		else if (ef4_channel_has_tx_queues(channel))
1926			channel->irq_moderation_us = tx_usecs;
1927	}
1928
1929	return 0;
1930}
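/* Usage sketch (editorial): a caller such as an ethtool coalesce handler
 * might apply 50us adaptive RX moderation on combined channels with
 *
 *	rc = ef4_init_irq_moderation(efx, tx_usecs, 50, true, true);
 *
 * On shared RX/TX channels the RX value wins because rx_may_override_tx
 * is true; passing false instead makes unequal values fail with -EINVAL.
 */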
1931
1932void ef4_get_irq_moderation(struct ef4_nic *efx, unsigned int *tx_usecs,
1933			    unsigned int *rx_usecs, bool *rx_adaptive)
1934{
1935	*rx_adaptive = efx->irq_rx_adaptive;
1936	*rx_usecs = efx->irq_rx_moderation_us;
1937
1938	/* If channels are shared between RX and TX, so is IRQ
1939	 * moderation.  Otherwise, IRQ moderation is the same for all
1940	 * TX channels and is not adaptive.
1941	 */
1942	if (efx->tx_channel_offset == 0) {
1943		*tx_usecs = *rx_usecs;
1944	} else {
1945		struct ef4_channel *tx_channel;
1946
1947		tx_channel = efx->channel[efx->tx_channel_offset];
1948		*tx_usecs = tx_channel->irq_moderation_us;
1949	}
1950}
1951
1952/**************************************************************************
1953 *
1954 * Hardware monitor
1955 *
1956 **************************************************************************/
1957
1958/* Run periodically off the general workqueue */
1959static void ef4_monitor(struct work_struct *data)
1960{
1961	struct ef4_nic *efx = container_of(data, struct ef4_nic,
1962					   monitor_work.work);
1963
1964	netif_vdbg(efx, timer, efx->net_dev,
1965		   "hardware monitor executing on CPU %d\n",
1966		   raw_smp_processor_id());
1967	BUG_ON(efx->type->monitor == NULL);
1968
1969	/* If the mac_lock is already held then it is likely a port
1970	 * reconfiguration is already in place, which will likely do
1971	 * most of the work of monitor() anyway. */
1972	if (mutex_trylock(&efx->mac_lock)) {
1973		if (efx->port_enabled)
1974			efx->type->monitor(efx);
1975		mutex_unlock(&efx->mac_lock);
1976	}
1977
1978	queue_delayed_work(efx->workqueue, &efx->monitor_work,
1979			   ef4_monitor_interval);
1980}
1981
1982/**************************************************************************
1983 *
1984 * ioctls
1985 *
1986 *************************************************************************/
1987
1988/* Net device ioctl
1989 * Context: process, rtnl_lock() held.
1990 */
1991static int ef4_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
1992{
1993	struct ef4_nic *efx = netdev_priv(net_dev);
1994	struct mii_ioctl_data *data = if_mii(ifr);
1995
1996	/* Convert phy_id from older PRTAD/DEVAD format */
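	/* Editorial note: the test below matches IDs carrying the old-format
	 * flag 0x0400; XORing with MDIO_PHY_ID_C45 | 0x0400 clears that flag
	 * and sets the clause-45 bit in one step, leaving the PRTAD/DEVAD
	 * address bits untouched.
	 */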
1997	if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
1998	    (data->phy_id & 0xfc00) == 0x0400)
1999		data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;
2000
2001	return mdio_mii_ioctl(&efx->mdio, data, cmd);
2002}
2003
2004/**************************************************************************
2005 *
2006 * NAPI interface
2007 *
2008 **************************************************************************/
2009
2010static void ef4_init_napi_channel(struct ef4_channel *channel)
2011{
2012	struct ef4_nic *efx = channel->efx;
2013
2014	channel->napi_dev = efx->net_dev;
2015	netif_napi_add(channel->napi_dev, &channel->napi_str, ef4_poll);
2016}
2017
2018static void ef4_init_napi(struct ef4_nic *efx)
2019{
2020	struct ef4_channel *channel;
2021
2022	ef4_for_each_channel(channel, efx)
2023		ef4_init_napi_channel(channel);
2024}
2025
2026static void ef4_fini_napi_channel(struct ef4_channel *channel)
2027{
2028	if (channel->napi_dev)
2029		netif_napi_del(&channel->napi_str);
2030
2031	channel->napi_dev = NULL;
2032}
2033
2034static void ef4_fini_napi(struct ef4_nic *efx)
2035{
2036	struct ef4_channel *channel;
2037
2038	ef4_for_each_channel(channel, efx)
2039		ef4_fini_napi_channel(channel);
2040}
2041
2042/**************************************************************************
2043 *
2044 * Kernel net device interface
2045 *
2046 *************************************************************************/
2047
2048/* Context: process, rtnl_lock() held. */
2049int ef4_net_open(struct net_device *net_dev)
2050{
2051	struct ef4_nic *efx = netdev_priv(net_dev);
2052	int rc;
2053
2054	netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
2055		  raw_smp_processor_id());
2056
2057	rc = ef4_check_disabled(efx);
2058	if (rc)
2059		return rc;
2060	if (efx->phy_mode & PHY_MODE_SPECIAL)
2061		return -EBUSY;
2062
2063	/* Notify the kernel of the link state polled during driver load,
2064	 * before the monitor starts running */
2065	ef4_link_status_changed(efx);
2066
2067	ef4_start_all(efx);
2068	ef4_selftest_async_start(efx);
2069	return 0;
2070}
2071
2072/* Context: process, rtnl_lock() held.
2073 * Note that the kernel will ignore our return code; this method
2074 * should really be a void.
2075 */
2076int ef4_net_stop(struct net_device *net_dev)
2077{
2078	struct ef4_nic *efx = netdev_priv(net_dev);
2079
2080	netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
2081		  raw_smp_processor_id());
2082
2083	/* Stop the device and flush all the channels */
2084	ef4_stop_all(efx);
2085
2086	return 0;
2087}
2088
2089/* Context: process, dev_base_lock or RTNL held, non-blocking. */
2090static void ef4_net_stats(struct net_device *net_dev,
2091			  struct rtnl_link_stats64 *stats)
2092{
2093	struct ef4_nic *efx = netdev_priv(net_dev);
2094
2095	spin_lock_bh(&efx->stats_lock);
2096	efx->type->update_stats(efx, NULL, stats);
2097	spin_unlock_bh(&efx->stats_lock);
2098}
2099
2100/* Context: netif_tx_lock held, BHs disabled. */
2101static void ef4_watchdog(struct net_device *net_dev, unsigned int txqueue)
2102{
2103	struct ef4_nic *efx = netdev_priv(net_dev);
2104
2105	netif_err(efx, tx_err, efx->net_dev,
2106		  "TX stuck with port_enabled=%d: resetting channels\n",
2107		  efx->port_enabled);
2108
2109	ef4_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
2110}
2111
2112
2113/* Context: process, rtnl_lock() held. */
2114static int ef4_change_mtu(struct net_device *net_dev, int new_mtu)
2115{
2116	struct ef4_nic *efx = netdev_priv(net_dev);
2117	int rc;
2118
2119	rc = ef4_check_disabled(efx);
2120	if (rc)
2121		return rc;
2122
2123	netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
2124
2125	ef4_device_detach_sync(efx);
2126	ef4_stop_all(efx);
2127
2128	mutex_lock(&efx->mac_lock);
2129	net_dev->mtu = new_mtu;
2130	ef4_mac_reconfigure(efx);
2131	mutex_unlock(&efx->mac_lock);
2132
2133	ef4_start_all(efx);
2134	netif_device_attach(efx->net_dev);
2135	return 0;
2136}
2137
2138static int ef4_set_mac_address(struct net_device *net_dev, void *data)
2139{
2140	struct ef4_nic *efx = netdev_priv(net_dev);
2141	struct sockaddr *addr = data;
2142	u8 *new_addr = addr->sa_data;
2143	u8 old_addr[6];
2144	int rc;
2145
2146	if (!is_valid_ether_addr(new_addr)) {
2147		netif_err(efx, drv, efx->net_dev,
2148			  "invalid ethernet MAC address requested: %pM\n",
2149			  new_addr);
2150		return -EADDRNOTAVAIL;
2151	}
2152
2153	/* save old address */
2154	ether_addr_copy(old_addr, net_dev->dev_addr);
2155	eth_hw_addr_set(net_dev, new_addr);
2156	if (efx->type->set_mac_address) {
2157		rc = efx->type->set_mac_address(efx);
2158		if (rc) {
2159			eth_hw_addr_set(net_dev, old_addr);
2160			return rc;
2161		}
2162	}
2163
2164	/* Reconfigure the MAC */
2165	mutex_lock(&efx->mac_lock);
2166	ef4_mac_reconfigure(efx);
2167	mutex_unlock(&efx->mac_lock);
2168
2169	return 0;
2170}
2171
2172/* Context: netif_addr_lock held, BHs disabled. */
2173static void ef4_set_rx_mode(struct net_device *net_dev)
2174{
2175	struct ef4_nic *efx = netdev_priv(net_dev);
2176
2177	if (efx->port_enabled)
2178		queue_work(efx->workqueue, &efx->mac_work);
2179	/* Otherwise ef4_start_port() will do this */
2180}
2181
2182static int ef4_set_features(struct net_device *net_dev, netdev_features_t data)
2183{
2184	struct ef4_nic *efx = netdev_priv(net_dev);
2185	int rc;
2186
2187	/* If disabling RX n-tuple filtering, clear existing filters */
2188	if (net_dev->features & ~data & NETIF_F_NTUPLE) {
2189		rc = efx->type->filter_clear_rx(efx, EF4_FILTER_PRI_MANUAL);
2190		if (rc)
2191			return rc;
2192	}
2193
2194	/* If Rx VLAN filter is changed, update filters via mac_reconfigure */
2195	if ((net_dev->features ^ data) & NETIF_F_HW_VLAN_CTAG_FILTER) {
2196		/* ef4_set_rx_mode() will schedule MAC work to update filters
 2197		 * when the new features are finally set in net_dev.
2198		 */
2199		ef4_set_rx_mode(net_dev);
2200	}
2201
2202	return 0;
2203}
2204
2205static const struct net_device_ops ef4_netdev_ops = {
2206	.ndo_open		= ef4_net_open,
2207	.ndo_stop		= ef4_net_stop,
2208	.ndo_get_stats64	= ef4_net_stats,
2209	.ndo_tx_timeout		= ef4_watchdog,
2210	.ndo_start_xmit		= ef4_hard_start_xmit,
2211	.ndo_validate_addr	= eth_validate_addr,
2212	.ndo_eth_ioctl		= ef4_ioctl,
2213	.ndo_change_mtu		= ef4_change_mtu,
2214	.ndo_set_mac_address	= ef4_set_mac_address,
2215	.ndo_set_rx_mode	= ef4_set_rx_mode,
2216	.ndo_set_features	= ef4_set_features,
2217	.ndo_setup_tc		= ef4_setup_tc,
2218#ifdef CONFIG_RFS_ACCEL
2219	.ndo_rx_flow_steer	= ef4_filter_rfs,
2220#endif
2221};
2222
2223static void ef4_update_name(struct ef4_nic *efx)
2224{
2225	strcpy(efx->name, efx->net_dev->name);
2226	ef4_mtd_rename(efx);
2227	ef4_set_channel_names(efx);
2228}
2229
2230static int ef4_netdev_event(struct notifier_block *this,
2231			    unsigned long event, void *ptr)
2232{
2233	struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
2234
2235	if ((net_dev->netdev_ops == &ef4_netdev_ops) &&
2236	    event == NETDEV_CHANGENAME)
2237		ef4_update_name(netdev_priv(net_dev));
2238
2239	return NOTIFY_DONE;
2240}
2241
2242static struct notifier_block ef4_netdev_notifier = {
2243	.notifier_call = ef4_netdev_event,
2244};
2245
2246static ssize_t
2247phy_type_show(struct device *dev, struct device_attribute *attr, char *buf)
2248{
2249	struct ef4_nic *efx = dev_get_drvdata(dev);
2250	return sprintf(buf, "%d\n", efx->phy_type);
2251}
2252static DEVICE_ATTR_RO(phy_type);
2253
2254static int ef4_register_netdev(struct ef4_nic *efx)
2255{
2256	struct net_device *net_dev = efx->net_dev;
2257	struct ef4_channel *channel;
2258	int rc;
2259
2260	net_dev->watchdog_timeo = 5 * HZ;
2261	net_dev->irq = efx->pci_dev->irq;
2262	net_dev->netdev_ops = &ef4_netdev_ops;
2263	net_dev->ethtool_ops = &ef4_ethtool_ops;
2264	netif_set_tso_max_segs(net_dev, EF4_TSO_MAX_SEGS);
2265	net_dev->min_mtu = EF4_MIN_MTU;
2266	net_dev->max_mtu = EF4_MAX_MTU;
2267
2268	rtnl_lock();
2269
2270	/* Enable resets to be scheduled and check whether any were
2271	 * already requested.  If so, the NIC is probably hosed so we
2272	 * abort.
2273	 */
2274	efx->state = STATE_READY;
2275	smp_mb(); /* ensure we change state before checking reset_pending */
2276	if (efx->reset_pending) {
2277		netif_err(efx, probe, efx->net_dev,
2278			  "aborting probe due to scheduled reset\n");
2279		rc = -EIO;
2280		goto fail_locked;
2281	}
2282
2283	rc = dev_alloc_name(net_dev, net_dev->name);
2284	if (rc < 0)
2285		goto fail_locked;
2286	ef4_update_name(efx);
2287
2288	/* Always start with carrier off; PHY events will detect the link */
2289	netif_carrier_off(net_dev);
2290
2291	rc = register_netdevice(net_dev);
2292	if (rc)
2293		goto fail_locked;
2294
2295	ef4_for_each_channel(channel, efx) {
2296		struct ef4_tx_queue *tx_queue;
2297		ef4_for_each_channel_tx_queue(tx_queue, channel)
2298			ef4_init_tx_queue_core_txq(tx_queue);
2299	}
2300
2301	ef4_associate(efx);
2302
2303	rtnl_unlock();
2304
2305	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
2306	if (rc) {
2307		netif_err(efx, drv, efx->net_dev,
2308			  "failed to init net dev attributes\n");
2309		goto fail_registered;
2310	}
2311	return 0;
2312
2313fail_registered:
2314	rtnl_lock();
2315	ef4_dissociate(efx);
2316	unregister_netdevice(net_dev);
2317fail_locked:
2318	efx->state = STATE_UNINIT;
2319	rtnl_unlock();
2320	netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
2321	return rc;
2322}
2323
2324static void ef4_unregister_netdev(struct ef4_nic *efx)
2325{
2326	if (!efx->net_dev)
2327		return;
2328
2329	BUG_ON(netdev_priv(efx->net_dev) != efx);
2330
2331	if (ef4_dev_registered(efx)) {
2332		strscpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
2333		device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
2334		unregister_netdev(efx->net_dev);
2335	}
2336}
2337
2338/**************************************************************************
2339 *
2340 * Device reset and suspend
2341 *
2342 **************************************************************************/
2343
2344/* Tears down the entire software state and most of the hardware state
2345 * before reset.  */
2346void ef4_reset_down(struct ef4_nic *efx, enum reset_type method)
2347{
2348	EF4_ASSERT_RESET_SERIALISED(efx);
2349
2350	ef4_stop_all(efx);
2351	ef4_disable_interrupts(efx);
2352
2353	mutex_lock(&efx->mac_lock);
2354	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
2355	    method != RESET_TYPE_DATAPATH)
2356		efx->phy_op->fini(efx);
2357	efx->type->fini(efx);
2358}
2359
2360/* This function will always ensure that the locks acquired in
2361 * ef4_reset_down() are released. A failure return code indicates
2362 * that we were unable to reinitialise the hardware, and the
2363 * driver should be disabled. If ok is false, then the rx and tx
 2364 * engines are not restarted, pending a RESET_TYPE_DISABLE. */
2365int ef4_reset_up(struct ef4_nic *efx, enum reset_type method, bool ok)
2366{
2367	int rc;
2368
2369	EF4_ASSERT_RESET_SERIALISED(efx);
2370
2371	/* Ensure that SRAM is initialised even if we're disabling the device */
2372	rc = efx->type->init(efx);
2373	if (rc) {
2374		netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
2375		goto fail;
2376	}
2377
2378	if (!ok)
2379		goto fail;
2380
2381	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
2382	    method != RESET_TYPE_DATAPATH) {
2383		rc = efx->phy_op->init(efx);
2384		if (rc)
2385			goto fail;
2386		rc = efx->phy_op->reconfigure(efx);
2387		if (rc && rc != -EPERM)
2388			netif_err(efx, drv, efx->net_dev,
2389				  "could not restore PHY settings\n");
2390	}
2391
2392	rc = ef4_enable_interrupts(efx);
2393	if (rc)
2394		goto fail;
2395
 2396	/* ef4_restore_filters() takes filter_sem itself; nesting another
 2397	 * down_read() here could deadlock against a queued writer. */
 2398	ef4_restore_filters(efx);
2399
2400	mutex_unlock(&efx->mac_lock);
2401
2402	ef4_start_all(efx);
2403
2404	return 0;
2405
2406fail:
2407	efx->port_initialized = false;
2408
2409	mutex_unlock(&efx->mac_lock);
2410
2411	return rc;
2412}
2413
2414/* Reset the NIC using the specified method.  Note that the reset may
2415 * fail, in which case the card will be left in an unusable state.
2416 *
2417 * Caller must hold the rtnl_lock.
2418 */
2419int ef4_reset(struct ef4_nic *efx, enum reset_type method)
2420{
2421	int rc, rc2;
2422	bool disabled;
2423
2424	netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
2425		   RESET_TYPE(method));
2426
2427	ef4_device_detach_sync(efx);
2428	ef4_reset_down(efx, method);
2429
2430	rc = efx->type->reset(efx, method);
2431	if (rc) {
2432		netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
2433		goto out;
2434	}
2435
2436	/* Clear flags for the scopes we covered.  We assume the NIC and
2437	 * driver are now quiescent so that there is no race here.
2438	 */
2439	if (method < RESET_TYPE_MAX_METHOD)
2440		efx->reset_pending &= -(1 << (method + 1));
2441	else /* it doesn't fit into the well-ordered scope hierarchy */
2442		__clear_bit(method, &efx->reset_pending);
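	/* Editorial illustration: -(1 << (method + 1)) equals
	 * ~((1 << (method + 1)) - 1), a mask of all bits strictly above
	 * `method`, so the AND clears every pending reset of equal or
	 * narrower scope; e.g. pending 0b1010 with method == 2 leaves only
	 * bit 3 set.
	 */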
2443
2444	/* Reinitialise bus-mastering, which may have been turned off before
2445	 * the reset was scheduled. This is still appropriate, even in the
2446	 * RESET_TYPE_DISABLE since this driver generally assumes the hardware
2447	 * can respond to requests. */
2448	pci_set_master(efx->pci_dev);
2449
2450out:
2451	/* Leave device stopped if necessary */
2452	disabled = rc ||
2453		method == RESET_TYPE_DISABLE ||
2454		method == RESET_TYPE_RECOVER_OR_DISABLE;
2455	rc2 = ef4_reset_up(efx, method, !disabled);
2456	if (rc2) {
2457		disabled = true;
2458		if (!rc)
2459			rc = rc2;
2460	}
2461
2462	if (disabled) {
2463		dev_close(efx->net_dev);
2464		netif_err(efx, drv, efx->net_dev, "has been disabled\n");
2465		efx->state = STATE_DISABLED;
2466	} else {
2467		netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
2468		netif_device_attach(efx->net_dev);
2469	}
2470	return rc;
2471}
2472
2473/* Try recovery mechanisms.
2474 * For now only EEH is supported.
2475 * Returns 0 if the recovery mechanisms are unsuccessful.
2476 * Returns a non-zero value otherwise.
2477 */
2478int ef4_try_recovery(struct ef4_nic *efx)
2479{
2480#ifdef CONFIG_EEH
2481	/* A PCI error can occur and not be seen by EEH because nothing
2482	 * happens on the PCI bus. In this case the driver may fail and
2483	 * schedule a 'recover or reset', leading to this recovery handler.
2484	 * Manually call the eeh failure check function.
2485	 */
2486	struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev);
2487	if (eeh_dev_check_failure(eehdev)) {
2488		/* The EEH mechanisms will handle the error and reset the
2489		 * device if necessary.
2490		 */
2491		return 1;
2492	}
2493#endif
2494	return 0;
2495}
2496
2497/* The worker thread exists so that code that cannot sleep can
2498 * schedule a reset for later.
2499 */
2500static void ef4_reset_work(struct work_struct *data)
2501{
2502	struct ef4_nic *efx = container_of(data, struct ef4_nic, reset_work);
2503	unsigned long pending;
2504	enum reset_type method;
2505
2506	pending = READ_ONCE(efx->reset_pending);
2507	method = fls(pending) - 1;
2508
2509	if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
2510	     method == RESET_TYPE_RECOVER_OR_ALL) &&
2511	    ef4_try_recovery(efx))
2512		return;
2513
2514	if (!pending)
2515		return;
2516
2517	rtnl_lock();
2518
2519	/* We checked the state in ef4_schedule_reset() but it may
2520	 * have changed by now.  Now that we have the RTNL lock,
2521	 * it cannot change again.
2522	 */
2523	if (efx->state == STATE_READY)
2524		(void)ef4_reset(efx, method);
2525
2526	rtnl_unlock();
2527}
2528
2529void ef4_schedule_reset(struct ef4_nic *efx, enum reset_type type)
2530{
2531	enum reset_type method;
2532
2533	if (efx->state == STATE_RECOVERY) {
2534		netif_dbg(efx, drv, efx->net_dev,
2535			  "recovering: skip scheduling %s reset\n",
2536			  RESET_TYPE(type));
2537		return;
2538	}
2539
2540	switch (type) {
2541	case RESET_TYPE_INVISIBLE:
2542	case RESET_TYPE_ALL:
2543	case RESET_TYPE_RECOVER_OR_ALL:
2544	case RESET_TYPE_WORLD:
2545	case RESET_TYPE_DISABLE:
2546	case RESET_TYPE_RECOVER_OR_DISABLE:
2547	case RESET_TYPE_DATAPATH:
2548		method = type;
2549		netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
2550			  RESET_TYPE(method));
2551		break;
2552	default:
2553		method = efx->type->map_reset_reason(type);
2554		netif_dbg(efx, drv, efx->net_dev,
2555			  "scheduling %s reset for %s\n",
2556			  RESET_TYPE(method), RESET_TYPE(type));
2557		break;
2558	}
2559
2560	set_bit(method, &efx->reset_pending);
2561	smp_mb(); /* ensure we change reset_pending before checking state */
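	/* Concretely: the barrier orders the set_bit() above before the
	 * state read below.  The path that moves the device to STATE_READY
	 * is expected to make the matching transition (set the state, then
	 * re-check reset_pending), so either this function observes
	 * STATE_READY and queues the work, or that path observes our flag;
	 * the reset cannot fall through the gap between the two.
	 */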
2562
2563	/* If we're not READY then just leave the flags set as the cue
2564	 * to abort probing or reschedule the reset later.
2565	 */
2566	if (READ_ONCE(efx->state) != STATE_READY)
2567		return;
2568
2569	queue_work(reset_workqueue, &efx->reset_work);
2570}
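
/* Usage sketch (illustrative, not an actual call site): because the
 * heavy lifting happens in ef4_reset_work(), this may be called from
 * atomic context, e.g. an interrupt handler that detects a fatal
 * condition:
 *
 *	ef4_schedule_reset(efx, RESET_TYPE_INT_ERROR);
 *
 * The reason is mapped to a reset method and latched in reset_pending
 * even when the device is not yet READY.
 */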
2571
2572/**************************************************************************
2573 *
2574 * List of NICs we support
2575 *
2576 **************************************************************************/
2577
2578/* PCI device ID table */
2579static const struct pci_device_id ef4_pci_table[] = {
2580	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
2581		    PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0),
2582	 .driver_data = (unsigned long) &falcon_a1_nic_type},
2583	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
2584		    PCI_DEVICE_ID_SOLARFLARE_SFC4000B),
2585	 .driver_data = (unsigned long) &falcon_b0_nic_type},
2586	{0}			/* end of list */
2587};
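
/* The .driver_data cookie above round-trips through the PCI core: the
 * probe routine below recovers it from the matched table entry, i.e.
 *
 *	efx->type = (const struct ef4_nic_type *) entry->driver_data;
 */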
2588
2589/**************************************************************************
2590 *
2591 * Dummy PHY/MAC operations
2592 *
2593 * Can be used for some unimplemented operations
2594 * Needed so all function pointers are valid and do not have to be tested
2595 * before use
2596 *
2597 **************************************************************************/
2598int ef4_port_dummy_op_int(struct ef4_nic *efx)
2599{
2600	return 0;
2601}
2602void ef4_port_dummy_op_void(struct ef4_nic *efx) {}
2603
2604static bool ef4_port_dummy_op_poll(struct ef4_nic *efx)
2605{
2606	return false;
2607}
2608
2609static const struct ef4_phy_operations ef4_dummy_phy_operations = {
2610	.init		 = ef4_port_dummy_op_int,
2611	.reconfigure	 = ef4_port_dummy_op_int,
2612	.poll		 = ef4_port_dummy_op_poll,
2613	.fini		 = ef4_port_dummy_op_void,
2614};
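
/* These placeholders are installed by ef4_init_struct() below, i.e.
 *
 *	efx->phy_op = &ef4_dummy_phy_operations;
 *
 * so code may call efx->phy_op->init() and friends unconditionally,
 * before the real PHY operations are probed and swapped in.
 */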
2615
2616/**************************************************************************
2617 *
2618 * Data housekeeping
2619 *
2620 **************************************************************************/
2621
2622/* This zeroes out and then fills in the invariants in a struct
2623 * ef4_nic (including all sub-structures).
2624 */
2625static int ef4_init_struct(struct ef4_nic *efx,
2626			   struct pci_dev *pci_dev, struct net_device *net_dev)
2627{
2628	int i;
2629
2630	/* Initialise common structures */
2631	INIT_LIST_HEAD(&efx->node);
2632	INIT_LIST_HEAD(&efx->secondary_list);
2633	spin_lock_init(&efx->biu_lock);
2634#ifdef CONFIG_SFC_FALCON_MTD
2635	INIT_LIST_HEAD(&efx->mtd_list);
2636#endif
2637	INIT_WORK(&efx->reset_work, ef4_reset_work);
2638	INIT_DELAYED_WORK(&efx->monitor_work, ef4_monitor);
2639	INIT_DELAYED_WORK(&efx->selftest_work, ef4_selftest_async_work);
2640	efx->pci_dev = pci_dev;
2641	efx->msg_enable = debug;
2642	efx->state = STATE_UNINIT;
2643	strscpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
2644
2645	efx->net_dev = net_dev;
2646	efx->rx_prefix_size = efx->type->rx_prefix_size;
2647	efx->rx_ip_align =
2648		NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
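	/* Example of the alignment arithmetic, with a hypothetical 16-byte
	 * RX prefix: on an architecture where NET_IP_ALIGN == 2,
	 * rx_ip_align = (16 + 2) % 4 = 2, i.e. the packet is offset by two
	 * bytes to compensate for the prefix and preserve the placement
	 * NET_IP_ALIGN asks for; with NET_IP_ALIGN == 0 no offset is used.
	 */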
2649	efx->rx_packet_hash_offset =
2650		efx->type->rx_hash_offset - efx->type->rx_prefix_size;
2651	efx->rx_packet_ts_offset =
2652		efx->type->rx_ts_offset - efx->type->rx_prefix_size;
2653	spin_lock_init(&efx->stats_lock);
2654	mutex_init(&efx->mac_lock);
2655	efx->phy_op = &ef4_dummy_phy_operations;
2656	efx->mdio.dev = net_dev;
2657	INIT_WORK(&efx->mac_work, ef4_mac_work);
2658	init_waitqueue_head(&efx->flush_wq);
2659
2660	for (i = 0; i < EF4_MAX_CHANNELS; i++) {
2661		efx->channel[i] = ef4_alloc_channel(efx, i, NULL);
2662		if (!efx->channel[i])
2663			goto fail;
2664		efx->msi_context[i].efx = efx;
2665		efx->msi_context[i].index = i;
2666	}
2667
2668	/* Higher numbered interrupt modes are less capable! */
2669	efx->interrupt_mode = max(efx->type->max_interrupt_mode,
2670				  interrupt_mode);
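	/* Example: modes are numbered 0=>MSI-X, 1=>MSI, 2=>legacy (see the
	 * interrupt_mode module parameter below), so a NIC whose best mode
	 * is MSI (max_interrupt_mode == 1) clamps a user request for MSI-X
	 * (interrupt_mode == 0) to max(1, 0) == 1, i.e. MSI.
	 */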
2671
2672	/* Would be good to use the net_dev name, but we're too early */
2673	snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
2674		 pci_name(pci_dev));
2675	efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
2676	if (!efx->workqueue)
2677		goto fail;
2678
2679	return 0;
2680
2681fail:
2682	ef4_fini_struct(efx);
2683	return -ENOMEM;
2684}
2685
2686static void ef4_fini_struct(struct ef4_nic *efx)
2687{
2688	int i;
2689
2690	for (i = 0; i < EF4_MAX_CHANNELS; i++)
2691		kfree(efx->channel[i]);
2692
2693	kfree(efx->vpd_sn);
2694
2695	if (efx->workqueue) {
2696		destroy_workqueue(efx->workqueue);
2697		efx->workqueue = NULL;
2698	}
2699}
2700
2701void ef4_update_sw_stats(struct ef4_nic *efx, u64 *stats)
2702{
2703	u64 n_rx_nodesc_trunc = 0;
2704	struct ef4_channel *channel;
2705
2706	ef4_for_each_channel(channel, efx)
2707		n_rx_nodesc_trunc += channel->n_rx_nodesc_trunc;
2708	stats[GENERIC_STAT_rx_nodesc_trunc] = n_rx_nodesc_trunc;
2709	stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops);
2710}
2711
2712/**************************************************************************
2713 *
2714 * PCI interface
2715 *
2716 **************************************************************************/
2717
2718/* Main body of final NIC shutdown code
2719 * This is called only at module unload (or hotplug removal).
2720 */
2721static void ef4_pci_remove_main(struct ef4_nic *efx)
2722{
2723	/* Flush reset_work. It can no longer be scheduled since we
2724	 * are not READY.
2725	 */
2726	BUG_ON(efx->state == STATE_READY);
2727	cancel_work_sync(&efx->reset_work);
2728
2729	ef4_disable_interrupts(efx);
2730	ef4_nic_fini_interrupt(efx);
2731	ef4_fini_port(efx);
2732	efx->type->fini(efx);
2733	ef4_fini_napi(efx);
2734	ef4_remove_all(efx);
2735}
2736
2737/* Final NIC shutdown
2738 * This is called only at module unload (or hotplug removal).  A PF can call
2739 * this on its VFs to ensure they are unbound first.
2740 */
2741static void ef4_pci_remove(struct pci_dev *pci_dev)
2742{
2743	struct ef4_nic *efx;
2744
2745	efx = pci_get_drvdata(pci_dev);
2746	if (!efx)
2747		return;
2748
2749	/* Mark the NIC as fini, then stop the interface */
2750	rtnl_lock();
2751	ef4_dissociate(efx);
2752	dev_close(efx->net_dev);
2753	ef4_disable_interrupts(efx);
2754	efx->state = STATE_UNINIT;
2755	rtnl_unlock();
2756
2757	ef4_unregister_netdev(efx);
2758
2759	ef4_mtd_remove(efx);
2760
2761	ef4_pci_remove_main(efx);
2762
2763	ef4_fini_io(efx);
2764	netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
2765
2766	ef4_fini_struct(efx);
2767	free_netdev(efx->net_dev);
2768
2769	pci_disable_pcie_error_reporting(pci_dev);
2770	}
2771
2772/* NIC VPD information
2773	 * Called during probe to display the part number and record the serial number of the installed NIC.
2774 */
2775static void ef4_probe_vpd_strings(struct ef4_nic *efx)
2776{
2777	struct pci_dev *dev = efx->pci_dev;
2778	unsigned int vpd_size, kw_len;
2779	u8 *vpd_data;
2780	int start;
2781
2782	vpd_data = pci_vpd_alloc(dev, &vpd_size);
2783	if (IS_ERR(vpd_data)) {
2784		pci_warn(dev, "Unable to read VPD\n");
2785		return;
2786	}
2787
2788	start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
2789					     PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
2790	if (start < 0)
2791		pci_warn(dev, "Part number not found or incomplete\n");
2792	else
2793		pci_info(dev, "Part Number : %.*s\n", kw_len, vpd_data + start);
2794
2795	start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
2796					     PCI_VPD_RO_KEYWORD_SERIALNO, &kw_len);
2797	if (start < 0)
2798		pci_warn(dev, "Serial number not found or incomplete\n");
2799	else
2800		efx->vpd_sn = kmemdup_nul(vpd_data + start, kw_len, GFP_KERNEL);
2801
2802	kfree(vpd_data);
2803}
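
/* The read-only VPD keywords used above are the PCI-standard "PN"
 * (part number) and "SN" (serial number); pci_vpd_find_ro_info_keyword()
 * returns the offset of the keyword's data within vpd_data and stores
 * its length in kw_len, which is why both the printed part number and
 * the copied serial number are bounded by kw_len.
 */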
2804
2806/* Main body of NIC initialisation
2807 * This is called at module load (or hotplug insertion, theoretically).
2808 */
2809static int ef4_pci_probe_main(struct ef4_nic *efx)
2810{
2811	int rc;
2812
2813	/* Do start-of-day initialisation */
2814	rc = ef4_probe_all(efx);
2815	if (rc)
2816		goto fail1;
2817
2818	ef4_init_napi(efx);
2819
2820	rc = efx->type->init(efx);
2821	if (rc) {
2822		netif_err(efx, probe, efx->net_dev,
2823			  "failed to initialise NIC\n");
2824		goto fail3;
2825	}
2826
2827	rc = ef4_init_port(efx);
2828	if (rc) {
2829		netif_err(efx, probe, efx->net_dev,
2830			  "failed to initialise port\n");
2831		goto fail4;
2832	}
2833
2834	rc = ef4_nic_init_interrupt(efx);
2835	if (rc)
2836		goto fail5;
2837	rc = ef4_enable_interrupts(efx);
2838	if (rc)
2839		goto fail6;
2840
2841	return 0;
2842
2843 fail6:
2844	ef4_nic_fini_interrupt(efx);
2845 fail5:
2846	ef4_fini_port(efx);
2847 fail4:
2848	efx->type->fini(efx);
2849 fail3:
2850	ef4_fini_napi(efx);
2851	ef4_remove_all(efx);
2852 fail1:
2853	return rc;
2854}
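
/* The fail labels above follow the usual kernel unwind idiom: each
 * label undoes only what was initialised before the failing step, in
 * reverse order.  A minimal sketch of the pattern (hypothetical names):
 *
 *	rc = setup_a(); if (rc) goto fail_a;
 *	rc = setup_b(); if (rc) goto fail_b;
 *	return 0;
 * fail_b:
 *	teardown_a();
 * fail_a:
 *	return rc;
 */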
2855
2856/* NIC initialisation
2857 *
2858 * This is called at module load (or hotplug insertion,
2859 * theoretically).  It sets up PCI mappings, resets the NIC,
2860 * sets up and registers the network devices with the kernel and hooks
2861 * the interrupt service routine.  It does not prepare the device for
2862 * transmission; this is left to the first time one of the network
2863 * interfaces is brought up (i.e. ef4_net_open).
2864 */
2865static int ef4_pci_probe(struct pci_dev *pci_dev,
2866			 const struct pci_device_id *entry)
2867{
2868	struct net_device *net_dev;
2869	struct ef4_nic *efx;
2870	int rc;
2871
2872	/* Allocate and initialise a struct net_device and struct ef4_nic */
2873	net_dev = alloc_etherdev_mqs(sizeof(*efx), EF4_MAX_CORE_TX_QUEUES,
2874				     EF4_MAX_RX_QUEUES);
2875	if (!net_dev)
2876		return -ENOMEM;
2877	efx = netdev_priv(net_dev);
2878	efx->type = (const struct ef4_nic_type *) entry->driver_data;
2879	efx->fixed_features |= NETIF_F_HIGHDMA;
2880
2881	pci_set_drvdata(pci_dev, efx);
2882	SET_NETDEV_DEV(net_dev, &pci_dev->dev);
2883	rc = ef4_init_struct(efx, pci_dev, net_dev);
2884	if (rc)
2885		goto fail1;
2886
2887	netif_info(efx, probe, efx->net_dev,
2888		   "Solarflare NIC detected\n");
2889
2890	ef4_probe_vpd_strings(efx);
2891
2892	/* Set up basic I/O (BAR mappings etc) */
2893	rc = ef4_init_io(efx);
2894	if (rc)
2895		goto fail2;
2896
2897	rc = ef4_pci_probe_main(efx);
2898	if (rc)
2899		goto fail3;
2900
2901	net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
2902			      NETIF_F_RXCSUM);
2903	/* Mask for features that also apply to VLAN devices */
2904	net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
2905				   NETIF_F_HIGHDMA | NETIF_F_RXCSUM);
2906
2907	net_dev->hw_features = net_dev->features & ~efx->fixed_features;
2908
2909	/* Disable VLAN filtering by default.  It may be enforced if
2910	 * the feature is fixed (i.e. VLAN filters are required to
2911	 * receive VLAN tagged packets due to vPort restrictions).
2912	 */
2913	net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
2914	net_dev->features |= efx->fixed_features;
2915
2916	rc = ef4_register_netdev(efx);
2917	if (rc)
2918		goto fail4;
2919
2920	netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
2921
2922	/* Try to create MTDs, but allow this to fail */
2923	rtnl_lock();
2924	rc = ef4_mtd_probe(efx);
2925	rtnl_unlock();
2926	if (rc && rc != -EPERM)
2927		netif_warn(efx, probe, efx->net_dev,
2928			   "failed to create MTDs (%d)\n", rc);
2929
2930	rc = pci_enable_pcie_error_reporting(pci_dev);
2931	if (rc && rc != -EINVAL)
2932		netif_notice(efx, probe, efx->net_dev,
2933			     "PCIE error reporting unavailable (%d).\n",
2934			     rc);
2935
2936	return 0;
2937
2938 fail4:
2939	ef4_pci_remove_main(efx);
2940 fail3:
2941	ef4_fini_io(efx);
2942 fail2:
2943	ef4_fini_struct(efx);
2944 fail1:
2945	WARN_ON(rc > 0);
2946	netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
2947	free_netdev(net_dev);
2948	return rc;
2949}
2950
2951static int ef4_pm_freeze(struct device *dev)
2952{
2953	struct ef4_nic *efx = dev_get_drvdata(dev);
2954
2955	rtnl_lock();
2956
2957	if (efx->state != STATE_DISABLED) {
2958		efx->state = STATE_UNINIT;
2959
2960		ef4_device_detach_sync(efx);
2961
2962		ef4_stop_all(efx);
2963		ef4_disable_interrupts(efx);
2964	}
2965
2966	rtnl_unlock();
2967
2968	return 0;
2969}
2970
2971static int ef4_pm_thaw(struct device *dev)
2972{
2973	int rc;
2974	struct ef4_nic *efx = dev_get_drvdata(dev);
2975
2976	rtnl_lock();
2977
2978	if (efx->state != STATE_DISABLED) {
2979		rc = ef4_enable_interrupts(efx);
2980		if (rc)
2981			goto fail;
2982
2983		mutex_lock(&efx->mac_lock);
2984		efx->phy_op->reconfigure(efx);
2985		mutex_unlock(&efx->mac_lock);
2986
2987		ef4_start_all(efx);
2988
2989		netif_device_attach(efx->net_dev);
2990
2991		efx->state = STATE_READY;
2992
2993		efx->type->resume_wol(efx);
2994	}
2995
2996	rtnl_unlock();
2997
2998	/* Reschedule any quenched resets scheduled during ef4_pm_freeze() */
2999	queue_work(reset_workqueue, &efx->reset_work);
3000
3001	return 0;
3002
3003fail:
3004	rtnl_unlock();
3005
3006	return rc;
3007}
3008
3009static int ef4_pm_poweroff(struct device *dev)
3010{
3011	struct pci_dev *pci_dev = to_pci_dev(dev);
3012	struct ef4_nic *efx = pci_get_drvdata(pci_dev);
3013
3014	efx->type->fini(efx);
3015
3016	efx->reset_pending = 0;
3017
3018	pci_save_state(pci_dev);
3019	return pci_set_power_state(pci_dev, PCI_D3hot);
3020}
3021
3022/* Used for both resume and restore */
3023static int ef4_pm_resume(struct device *dev)
3024{
3025	struct pci_dev *pci_dev = to_pci_dev(dev);
3026	struct ef4_nic *efx = pci_get_drvdata(pci_dev);
3027	int rc;
3028
3029	rc = pci_set_power_state(pci_dev, PCI_D0);
3030	if (rc)
3031		return rc;
3032	pci_restore_state(pci_dev);
3033	rc = pci_enable_device(pci_dev);
3034	if (rc)
3035		return rc;
3036	pci_set_master(efx->pci_dev);
3037	rc = efx->type->reset(efx, RESET_TYPE_ALL);
3038	if (rc)
3039		return rc;
3040	rc = efx->type->init(efx);
3041	if (rc)
3042		return rc;
3043	rc = ef4_pm_thaw(dev);
3044	return rc;
3045}
3046
3047static int ef4_pm_suspend(struct device *dev)
3048{
3049	int rc;
3050
3051	ef4_pm_freeze(dev);
3052	rc = ef4_pm_poweroff(dev);
3053	if (rc)
3054		ef4_pm_resume(dev);
3055	return rc;
3056}
3057
3058static const struct dev_pm_ops ef4_pm_ops = {
3059	.suspend	= ef4_pm_suspend,
3060	.resume		= ef4_pm_resume,
3061	.freeze		= ef4_pm_freeze,
3062	.thaw		= ef4_pm_thaw,
3063	.poweroff	= ef4_pm_poweroff,
3064	.restore	= ef4_pm_resume,
3065};
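
/* How the PM core drives these callbacks (sketch): suspend-to-RAM uses
 * .suspend and .resume, while hibernation uses .freeze/.thaw around
 * writing the image, .poweroff once the image is saved, and .restore
 * when booting back into it.  That is why ef4_pm_resume() doubles as
 * .restore and performs a full RESET_TYPE_ALL before re-initialising.
 */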
3066
3067/* A PCI error affecting this device was detected.
3068 * At this point MMIO and DMA may be disabled.
3069 * Stop the software path and request a slot reset.
3070 */
3071static pci_ers_result_t ef4_io_error_detected(struct pci_dev *pdev,
3072					      pci_channel_state_t state)
3073{
3074	pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
3075	struct ef4_nic *efx = pci_get_drvdata(pdev);
3076
3077	if (state == pci_channel_io_perm_failure)
3078		return PCI_ERS_RESULT_DISCONNECT;
3079
3080	rtnl_lock();
3081
3082	if (efx->state != STATE_DISABLED) {
3083		efx->state = STATE_RECOVERY;
3084		efx->reset_pending = 0;
3085
3086		ef4_device_detach_sync(efx);
3087
3088		ef4_stop_all(efx);
3089		ef4_disable_interrupts(efx);
3090
3091		status = PCI_ERS_RESULT_NEED_RESET;
3092	} else {
3093		/* If the interface is disabled we don't want to do anything
3094		 * with it.
3095		 */
3096		status = PCI_ERS_RESULT_RECOVERED;
3097	}
3098
3099	rtnl_unlock();
3100
3101	pci_disable_device(pdev);
3102
3103	return status;
3104}
3105
3106	/* Report the slot reset as successful; the real reset is performed later, in ef4_io_resume(). */
3107static pci_ers_result_t ef4_io_slot_reset(struct pci_dev *pdev)
3108{
3109	struct ef4_nic *efx = pci_get_drvdata(pdev);
3110	pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
3111
3112	if (pci_enable_device(pdev)) {
3113		netif_err(efx, hw, efx->net_dev,
3114			  "Cannot re-enable PCI device after reset.\n");
3115		status = PCI_ERS_RESULT_DISCONNECT;
3116	}
3117
3118	return status;
3119}
3120
3121/* Perform the actual reset and resume I/O operations. */
3122static void ef4_io_resume(struct pci_dev *pdev)
3123{
3124	struct ef4_nic *efx = pci_get_drvdata(pdev);
3125	int rc;
3126
3127	rtnl_lock();
3128
3129	if (efx->state == STATE_DISABLED)
3130		goto out;
3131
3132	rc = ef4_reset(efx, RESET_TYPE_ALL);
3133	if (rc) {
3134		netif_err(efx, hw, efx->net_dev,
3135			  "ef4_reset failed after PCI error (%d)\n", rc);
3136	} else {
3137		efx->state = STATE_READY;
3138		netif_dbg(efx, hw, efx->net_dev,
3139			  "Done resetting and resuming IO after PCI error.\n");
3140	}
3141
3142out:
3143	rtnl_unlock();
3144}
3145
3146/* For simplicity and reliability, we always require a slot reset and try to
3147 * reset the hardware when a pci error affecting the device is detected.
3148 * We leave both the link_reset and mmio_enabled callbacks unimplemented:
3149 * with our request for slot reset the mmio_enabled callback will never be
3150 * called, and the link_reset callback is not used by AER or EEH mechanisms.
3151 */
3152static const struct pci_error_handlers ef4_err_handlers = {
3153	.error_detected = ef4_io_error_detected,
3154	.slot_reset	= ef4_io_slot_reset,
3155	.resume		= ef4_io_resume,
3156};
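
/* Recovery sequence with these handlers (sketch): on a PCI error the
 * core calls ef4_io_error_detected(), which quiesces the driver and
 * returns PCI_ERS_RESULT_NEED_RESET; the core then resets the slot and
 * calls ef4_io_slot_reset() to re-enable the device, and finally
 * ef4_io_resume(), where the actual RESET_TYPE_ALL is performed.
 */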
3157
3158static struct pci_driver ef4_pci_driver = {
3159	.name		= KBUILD_MODNAME,
3160	.id_table	= ef4_pci_table,
3161	.probe		= ef4_pci_probe,
3162	.remove		= ef4_pci_remove,
3163	.driver.pm	= &ef4_pm_ops,
3164	.err_handler	= &ef4_err_handlers,
3165};
3166
3167/**************************************************************************
3168 *
3169 * Kernel module interface
3170 *
3171 *************************************************************************/
3172
3173module_param(interrupt_mode, uint, 0444);
3174MODULE_PARM_DESC(interrupt_mode,
3175		 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
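
/* Usage sketch: the parameter can only be set at load time (the 0444
 * permissions make it read-only through sysfs), e.g., assuming the
 * module is built as sfc-falcon:
 *
 *	modprobe sfc-falcon interrupt_mode=2
 *
 * to force legacy interrupts.
 */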
3176
3177static int __init ef4_init_module(void)
3178{
3179	int rc;
3180
3181	printk(KERN_INFO "Solarflare Falcon driver v" EF4_DRIVER_VERSION "\n");
3182
3183	rc = register_netdevice_notifier(&ef4_netdev_notifier);
3184	if (rc)
3185		goto err_notifier;
3186
3187	reset_workqueue = create_singlethread_workqueue("sfc_reset");
3188	if (!reset_workqueue) {
3189		rc = -ENOMEM;
3190		goto err_reset;
3191	}
3192
3193	rc = pci_register_driver(&ef4_pci_driver);
3194	if (rc < 0)
3195		goto err_pci;
3196
3197	return 0;
3198
3199 err_pci:
3200	destroy_workqueue(reset_workqueue);
3201 err_reset:
3202	unregister_netdevice_notifier(&ef4_netdev_notifier);
3203 err_notifier:
3204	return rc;
3205}
3206
3207static void __exit ef4_exit_module(void)
3208{
3209	printk(KERN_INFO "Solarflare Falcon driver unloading\n");
3210
3211	pci_unregister_driver(&ef4_pci_driver);
3212	destroy_workqueue(reset_workqueue);
3213	unregister_netdevice_notifier(&ef4_netdev_notifier);
3215}
3216
3217module_init(ef4_init_module);
3218module_exit(ef4_exit_module);
3219
3220MODULE_AUTHOR("Solarflare Communications and "
3221	      "Michael Brown <mbrown@fensystems.co.uk>");
3222MODULE_DESCRIPTION("Solarflare Falcon network driver");
3223MODULE_LICENSE("GPL");
3224MODULE_DEVICE_TABLE(pci, ef4_pci_table);
3225MODULE_VERSION(EF4_DRIVER_VERSION);