/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include <linux/gfp.h>
#include <linux/aer.h>
#include <linux/interrupt.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
#include "selftest.h"

#include "workarounds.h"

/**************************************************************************
 *
 * Type name strings
 *
 **************************************************************************
 */

/* Loopback mode names (see LOOPBACK_MODE()) */
const unsigned int ef4_loopback_mode_max = LOOPBACK_MAX;
const char *const ef4_loopback_mode_names[] = {
	[LOOPBACK_NONE]		= "NONE",
	[LOOPBACK_DATA]		= "DATAPATH",
	[LOOPBACK_GMAC]		= "GMAC",
	[LOOPBACK_XGMII]	= "XGMII",
	[LOOPBACK_XGXS]		= "XGXS",
	[LOOPBACK_XAUI]		= "XAUI",
	[LOOPBACK_GMII]		= "GMII",
	[LOOPBACK_SGMII]	= "SGMII",
	[LOOPBACK_XGBR]		= "XGBR",
	[LOOPBACK_XFI]		= "XFI",
	[LOOPBACK_XAUI_FAR]	= "XAUI_FAR",
	[LOOPBACK_GMII_FAR]	= "GMII_FAR",
	[LOOPBACK_SGMII_FAR]	= "SGMII_FAR",
	[LOOPBACK_XFI_FAR]	= "XFI_FAR",
	[LOOPBACK_GPHY]		= "GPHY",
	[LOOPBACK_PHYXS]	= "PHYXS",
	[LOOPBACK_PCS]		= "PCS",
	[LOOPBACK_PMAPMD]	= "PMA/PMD",
	[LOOPBACK_XPORT]	= "XPORT",
	[LOOPBACK_XGMII_WS]	= "XGMII_WS",
	[LOOPBACK_XAUI_WS]	= "XAUI_WS",
	[LOOPBACK_XAUI_WS_FAR]  = "XAUI_WS_FAR",
	[LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR",
	[LOOPBACK_GMII_WS]	= "GMII_WS",
	[LOOPBACK_XFI_WS]	= "XFI_WS",
	[LOOPBACK_XFI_WS_FAR]	= "XFI_WS_FAR",
	[LOOPBACK_PHYXS_WS]	= "PHYXS_WS",
};

const unsigned int ef4_reset_type_max = RESET_TYPE_MAX;
const char *const ef4_reset_type_names[] = {
	[RESET_TYPE_INVISIBLE]          = "INVISIBLE",
	[RESET_TYPE_ALL]                = "ALL",
	[RESET_TYPE_RECOVER_OR_ALL]     = "RECOVER_OR_ALL",
	[RESET_TYPE_WORLD]              = "WORLD",
	[RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
	[RESET_TYPE_DATAPATH]           = "DATAPATH",
	[RESET_TYPE_DISABLE]            = "DISABLE",
	[RESET_TYPE_TX_WATCHDOG]        = "TX_WATCHDOG",
	[RESET_TYPE_INT_ERROR]          = "INT_ERROR",
	[RESET_TYPE_RX_RECOVERY]        = "RX_RECOVERY",
	[RESET_TYPE_DMA_ERROR]          = "DMA_ERROR",
	[RESET_TYPE_TX_SKIP]            = "TX_SKIP",
};

/* Reset workqueue. If any NIC has a hardware failure then a reset will be
 * queued onto this work queue. This is not a per-nic work queue, because
 * ef4_reset_work() acquires the rtnl lock, so resets are naturally serialised.
 */
static struct workqueue_struct *reset_workqueue;

/* How often and how many times to poll for a reset while waiting for a
 * BIST that another function started to complete.
 */
#define BIST_WAIT_DELAY_MS	100
#define BIST_WAIT_DELAY_COUNT	100
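/* Worst case we therefore poll for 100 * 100 ms = 10 seconds in total. */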

/**************************************************************************
 *
 * Configurable values
 *
 *************************************************************************/

/*
 * Use separate channels for TX and RX events
 *
 * Set this to 1 to use separate channels for TX and RX. It allows us
 * to control interrupt affinity separately for TX and RX.
 *
 * This is only used in MSI-X interrupt mode
 */
bool ef4_separate_tx_channels;
module_param(ef4_separate_tx_channels, bool, 0444);
MODULE_PARM_DESC(ef4_separate_tx_channels,
		 "Use separate channels for TX and RX");

/* This is the weight assigned to each of the (per-channel) virtual
 * NAPI devices.
 */
static int napi_weight = 64;

/* This is the time (in jiffies) between invocations of the hardware
 * monitor.
 * On Falcon-based NICs, this will:
 * - Check the on-board hardware monitor;
 * - Poll the link state and reconfigure the hardware as necessary.
 * On Siena-based NICs for Power systems with EEH support, this will give EEH a
 * chance to start.
 */
static unsigned int ef4_monitor_interval = 1 * HZ;

/* Initial interrupt moderation settings.  They can be modified after
 * module load with ethtool.
 *
 * The default for RX should strike a balance between increasing the
 * round-trip latency and reducing overhead.
 */
static unsigned int rx_irq_mod_usec = 60;

/* Initial interrupt moderation settings.  They can be modified after
 * module load with ethtool.
 *
 * This default is chosen to ensure that a 10G link does not go idle
 * while a TX queue is stopped after it has become full.  A queue is
 * restarted when it drops below half full.  The time this takes (assuming
 * worst case 3 descriptors per packet, 1024 descriptors, and 1.2 usec to
 * transmit each 1500-byte packet at 10Gbps) is
 *   512 / 3 * 1.2 = 205 usec.
 */
static unsigned int tx_irq_mod_usec = 150;

/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
static unsigned int interrupt_mode;

/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each core.
 */
static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");

static bool phy_flash_cfg;
module_param(phy_flash_cfg, bool, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");

static unsigned irq_adapt_low_thresh = 8000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
		 "Threshold score for reducing IRQ moderation");

static unsigned irq_adapt_high_thresh = 16000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
		 "Threshold score for increasing IRQ moderation");

static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
			 NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
			 NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
			 NETIF_MSG_TX_ERR | NETIF_MSG_HW);
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");

/**************************************************************************
 *
 * Utility functions and prototypes
 *
 *************************************************************************/

static int ef4_soft_enable_interrupts(struct ef4_nic *efx);
static void ef4_soft_disable_interrupts(struct ef4_nic *efx);
static void ef4_remove_channel(struct ef4_channel *channel);
static void ef4_remove_channels(struct ef4_nic *efx);
static const struct ef4_channel_type ef4_default_channel_type;
static void ef4_remove_port(struct ef4_nic *efx);
static void ef4_init_napi_channel(struct ef4_channel *channel);
static void ef4_fini_napi(struct ef4_nic *efx);
static void ef4_fini_napi_channel(struct ef4_channel *channel);
static void ef4_fini_struct(struct ef4_nic *efx);
static void ef4_start_all(struct ef4_nic *efx);
static void ef4_stop_all(struct ef4_nic *efx);

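/* Assert that the RTNL lock is held whenever the device has reached a
 * state in which resets must be serialised against normal operation.
 */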
#define EF4_ASSERT_RESET_SERIALISED(efx)		\
	do {						\
		if ((efx->state == STATE_READY) ||	\
		    (efx->state == STATE_RECOVERY) ||	\
		    (efx->state == STATE_DISABLED))	\
			ASSERT_RTNL();			\
	} while (0)

static int ef4_check_disabled(struct ef4_nic *efx)
{
	if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
		netif_err(efx, drv, efx->net_dev,
			  "device is disabled due to earlier errors\n");
		return -EIO;
	}
	return 0;
}

/**************************************************************************
 *
 * Event queue processing
 *
 *************************************************************************/

/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel.  The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static int ef4_process_channel(struct ef4_channel *channel, int budget)
{
	struct ef4_tx_queue *tx_queue;
	int spent;

	if (unlikely(!channel->enabled))
		return 0;

	ef4_for_each_channel_tx_queue(tx_queue, channel) {
		tx_queue->pkts_compl = 0;
		tx_queue->bytes_compl = 0;
	}

	spent = ef4_nic_process_eventq(channel, budget);
	if (spent && ef4_channel_has_rx_queue(channel)) {
		struct ef4_rx_queue *rx_queue =
			ef4_channel_get_rx_queue(channel);

		ef4_rx_flush_packet(channel);
		ef4_fast_push_rx_descriptors(rx_queue, true);
	}

	/* Update BQL */
	ef4_for_each_channel_tx_queue(tx_queue, channel) {
		if (tx_queue->bytes_compl) {
			netdev_tx_completed_queue(tx_queue->core_txq,
				tx_queue->pkts_compl, tx_queue->bytes_compl);
		}
	}

	return spent;
}

/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by ef4_process_channel().
 */
static void ef4_update_irq_mod(struct ef4_nic *efx, struct ef4_channel *channel)
{
	int step = efx->irq_mod_step_us;

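	/* A low score suggests the channel is lightly loaded: shorten the
	 * moderation interval to reduce latency.  A high score suggests
	 * heavy traffic: lengthen the interval (up to the configured RX
	 * default) to batch more events per interrupt.
	 */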
	if (channel->irq_mod_score < irq_adapt_low_thresh) {
		if (channel->irq_moderation_us > step) {
			channel->irq_moderation_us -= step;
			efx->type->push_irq_moderation(channel);
		}
	} else if (channel->irq_mod_score > irq_adapt_high_thresh) {
		if (channel->irq_moderation_us <
		    efx->irq_rx_moderation_us) {
			channel->irq_moderation_us += step;
			efx->type->push_irq_moderation(channel);
		}
	}

	channel->irq_count = 0;
	channel->irq_mod_score = 0;
}

static int ef4_poll(struct napi_struct *napi, int budget)
{
	struct ef4_channel *channel =
		container_of(napi, struct ef4_channel, napi_str);
	struct ef4_nic *efx = channel->efx;
	int spent;

	netif_vdbg(efx, intr, efx->net_dev,
		   "channel %d NAPI poll executing on CPU %d\n",
		   channel->channel, raw_smp_processor_id());

	spent = ef4_process_channel(channel, budget);

	if (spent < budget) {
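		/* Adaptive IRQ moderation on an RX channel is re-tuned
		 * once every 1000 interrupts.
		 */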
		if (ef4_channel_has_rx_queue(channel) &&
		    efx->irq_rx_adaptive &&
		    unlikely(++channel->irq_count == 1000)) {
			ef4_update_irq_mod(efx, channel);
		}

		ef4_filter_rfs_expire(channel);

		/* There is no race here; although napi_disable() will
		 * only wait for napi_complete(), this isn't a problem
		 * since ef4_nic_eventq_read_ack() will have no effect if
		 * interrupts have already been disabled.
		 */
		napi_complete_done(napi, spent);
		ef4_nic_eventq_read_ack(channel);
	}

	return spent;
}

/* Create event queue
 * Event queue memory allocations are done only once.  If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
static int ef4_probe_eventq(struct ef4_channel *channel)
{
	struct ef4_nic *efx = channel->efx;
	unsigned long entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "chan %d create event queue\n", channel->channel);

	/* Build an event queue with room for one event per tx and rx buffer,
	 * plus some extra for link state events and MCDI completions. */
	entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
	EF4_BUG_ON_PARANOID(entries > EF4_MAX_EVQ_SIZE);
	channel->eventq_mask = max(entries, EF4_MIN_EVQ_SIZE) - 1;

	return ef4_nic_probe_eventq(channel);
}

/* Prepare channel's event queue */
static int ef4_init_eventq(struct ef4_channel *channel)
{
	struct ef4_nic *efx = channel->efx;
	int rc;

	EF4_WARN_ON_PARANOID(channel->eventq_init);

	netif_dbg(efx, drv, efx->net_dev,
		  "chan %d init event queue\n", channel->channel);

	rc = ef4_nic_init_eventq(channel);
	if (rc == 0) {
		efx->type->push_irq_moderation(channel);
		channel->eventq_read_ptr = 0;
		channel->eventq_init = true;
	}
	return rc;
}

/* Enable event queue processing and NAPI */
void ef4_start_eventq(struct ef4_channel *channel)
{
	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
		  "chan %d start event queue\n", channel->channel);

	/* Make sure the NAPI handler sees the enabled flag set */
	channel->enabled = true;
	smp_wmb();

	napi_enable(&channel->napi_str);
	ef4_nic_eventq_read_ack(channel);
}

/* Disable event queue processing and NAPI */
void ef4_stop_eventq(struct ef4_channel *channel)
{
	if (!channel->enabled)
		return;

	napi_disable(&channel->napi_str);
	channel->enabled = false;
}

static void ef4_fini_eventq(struct ef4_channel *channel)
{
	if (!channel->eventq_init)
		return;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d fini event queue\n", channel->channel);

	ef4_nic_fini_eventq(channel);
	channel->eventq_init = false;
}

static void ef4_remove_eventq(struct ef4_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d remove event queue\n", channel->channel);

	ef4_nic_remove_eventq(channel);
}

/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/

/* Allocate and initialise a channel structure. */
static struct ef4_channel *
ef4_alloc_channel(struct ef4_nic *efx, int i, struct ef4_channel *old_channel)
{
	struct ef4_channel *channel;
	struct ef4_rx_queue *rx_queue;
	struct ef4_tx_queue *tx_queue;
	int j;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	channel->efx = efx;
	channel->channel = i;
	channel->type = &ef4_default_channel_type;

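	/* Each channel owns a contiguous block of EF4_TXQ_TYPES entries in
	 * the global TX queue numbering.
	 */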
	for (j = 0; j < EF4_TXQ_TYPES; j++) {
		tx_queue = &channel->tx_queue[j];
		tx_queue->efx = efx;
		tx_queue->queue = i * EF4_TXQ_TYPES + j;
		tx_queue->channel = channel;
	}

	rx_queue = &channel->rx_queue;
	rx_queue->efx = efx;
	timer_setup(&rx_queue->slow_fill, ef4_rx_slow_fill, 0);

	return channel;
}

/* Allocate and initialise a channel structure, copying parameters
 * (but not resources) from an old channel structure.
 */
static struct ef4_channel *
ef4_copy_channel(const struct ef4_channel *old_channel)
{
	struct ef4_channel *channel;
	struct ef4_rx_queue *rx_queue;
	struct ef4_tx_queue *tx_queue;
	int j;

	channel = kmalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	*channel = *old_channel;

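	/* Invalidate state owned by the old channel instance; the caller
	 * re-probes these resources for the new instance.
	 */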
	channel->napi_dev = NULL;
	INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
	channel->napi_str.napi_id = 0;
	channel->napi_str.state = 0;
	memset(&channel->eventq, 0, sizeof(channel->eventq));

	for (j = 0; j < EF4_TXQ_TYPES; j++) {
		tx_queue = &channel->tx_queue[j];
		if (tx_queue->channel)
			tx_queue->channel = channel;
		tx_queue->buffer = NULL;
		memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
	}

	rx_queue = &channel->rx_queue;
	rx_queue->buffer = NULL;
	memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
	timer_setup(&rx_queue->slow_fill, ef4_rx_slow_fill, 0);

	return channel;
}

static int ef4_probe_channel(struct ef4_channel *channel)
{
	struct ef4_tx_queue *tx_queue;
	struct ef4_rx_queue *rx_queue;
	int rc;

	netif_dbg(channel->efx, probe, channel->efx->net_dev,
		  "creating channel %d\n", channel->channel);

	rc = channel->type->pre_probe(channel);
	if (rc)
		goto fail;

	rc = ef4_probe_eventq(channel);
	if (rc)
		goto fail;

	ef4_for_each_channel_tx_queue(tx_queue, channel) {
		rc = ef4_probe_tx_queue(tx_queue);
		if (rc)
			goto fail;
	}

	ef4_for_each_channel_rx_queue(rx_queue, channel) {
		rc = ef4_probe_rx_queue(rx_queue);
		if (rc)
			goto fail;
	}

	return 0;

fail:
	ef4_remove_channel(channel);
	return rc;
}

static void
ef4_get_channel_name(struct ef4_channel *channel, char *buf, size_t len)
{
	struct ef4_nic *efx = channel->efx;
	const char *type;
	int number;

	number = channel->channel;
	if (efx->tx_channel_offset == 0) {
		type = "";
	} else if (channel->channel < efx->tx_channel_offset) {
		type = "-rx";
	} else {
		type = "-tx";
		number -= efx->tx_channel_offset;
	}
	snprintf(buf, len, "%s%s-%d", efx->name, type, number);
}

static void ef4_set_channel_names(struct ef4_nic *efx)
{
	struct ef4_channel *channel;

	ef4_for_each_channel(channel, efx)
		channel->type->get_name(channel,
					efx->msi_context[channel->channel].name,
					sizeof(efx->msi_context[0].name));
}

static int ef4_probe_channels(struct ef4_nic *efx)
{
	struct ef4_channel *channel;
	int rc;

	/* Restart special buffer allocation */
	efx->next_buffer_table = 0;

	/* Probe channels in reverse, so that any 'extra' channels
	 * use the start of the buffer table. This allows the traffic
	 * channels to be resized without moving them or wasting the
	 * entries before them.
	 */
	ef4_for_each_channel_rev(channel, efx) {
		rc = ef4_probe_channel(channel);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to create channel %d\n",
				  channel->channel);
			goto fail;
		}
	}
	ef4_set_channel_names(efx);

	return 0;

fail:
	ef4_remove_channels(efx);
	return rc;
}

/* Channels are shutdown and reinitialised whilst the NIC is running
 * to propagate configuration changes (mtu, checksum offload), or
 * to clear hardware error conditions
 */
static void ef4_start_datapath(struct ef4_nic *efx)
{
	netdev_features_t old_features = efx->net_dev->features;
	bool old_rx_scatter = efx->rx_scatter;
	struct ef4_tx_queue *tx_queue;
	struct ef4_rx_queue *rx_queue;
	struct ef4_channel *channel;
	size_t rx_buf_len;

	/* Calculate the rx buffer allocation parameters required to
	 * support the current MTU, including padding for header
	 * alignment and overruns.
	 */
	efx->rx_dma_len = (efx->rx_prefix_size +
			   EF4_MAX_FRAME_LEN(efx->net_dev->mtu) +
			   efx->type->rx_buffer_padding);
	rx_buf_len = (sizeof(struct ef4_rx_page_state) +
		      efx->rx_ip_align + efx->rx_dma_len);
	if (rx_buf_len <= PAGE_SIZE) {
		efx->rx_scatter = efx->type->always_rx_scatter;
		efx->rx_buffer_order = 0;
	} else if (efx->type->can_rx_scatter) {
		BUILD_BUG_ON(EF4_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
		BUILD_BUG_ON(sizeof(struct ef4_rx_page_state) +
			     2 * ALIGN(NET_IP_ALIGN + EF4_RX_USR_BUF_SIZE,
				       EF4_RX_BUF_ALIGNMENT) >
			     PAGE_SIZE);
		efx->rx_scatter = true;
		efx->rx_dma_len = EF4_RX_USR_BUF_SIZE;
		efx->rx_buffer_order = 0;
	} else {
		efx->rx_scatter = false;
		efx->rx_buffer_order = get_order(rx_buf_len);
	}

	ef4_rx_config_page_split(efx);
	if (efx->rx_buffer_order)
		netif_dbg(efx, drv, efx->net_dev,
			  "RX buf len=%u; page order=%u batch=%u\n",
			  efx->rx_dma_len, efx->rx_buffer_order,
			  efx->rx_pages_per_batch);
	else
		netif_dbg(efx, drv, efx->net_dev,
			  "RX buf len=%u step=%u bpp=%u; page batch=%u\n",
			  efx->rx_dma_len, efx->rx_page_buf_step,
			  efx->rx_bufs_per_page, efx->rx_pages_per_batch);

	/* Restore previously fixed features in hw_features and remove
	 * features which are fixed now
	 */
	efx->net_dev->hw_features |= efx->net_dev->features;
	efx->net_dev->hw_features &= ~efx->fixed_features;
	efx->net_dev->features |= efx->fixed_features;
	if (efx->net_dev->features != old_features)
		netdev_features_change(efx->net_dev);

	/* RX filters may also have scatter-enabled flags */
	if (efx->rx_scatter != old_rx_scatter)
		efx->type->filter_update_rx_scatter(efx);

	/* We must keep at least one descriptor in a TX ring empty.
	 * We could avoid this when the queue size does not exactly
	 * match the hardware ring size, but it's not that important.
	 * Therefore we stop the queue when one more skb might fill
	 * the ring completely.  We wake it when half way back to
	 * empty.
	 */
	efx->txq_stop_thresh = efx->txq_entries - ef4_tx_max_skb_descs(efx);
	efx->txq_wake_thresh = efx->txq_stop_thresh / 2;

	/* Initialise the channels */
	ef4_for_each_channel(channel, efx) {
		ef4_for_each_channel_tx_queue(tx_queue, channel) {
			ef4_init_tx_queue(tx_queue);
			atomic_inc(&efx->active_queues);
		}

		ef4_for_each_channel_rx_queue(rx_queue, channel) {
			ef4_init_rx_queue(rx_queue);
			atomic_inc(&efx->active_queues);
			ef4_stop_eventq(channel);
			ef4_fast_push_rx_descriptors(rx_queue, false);
			ef4_start_eventq(channel);
		}

		WARN_ON(channel->rx_pkt_n_frags);
	}

	if (netif_device_present(efx->net_dev))
		netif_tx_wake_all_queues(efx->net_dev);
}

static void ef4_stop_datapath(struct ef4_nic *efx)
{
	struct ef4_channel *channel;
	struct ef4_tx_queue *tx_queue;
	struct ef4_rx_queue *rx_queue;
	int rc;

	EF4_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->port_enabled);

	/* Stop RX refill */
	ef4_for_each_channel(channel, efx) {
		ef4_for_each_channel_rx_queue(rx_queue, channel)
			rx_queue->refill_enabled = false;
	}

	ef4_for_each_channel(channel, efx) {
		/* RX packet processing is pipelined, so wait for the
		 * NAPI handler to complete.  At least event queue 0
		 * might be kept active by non-data events, so don't
		 * use napi_synchronize() but actually disable NAPI
		 * temporarily.
		 */
		if (ef4_channel_has_rx_queue(channel)) {
			ef4_stop_eventq(channel);
			ef4_start_eventq(channel);
		}
	}

	rc = efx->type->fini_dmaq(efx);
	if (rc && EF4_WORKAROUND_7803(efx)) {
		/* Schedule a reset to recover from the flush failure. The
		 * descriptor caches reference memory we're about to free,
		 * but falcon_reconfigure_mac_wrapper() won't reconnect
		 * the MACs because of the pending reset.
		 */
		netif_err(efx, drv, efx->net_dev,
			  "Resetting to recover from flush failure\n");
		ef4_schedule_reset(efx, RESET_TYPE_ALL);
	} else if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
	} else {
		netif_dbg(efx, drv, efx->net_dev,
			  "successfully flushed all queues\n");
	}

	ef4_for_each_channel(channel, efx) {
		ef4_for_each_channel_rx_queue(rx_queue, channel)
			ef4_fini_rx_queue(rx_queue);
		ef4_for_each_possible_channel_tx_queue(tx_queue, channel)
			ef4_fini_tx_queue(tx_queue);
	}
}

static void ef4_remove_channel(struct ef4_channel *channel)
{
	struct ef4_tx_queue *tx_queue;
	struct ef4_rx_queue *rx_queue;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "destroy chan %d\n", channel->channel);

	ef4_for_each_channel_rx_queue(rx_queue, channel)
		ef4_remove_rx_queue(rx_queue);
	ef4_for_each_possible_channel_tx_queue(tx_queue, channel)
		ef4_remove_tx_queue(tx_queue);
	ef4_remove_eventq(channel);
	channel->type->post_remove(channel);
}

static void ef4_remove_channels(struct ef4_nic *efx)
{
	struct ef4_channel *channel;

	ef4_for_each_channel(channel, efx)
		ef4_remove_channel(channel);
}

int
ef4_realloc_channels(struct ef4_nic *efx, u32 rxq_entries, u32 txq_entries)
{
	struct ef4_channel *other_channel[EF4_MAX_CHANNELS], *channel;
	u32 old_rxq_entries, old_txq_entries;
	unsigned i, next_buffer_table = 0;
	int rc, rc2;

	rc = ef4_check_disabled(efx);
	if (rc)
		return rc;

	/* Not all channels should be reallocated. We must avoid
	 * reallocating their buffer table entries.
	 */
	ef4_for_each_channel(channel, efx) {
		struct ef4_rx_queue *rx_queue;
		struct ef4_tx_queue *tx_queue;

		if (channel->type->copy)
			continue;
		next_buffer_table = max(next_buffer_table,
					channel->eventq.index +
					channel->eventq.entries);
		ef4_for_each_channel_rx_queue(rx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						rx_queue->rxd.index +
						rx_queue->rxd.entries);
		ef4_for_each_channel_tx_queue(tx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						tx_queue->txd.index +
						tx_queue->txd.entries);
	}

	ef4_device_detach_sync(efx);
	ef4_stop_all(efx);
	ef4_soft_disable_interrupts(efx);

	/* Clone channels (where possible) */
	memset(other_channel, 0, sizeof(other_channel));
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (channel->type->copy)
			channel = channel->type->copy(channel);
		if (!channel) {
			rc = -ENOMEM;
			goto out;
		}
		other_channel[i] = channel;
	}

	/* Swap entry counts and channel pointers */
	old_rxq_entries = efx->rxq_entries;
	old_txq_entries = efx->txq_entries;
	efx->rxq_entries = rxq_entries;
	efx->txq_entries = txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}

	/* Restart buffer table allocation */
	efx->next_buffer_table = next_buffer_table;

	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (!channel->type->copy)
			continue;
		rc = ef4_probe_channel(channel);
		if (rc)
			goto rollback;
		ef4_init_napi_channel(efx->channel[i]);
	}

out:
	/* Destroy unused channel structures */
	for (i = 0; i < efx->n_channels; i++) {
		channel = other_channel[i];
		if (channel && channel->type->copy) {
			ef4_fini_napi_channel(channel);
			ef4_remove_channel(channel);
			kfree(channel);
		}
	}

	rc2 = ef4_soft_enable_interrupts(efx);
	if (rc2) {
		rc = rc ? rc : rc2;
		netif_err(efx, drv, efx->net_dev,
			  "unable to restart interrupts on channel reallocation\n");
		ef4_schedule_reset(efx, RESET_TYPE_DISABLE);
	} else {
		ef4_start_all(efx);
		netif_device_attach(efx->net_dev);
	}
	return rc;

rollback:
	/* Swap back */
	efx->rxq_entries = old_rxq_entries;
	efx->txq_entries = old_txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}
	goto out;
}

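/* Arm the slow-fill timer so that RX refill is retried shortly (e.g. after
 * a buffer allocation failure).
 */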
void ef4_schedule_slow_fill(struct ef4_rx_queue *rx_queue)
{
	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
}

static const struct ef4_channel_type ef4_default_channel_type = {
	.pre_probe		= ef4_channel_dummy_op_int,
	.post_remove		= ef4_channel_dummy_op_void,
	.get_name		= ef4_get_channel_name,
	.copy			= ef4_copy_channel,
	.keep_eventq		= false,
};

int ef4_channel_dummy_op_int(struct ef4_channel *channel)
{
	return 0;
}

void ef4_channel_dummy_op_void(struct ef4_channel *channel)
{
}

/**************************************************************************
 *
 * Port handling
 *
 **************************************************************************/

/* This ensures that the kernel is kept informed (via
 * netif_carrier_on/off) of the link status; the carrier state in turn
 * stops or restarts the port's TX queues.
 */
void ef4_link_status_changed(struct ef4_nic *efx)
{
	struct ef4_link_state *link_state = &efx->link_state;

	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
	 * that no events are triggered between unregister_netdev() and the
	 * driver unloading. A more general condition is that NETDEV_CHANGE
	 * can only be generated between NETDEV_UP and NETDEV_DOWN */
	if (!netif_running(efx->net_dev))
		return;

	if (link_state->up != netif_carrier_ok(efx->net_dev)) {
		efx->n_link_state_changes++;

		if (link_state->up)
			netif_carrier_on(efx->net_dev);
		else
			netif_carrier_off(efx->net_dev);
	}

	/* Status message for kernel log */
	if (link_state->up)
		netif_info(efx, link, efx->net_dev,
			   "link up at %uMbps %s-duplex (MTU %d)\n",
			   link_state->speed, link_state->fd ? "full" : "half",
			   efx->net_dev->mtu);
	else
		netif_info(efx, link, efx->net_dev, "link down\n");
}

void ef4_link_set_advertising(struct ef4_nic *efx, u32 advertising)
{
	efx->link_advertising = advertising;
	if (advertising) {
		if (advertising & ADVERTISED_Pause)
			efx->wanted_fc |= (EF4_FC_TX | EF4_FC_RX);
		else
			efx->wanted_fc &= ~(EF4_FC_TX | EF4_FC_RX);
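		/* Asym_Pause flips the TX pause sense: Pause alone means
		 * symmetric pause, Pause plus Asym_Pause means RX pause
		 * only, and Asym_Pause alone means TX pause only.
		 */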
		if (advertising & ADVERTISED_Asym_Pause)
			efx->wanted_fc ^= EF4_FC_TX;
	}
}

void ef4_link_set_wanted_fc(struct ef4_nic *efx, u8 wanted_fc)
{
	efx->wanted_fc = wanted_fc;
	if (efx->link_advertising) {
		if (wanted_fc & EF4_FC_RX)
			efx->link_advertising |= (ADVERTISED_Pause |
						  ADVERTISED_Asym_Pause);
		else
			efx->link_advertising &= ~(ADVERTISED_Pause |
						   ADVERTISED_Asym_Pause);
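		/* RX+TX and RX-only pause advertisements differ only in the
		 * Asym_Pause bit, as do TX-only pause and no pause at all.
		 */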
		if (wanted_fc & EF4_FC_TX)
			efx->link_advertising ^= ADVERTISED_Asym_Pause;
	}
}

static void ef4_fini_port(struct ef4_nic *efx);

/* We assume that efx->type->reconfigure_mac will always try to sync RX
 * filters and therefore needs to read-lock the filter table against freeing
 */
void ef4_mac_reconfigure(struct ef4_nic *efx)
{
	down_read(&efx->filter_sem);
	efx->type->reconfigure_mac(efx);
	up_read(&efx->filter_sem);
}

/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
 * the MAC appropriately. All other PHY configuration changes are pushed
 * through phy_op->set_link_ksettings(), and pushed asynchronously to the MAC
 * through ef4_monitor().
 *
 * Callers must hold the mac_lock
 */
int __ef4_reconfigure_port(struct ef4_nic *efx)
{
	enum ef4_phy_mode phy_mode;
	int rc;

	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	/* Disable PHY transmit in mac level loopbacks */
	phy_mode = efx->phy_mode;
	if (LOOPBACK_INTERNAL(efx))
		efx->phy_mode |= PHY_MODE_TX_DISABLED;
	else
		efx->phy_mode &= ~PHY_MODE_TX_DISABLED;

	rc = efx->type->reconfigure_port(efx);

	if (rc)
		efx->phy_mode = phy_mode;

	return rc;
}

/* Reinitialise the MAC to pick up new PHY settings, even if the port is
 * disabled. */
int ef4_reconfigure_port(struct ef4_nic *efx)
{
	int rc;

	EF4_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	rc = __ef4_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	return rc;
}

/* Asynchronous work item for changing MAC promiscuity and multicast
 * hash.  Avoid a drain/rx_ingress enable by reconfiguring the current
 * MAC directly. */
static void ef4_mac_work(struct work_struct *data)
{
	struct ef4_nic *efx = container_of(data, struct ef4_nic, mac_work);

	mutex_lock(&efx->mac_lock);
	if (efx->port_enabled)
		ef4_mac_reconfigure(efx);
	mutex_unlock(&efx->mac_lock);
}

static int ef4_probe_port(struct ef4_nic *efx)
{
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "create port\n");

	if (phy_flash_cfg)
		efx->phy_mode = PHY_MODE_SPECIAL;

	/* Connect up MAC/PHY operations table */
	rc = efx->type->probe_port(efx);
	if (rc)
		return rc;

	/* Initialise MAC address to permanent address */
	ether_addr_copy(efx->net_dev->dev_addr, efx->net_dev->perm_addr);

	return 0;
}

static int ef4_init_port(struct ef4_nic *efx)
{
	int rc;

	netif_dbg(efx, drv, efx->net_dev, "init port\n");

	mutex_lock(&efx->mac_lock);

	rc = efx->phy_op->init(efx);
	if (rc)
		goto fail1;

	efx->port_initialized = true;

	/* Reconfigure the MAC before creating dma queues (required for
	 * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
	ef4_mac_reconfigure(efx);

	/* Ensure the PHY advertises the correct flow control settings */
	rc = efx->phy_op->reconfigure(efx);
	if (rc && rc != -EPERM)
		goto fail2;

	mutex_unlock(&efx->mac_lock);
	return 0;

fail2:
	efx->phy_op->fini(efx);
fail1:
	mutex_unlock(&efx->mac_lock);
	return rc;
}

static void ef4_start_port(struct ef4_nic *efx)
{
	netif_dbg(efx, ifup, efx->net_dev, "start port\n");
	BUG_ON(efx->port_enabled);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = true;

	/* Ensure MAC ingress/egress is enabled */
	ef4_mac_reconfigure(efx);

	mutex_unlock(&efx->mac_lock);
}

/* Cancel work for MAC reconfiguration, periodic hardware monitoring
 * and the async self-test, wait for them to finish and prevent them
 * being scheduled again.  This doesn't cover online resets, which
 * should only be cancelled when removing the device.
 */
static void ef4_stop_port(struct ef4_nic *efx)
{
	netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");

	EF4_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = false;
	mutex_unlock(&efx->mac_lock);

	/* Serialise against ef4_set_multicast_list() */
	netif_addr_lock_bh(efx->net_dev);
	netif_addr_unlock_bh(efx->net_dev);

	cancel_delayed_work_sync(&efx->monitor_work);
	ef4_selftest_async_cancel(efx);
	cancel_work_sync(&efx->mac_work);
}

static void ef4_fini_port(struct ef4_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "shut down port\n");

	if (!efx->port_initialized)
		return;

	efx->phy_op->fini(efx);
	efx->port_initialized = false;

	efx->link_state.up = false;
	ef4_link_status_changed(efx);
}

static void ef4_remove_port(struct ef4_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "destroying port\n");

	efx->type->remove_port(efx);
}

/**************************************************************************
 *
 * NIC handling
 *
 **************************************************************************/

static LIST_HEAD(ef4_primary_list);
static LIST_HEAD(ef4_unassociated_list);

static bool ef4_same_controller(struct ef4_nic *left, struct ef4_nic *right)
{
	return left->type == right->type &&
		left->vpd_sn && right->vpd_sn &&
		!strcmp(left->vpd_sn, right->vpd_sn);
}

static void ef4_associate(struct ef4_nic *efx)
{
	struct ef4_nic *other, *next;

	if (efx->primary == efx) {
		/* Adding primary function; look for secondaries */

		netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n");
		list_add_tail(&efx->node, &ef4_primary_list);

		list_for_each_entry_safe(other, next, &ef4_unassociated_list,
					 node) {
			if (ef4_same_controller(efx, other)) {
				list_del(&other->node);
				netif_dbg(other, probe, other->net_dev,
					  "moving to secondary list of %s %s\n",
					  pci_name(efx->pci_dev),
					  efx->net_dev->name);
				list_add_tail(&other->node,
					      &efx->secondary_list);
				other->primary = efx;
			}
		}
	} else {
		/* Adding secondary function; look for primary */

		list_for_each_entry(other, &ef4_primary_list, node) {
			if (ef4_same_controller(efx, other)) {
				netif_dbg(efx, probe, efx->net_dev,
					  "adding to secondary list of %s %s\n",
					  pci_name(other->pci_dev),
					  other->net_dev->name);
				list_add_tail(&efx->node,
					      &other->secondary_list);
				efx->primary = other;
				return;
			}
		}

		netif_dbg(efx, probe, efx->net_dev,
			  "adding to unassociated list\n");
		list_add_tail(&efx->node, &ef4_unassociated_list);
	}
}

static void ef4_dissociate(struct ef4_nic *efx)
{
	struct ef4_nic *other, *next;

	list_del(&efx->node);
	efx->primary = NULL;

	list_for_each_entry_safe(other, next, &efx->secondary_list, node) {
		list_del(&other->node);
		netif_dbg(other, probe, other->net_dev,
			  "moving to unassociated list\n");
		list_add_tail(&other->node, &ef4_unassociated_list);
		other->primary = NULL;
	}
}

/* This configures the PCI device to enable I/O and DMA. */
static int ef4_init_io(struct ef4_nic *efx)
{
	struct pci_dev *pci_dev = efx->pci_dev;
	dma_addr_t dma_mask = efx->type->max_dma_mask;
	unsigned int mem_map_size = efx->type->mem_map_size(efx);
	int rc, bar;

	netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");

	bar = efx->type->mem_bar;

	rc = pci_enable_device(pci_dev);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to enable PCI device\n");
		goto fail1;
	}

	pci_set_master(pci_dev);

	/* Set the PCI DMA mask.  Try all possibilities from our
	 * genuine mask down to 32 bits, because some architectures
	 * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
	 * masks even though they reject 46 bit masks.
	 */
	while (dma_mask > 0x7fffffffUL) {
		rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
		if (rc == 0)
			break;
		dma_mask >>= 1;
	}
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "could not find a suitable DMA mask\n");
		goto fail2;
	}
	netif_dbg(efx, probe, efx->net_dev,
		  "using DMA mask %llx\n", (unsigned long long) dma_mask);

	efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
	rc = pci_request_region(pci_dev, bar, "sfc");
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "request for memory BAR failed\n");
		rc = -EIO;
		goto fail3;
	}
	efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size);
	if (!efx->membase) {
		netif_err(efx, probe, efx->net_dev,
			  "could not map memory BAR at %llx+%x\n",
			  (unsigned long long)efx->membase_phys, mem_map_size);
		rc = -ENOMEM;
		goto fail4;
	}
	netif_dbg(efx, probe, efx->net_dev,
		  "memory BAR at %llx+%x (virtual %p)\n",
		  (unsigned long long)efx->membase_phys, mem_map_size,
		  efx->membase);

	return 0;

 fail4:
	pci_release_region(efx->pci_dev, bar);
 fail3:
	efx->membase_phys = 0;
 fail2:
	pci_disable_device(efx->pci_dev);
 fail1:
	return rc;
}

static void ef4_fini_io(struct ef4_nic *efx)
{
	int bar;

	netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");

	if (efx->membase) {
		iounmap(efx->membase);
		efx->membase = NULL;
	}

	if (efx->membase_phys) {
		bar = efx->type->mem_bar;
		pci_release_region(efx->pci_dev, bar);
		efx->membase_phys = 0;
	}

	/* Don't disable bus-mastering if VFs are assigned */
	if (!pci_vfs_assigned(efx->pci_dev))
		pci_disable_device(efx->pci_dev);
}

void ef4_set_default_rx_indir_table(struct ef4_nic *efx)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
		efx->rx_indir_table[i] =
			ethtool_rxfh_indir_default(i, efx->rss_spread);
}

static unsigned int ef4_wanted_parallelism(struct ef4_nic *efx)
{
	cpumask_var_t thread_mask;
	unsigned int count;
	int cpu;

	if (rss_cpus) {
		count = rss_cpus;
	} else {
		if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
			netif_warn(efx, probe, efx->net_dev,
				   "RSS disabled due to allocation failure\n");
			return 1;
		}

		count = 0;
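		/* Count one CPU per physical core: every sibling
		 * (hyperthread) of a counted CPU is merged into thread_mask
		 * so that it is not counted again.
		 */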
1344		for_each_online_cpu(cpu) {
1345			if (!cpumask_test_cpu(cpu, thread_mask)) {
1346				++count;
1347				cpumask_or(thread_mask, thread_mask,
1348					   topology_sibling_cpumask(cpu));
1349			}
1350		}
1351
1352		free_cpumask_var(thread_mask);
1353	}
1354
1355	if (count > EF4_MAX_RX_QUEUES) {
1356		netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
1357			       "Reducing number of rx queues from %u to %u.\n",
1358			       count, EF4_MAX_RX_QUEUES);
1359		count = EF4_MAX_RX_QUEUES;
1360	}
1361
1362	return count;
1363}
1364
1365/* Probe the number and type of interrupts we are able to obtain, and
1366 * the resulting numbers of channels and RX queues.
1367 */
1368static int ef4_probe_interrupts(struct ef4_nic *efx)
1369{
1370	unsigned int extra_channels = 0;
1371	unsigned int i, j;
1372	int rc;
1373
1374	for (i = 0; i < EF4_MAX_EXTRA_CHANNELS; i++)
1375		if (efx->extra_channel_type[i])
1376			++extra_channels;
1377
1378	if (efx->interrupt_mode == EF4_INT_MODE_MSIX) {
1379		struct msix_entry xentries[EF4_MAX_CHANNELS];
1380		unsigned int n_channels;
1381
1382		n_channels = ef4_wanted_parallelism(efx);
1383		if (ef4_separate_tx_channels)
1384			n_channels *= 2;
1385		n_channels += extra_channels;
1386		n_channels = min(n_channels, efx->max_channels);
1387
1388		for (i = 0; i < n_channels; i++)
1389			xentries[i].entry = i;
1390		rc = pci_enable_msix_range(efx->pci_dev,
1391					   xentries, 1, n_channels);
1392		if (rc < 0) {
1393			/* Fall back to single channel MSI */
1394			efx->interrupt_mode = EF4_INT_MODE_MSI;
1395			netif_err(efx, drv, efx->net_dev,
1396				  "could not enable MSI-X\n");
1397		} else if (rc < n_channels) {
1398			netif_err(efx, drv, efx->net_dev,
1399				  "WARNING: Insufficient MSI-X vectors"
1400				  " available (%d < %u).\n", rc, n_channels);
1401			netif_err(efx, drv, efx->net_dev,
1402				  "WARNING: Performance may be reduced.\n");
1403			n_channels = rc;
1404		}
1405
1406		if (rc > 0) {
1407			efx->n_channels = n_channels;
1408			if (n_channels > extra_channels)
1409				n_channels -= extra_channels;
1410			if (ef4_separate_tx_channels) {
1411				efx->n_tx_channels = min(max(n_channels / 2,
1412							     1U),
1413							 efx->max_tx_channels);
1414				efx->n_rx_channels = max(n_channels -
1415							 efx->n_tx_channels,
1416							 1U);
1417			} else {
1418				efx->n_tx_channels = min(n_channels,
1419							 efx->max_tx_channels);
1420				efx->n_rx_channels = n_channels;
1421			}
1422			for (i = 0; i < efx->n_channels; i++)
1423				ef4_get_channel(efx, i)->irq =
1424					xentries[i].vector;
1425		}
1426	}
1427
1428	/* Try single interrupt MSI */
1429	if (efx->interrupt_mode == EF4_INT_MODE_MSI) {
1430		efx->n_channels = 1;
1431		efx->n_rx_channels = 1;
1432		efx->n_tx_channels = 1;
1433		rc = pci_enable_msi(efx->pci_dev);
1434		if (rc == 0) {
1435			ef4_get_channel(efx, 0)->irq = efx->pci_dev->irq;
1436		} else {
1437			netif_err(efx, drv, efx->net_dev,
1438				  "could not enable MSI\n");
1439			efx->interrupt_mode = EF4_INT_MODE_LEGACY;
1440		}
1441	}
1442
1443	/* Assume legacy interrupts */
1444	if (efx->interrupt_mode == EF4_INT_MODE_LEGACY) {
1445		efx->n_channels = 1 + (ef4_separate_tx_channels ? 1 : 0);
1446		efx->n_rx_channels = 1;
1447		efx->n_tx_channels = 1;
1448		efx->legacy_irq = efx->pci_dev->irq;
1449	}
1450
1451	/* Assign extra channels if possible */
1452	j = efx->n_channels;
1453	for (i = 0; i < EF4_MAX_EXTRA_CHANNELS; i++) {
1454		if (!efx->extra_channel_type[i])
1455			continue;
1456		if (efx->interrupt_mode != EF4_INT_MODE_MSIX ||
1457		    efx->n_channels <= extra_channels) {
1458			efx->extra_channel_type[i]->handle_no_channel(efx);
1459		} else {
1460			--j;
1461			ef4_get_channel(efx, j)->type =
1462				efx->extra_channel_type[i];
1463		}
1464	}
1465
1466	efx->rss_spread = efx->n_rx_channels;
1467
1468	return 0;
1469}
1470
1471static int ef4_soft_enable_interrupts(struct ef4_nic *efx)
1472{
1473	struct ef4_channel *channel, *end_channel;
1474	int rc;
1475
1476	BUG_ON(efx->state == STATE_DISABLED);
1477
1478	efx->irq_soft_enabled = true;
1479	smp_wmb();
1480
1481	ef4_for_each_channel(channel, efx) {
1482		if (!channel->type->keep_eventq) {
1483			rc = ef4_init_eventq(channel);
1484			if (rc)
1485				goto fail;
1486		}
1487		ef4_start_eventq(channel);
1488	}
1489
1490	return 0;
1491fail:
1492	end_channel = channel;
1493	ef4_for_each_channel(channel, efx) {
1494		if (channel == end_channel)
1495			break;
1496		ef4_stop_eventq(channel);
1497		if (!channel->type->keep_eventq)
1498			ef4_fini_eventq(channel);
1499	}
1500
1501	return rc;
1502}
1503
1504static void ef4_soft_disable_interrupts(struct ef4_nic *efx)
1505{
1506	struct ef4_channel *channel;
1507
1508	if (efx->state == STATE_DISABLED)
1509		return;
1510
1511	efx->irq_soft_enabled = false;
1512	smp_wmb();
1513
1514	if (efx->legacy_irq)
1515		synchronize_irq(efx->legacy_irq);
1516
1517	ef4_for_each_channel(channel, efx) {
1518		if (channel->irq)
1519			synchronize_irq(channel->irq);
1520
1521		ef4_stop_eventq(channel);
1522		if (!channel->type->keep_eventq)
1523			ef4_fini_eventq(channel);
1524	}
1525}
1526
1527static int ef4_enable_interrupts(struct ef4_nic *efx)
1528{
1529	struct ef4_channel *channel, *end_channel;
1530	int rc;
1531
1532	BUG_ON(efx->state == STATE_DISABLED);
1533
1534	if (efx->eeh_disabled_legacy_irq) {
1535		enable_irq(efx->legacy_irq);
1536		efx->eeh_disabled_legacy_irq = false;
1537	}
1538
1539	efx->type->irq_enable_master(efx);
1540
1541	ef4_for_each_channel(channel, efx) {
1542		if (channel->type->keep_eventq) {
1543			rc = ef4_init_eventq(channel);
1544			if (rc)
1545				goto fail;
1546		}
1547	}
1548
1549	rc = ef4_soft_enable_interrupts(efx);
1550	if (rc)
1551		goto fail;
1552
1553	return 0;
1554
1555fail:
1556	end_channel = channel;
1557	ef4_for_each_channel(channel, efx) {
1558		if (channel == end_channel)
1559			break;
1560		if (channel->type->keep_eventq)
1561			ef4_fini_eventq(channel);
1562	}
1563
1564	efx->type->irq_disable_non_ev(efx);
1565
1566	return rc;
1567}
1568
1569static void ef4_disable_interrupts(struct ef4_nic *efx)
1570{
1571	struct ef4_channel *channel;
1572
1573	ef4_soft_disable_interrupts(efx);
1574
1575	ef4_for_each_channel(channel, efx) {
1576		if (channel->type->keep_eventq)
1577			ef4_fini_eventq(channel);
1578	}
1579
1580	efx->type->irq_disable_non_ev(efx);
1581}
1582
1583static void ef4_remove_interrupts(struct ef4_nic *efx)
1584{
1585	struct ef4_channel *channel;
1586
1587	/* Remove MSI/MSI-X interrupts */
1588	ef4_for_each_channel(channel, efx)
1589		channel->irq = 0;
1590	pci_disable_msi(efx->pci_dev);
1591	pci_disable_msix(efx->pci_dev);
1592
1593	/* Remove legacy interrupt */
1594	efx->legacy_irq = 0;
1595}
1596
1597static void ef4_set_channels(struct ef4_nic *efx)
1598{
1599	struct ef4_channel *channel;
1600	struct ef4_tx_queue *tx_queue;
1601
1602	efx->tx_channel_offset =
1603		ef4_separate_tx_channels ?
1604		efx->n_channels - efx->n_tx_channels : 0;
1605
1606	/* We need to mark which channels really have RX and TX
1607	 * queues, and adjust the TX queue numbers if we have separate
1608	 * RX-only and TX-only channels.
1609	 */
1610	ef4_for_each_channel(channel, efx) {
1611		if (channel->channel < efx->n_rx_channels)
1612			channel->rx_queue.core_index = channel->channel;
1613		else
1614			channel->rx_queue.core_index = -1;
1615
1616		ef4_for_each_channel_tx_queue(tx_queue, channel)
1617			tx_queue->queue -= (efx->tx_channel_offset *
1618					    EF4_TXQ_TYPES);
1619	}
1620}
1621
1622static int ef4_probe_nic(struct ef4_nic *efx)
1623{
1624	int rc;
1625
1626	netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");
1627
1628	/* Carry out hardware-type specific initialisation */
1629	rc = efx->type->probe(efx);
1630	if (rc)
1631		return rc;
1632
1633	do {
1634		if (!efx->max_channels || !efx->max_tx_channels) {
1635			netif_err(efx, drv, efx->net_dev,
1636				  "Insufficient resources to allocate"
1637				  " any channels\n");
1638			rc = -ENOSPC;
1639			goto fail1;
1640		}
1641
1642		/* Determine the number of channels and queues by trying
1643		 * to hook in MSI-X interrupts.
1644		 */
1645		rc = ef4_probe_interrupts(efx);
1646		if (rc)
1647			goto fail1;
1648
1649		ef4_set_channels(efx);
1650
1651		/* dimension_resources can fail with EAGAIN */
1652		rc = efx->type->dimension_resources(efx);
1653		if (rc != 0 && rc != -EAGAIN)
1654			goto fail2;
1655
1656		if (rc == -EAGAIN)
1657			/* try again with new max_channels */
1658			ef4_remove_interrupts(efx);
1659
1660	} while (rc == -EAGAIN);
1661
1662	if (efx->n_channels > 1)
1663		netdev_rss_key_fill(&efx->rx_hash_key,
1664				    sizeof(efx->rx_hash_key));
1665	ef4_set_default_rx_indir_table(efx);
1666
1667	netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
1668	netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
1669
1670	/* Initialise the interrupt moderation settings */
1671	efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000);
1672	ef4_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
1673				true);
1674
1675	return 0;
1676
1677fail2:
1678	ef4_remove_interrupts(efx);
1679fail1:
1680	efx->type->remove(efx);
1681	return rc;
1682}
1683
1684static void ef4_remove_nic(struct ef4_nic *efx)
1685{
1686	netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");
1687
1688	ef4_remove_interrupts(efx);
1689	efx->type->remove(efx);
1690}
1691
1692static int ef4_probe_filters(struct ef4_nic *efx)
1693{
1694	int rc;
1695
1696	spin_lock_init(&efx->filter_lock);
1697	init_rwsem(&efx->filter_sem);
1698	mutex_lock(&efx->mac_lock);
1699	down_write(&efx->filter_sem);
1700	rc = efx->type->filter_table_probe(efx);
1701	if (rc)
1702		goto out_unlock;
1703
1704#ifdef CONFIG_RFS_ACCEL
1705	if (efx->type->offload_features & NETIF_F_NTUPLE) {
1706		struct ef4_channel *channel;
1707		int i, success = 1;
1708
1709		ef4_for_each_channel(channel, efx) {
1710			channel->rps_flow_id =
1711				kcalloc(efx->type->max_rx_ip_filters,
1712					sizeof(*channel->rps_flow_id),
1713					GFP_KERNEL);
1714			if (!channel->rps_flow_id)
1715				success = 0;
1716			else
1717				for (i = 0;
1718				     i < efx->type->max_rx_ip_filters;
1719				     ++i)
1720					channel->rps_flow_id[i] =
1721						RPS_FLOW_ID_INVALID;
1722		}
1723
1724		if (!success) {
1725			ef4_for_each_channel(channel, efx)
1726				kfree(channel->rps_flow_id);
1727			efx->type->filter_table_remove(efx);
1728			rc = -ENOMEM;
1729			goto out_unlock;
1730		}
1731
1732		efx->rps_expire_index = efx->rps_expire_channel = 0;
1733	}
1734#endif
1735out_unlock:
1736	up_write(&efx->filter_sem);
1737	mutex_unlock(&efx->mac_lock);
1738	return rc;
1739}
1740
1741static void ef4_remove_filters(struct ef4_nic *efx)
1742{
1743#ifdef CONFIG_RFS_ACCEL
1744	struct ef4_channel *channel;
1745
1746	ef4_for_each_channel(channel, efx)
1747		kfree(channel->rps_flow_id);
1748#endif
1749	down_write(&efx->filter_sem);
1750	efx->type->filter_table_remove(efx);
1751	up_write(&efx->filter_sem);
1752}
1753
1754static void ef4_restore_filters(struct ef4_nic *efx)
1755{
1756	down_read(&efx->filter_sem);
1757	efx->type->filter_table_restore(efx);
1758	up_read(&efx->filter_sem);
1759}
1760
1761/**************************************************************************
1762 *
1763 * NIC startup/shutdown
1764 *
1765 *************************************************************************/
1766
1767static int ef4_probe_all(struct ef4_nic *efx)
1768{
1769	int rc;
1770
1771	rc = ef4_probe_nic(efx);
1772	if (rc) {
1773		netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
1774		goto fail1;
1775	}
1776
1777	rc = ef4_probe_port(efx);
1778	if (rc) {
1779		netif_err(efx, probe, efx->net_dev, "failed to create port\n");
1780		goto fail2;
1781	}
1782
1783	BUILD_BUG_ON(EF4_DEFAULT_DMAQ_SIZE < EF4_RXQ_MIN_ENT);
1784	if (WARN_ON(EF4_DEFAULT_DMAQ_SIZE < EF4_TXQ_MIN_ENT(efx))) {
1785		rc = -EINVAL;
1786		goto fail3;
1787	}
1788	efx->rxq_entries = efx->txq_entries = EF4_DEFAULT_DMAQ_SIZE;
1789
1790	rc = ef4_probe_filters(efx);
1791	if (rc) {
1792		netif_err(efx, probe, efx->net_dev,
1793			  "failed to create filter tables\n");
1794		goto fail4;
1795	}
1796
1797	rc = ef4_probe_channels(efx);
1798	if (rc)
1799		goto fail5;
1800
1801	return 0;
1802
1803 fail5:
1804	ef4_remove_filters(efx);
1805 fail4:
1806 fail3:
1807	ef4_remove_port(efx);
1808 fail2:
1809	ef4_remove_nic(efx);
1810 fail1:
1811	return rc;
1812}
1813
1814/* If the interface is supposed to be running but is not, start
1815 * the hardware and software data path, regular activity for the port
1816 * (MAC statistics, link polling, etc.) and schedule the port to be
1817 * reconfigured.  Interrupts must already be enabled.  This function
1818 * is safe to call multiple times, so long as the NIC is not disabled.
1819 * Requires the RTNL lock.
1820 */
1821static void ef4_start_all(struct ef4_nic *efx)
1822{
1823	EF4_ASSERT_RESET_SERIALISED(efx);
1824	BUG_ON(efx->state == STATE_DISABLED);
1825
1826	/* Check that it is appropriate to restart the interface. All
1827	 * of these flags are safe to read under just the rtnl lock */
1828	if (efx->port_enabled || !netif_running(efx->net_dev) ||
1829	    efx->reset_pending)
1830		return;
1831
1832	ef4_start_port(efx);
1833	ef4_start_datapath(efx);
1834
1835	/* Start the hardware monitor if there is one */
1836	if (efx->type->monitor != NULL)
1837		queue_delayed_work(efx->workqueue, &efx->monitor_work,
1838				   ef4_monitor_interval);
1839
1840	efx->type->start_stats(efx);
1841	efx->type->pull_stats(efx);
1842	spin_lock_bh(&efx->stats_lock);
1843	efx->type->update_stats(efx, NULL, NULL);
1844	spin_unlock_bh(&efx->stats_lock);
1845}
1846
1847/* Quiesce the hardware and software data path, and regular activity
1848 * for the port without bringing the link down.  Safe to call multiple
1849 * times with the NIC in almost any state, but interrupts should be
1850 * enabled.  Requires the RTNL lock.
1851 */
1852static void ef4_stop_all(struct ef4_nic *efx)
1853{
1854	EF4_ASSERT_RESET_SERIALISED(efx);
1855
1856	/* port_enabled can be read safely under the rtnl lock */
1857	if (!efx->port_enabled)
1858		return;
1859
1860	/* update stats before we go down so we can accurately count
1861	 * rx_nodesc_drops
1862	 */
1863	efx->type->pull_stats(efx);
1864	spin_lock_bh(&efx->stats_lock);
1865	efx->type->update_stats(efx, NULL, NULL);
1866	spin_unlock_bh(&efx->stats_lock);
1867	efx->type->stop_stats(efx);
1868	ef4_stop_port(efx);
1869
1870	/* Stop the kernel transmit interface.  This is only valid if
1871	 * the device is stopped or detached; otherwise the watchdog
1872	 * may fire immediately.
1873	 */
1874	WARN_ON(netif_running(efx->net_dev) &&
1875		netif_device_present(efx->net_dev));
1876	netif_tx_disable(efx->net_dev);
1877
1878	ef4_stop_datapath(efx);
1879}
1880
1881static void ef4_remove_all(struct ef4_nic *efx)
1882{
1883	ef4_remove_channels(efx);
1884	ef4_remove_filters(efx);
1885	ef4_remove_port(efx);
1886	ef4_remove_nic(efx);
1887}
1888
1889/**************************************************************************
1890 *
1891 * Interrupt moderation
1892 *
1893 **************************************************************************/
1894unsigned int ef4_usecs_to_ticks(struct ef4_nic *efx, unsigned int usecs)
1895{
1896	if (usecs == 0)
1897		return 0;
1898	if (usecs * 1000 < efx->timer_quantum_ns)
1899		return 1; /* never round down to 0 */
1900	return usecs * 1000 / efx->timer_quantum_ns;
1901}
1902
1903unsigned int ef4_ticks_to_usecs(struct ef4_nic *efx, unsigned int ticks)
1904{
1905	/* We must round up when converting ticks to microseconds
1906	 * because we round down when converting the other way.
1907	 */
1908	return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000);
1909}
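/* Editor's note: worked example of the two conversions above, assuming a
 * hypothetical timer_quantum_ns of 5000 (a 5us hardware tick):
 *   ef4_usecs_to_ticks(efx, 60) = 60 * 1000 / 5000 = 12 ticks
 *   ef4_usecs_to_ticks(efx, 3)  = 1  (3us is less than one tick; clamped
 *                                     up so moderation is never lost)
 *   ef4_ticks_to_usecs(efx, 12) = DIV_ROUND_UP(12 * 5000, 1000) = 60us
 * Rounding down one way and up the other keeps a round trip from ever
 * under-reporting the moderation actually programmed.
 */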
1910
1911/* Set interrupt moderation parameters */
1912int ef4_init_irq_moderation(struct ef4_nic *efx, unsigned int tx_usecs,
1913			    unsigned int rx_usecs, bool rx_adaptive,
1914			    bool rx_may_override_tx)
1915{
1916	struct ef4_channel *channel;
1917	unsigned int timer_max_us;
1918
1919	EF4_ASSERT_RESET_SERIALISED(efx);
1920
1921	timer_max_us = efx->timer_max_ns / 1000;
1922
1923	if (tx_usecs > timer_max_us || rx_usecs > timer_max_us)
1924		return -EINVAL;
1925
1926	if (tx_usecs != rx_usecs && efx->tx_channel_offset == 0 &&
1927	    !rx_may_override_tx) {
1928		netif_err(efx, drv, efx->net_dev, "Channels are shared. "
1929			  "RX and TX IRQ moderation must be equal\n");
1930		return -EINVAL;
1931	}
1932
1933	efx->irq_rx_adaptive = rx_adaptive;
1934	efx->irq_rx_moderation_us = rx_usecs;
1935	ef4_for_each_channel(channel, efx) {
1936		if (ef4_channel_has_rx_queue(channel))
1937			channel->irq_moderation_us = rx_usecs;
1938		else if (ef4_channel_has_tx_queues(channel))
1939			channel->irq_moderation_us = tx_usecs;
1940	}
1941
1942	return 0;
1943}
1944
1945void ef4_get_irq_moderation(struct ef4_nic *efx, unsigned int *tx_usecs,
1946			    unsigned int *rx_usecs, bool *rx_adaptive)
1947{
1948	*rx_adaptive = efx->irq_rx_adaptive;
1949	*rx_usecs = efx->irq_rx_moderation_us;
1950
1951	/* If channels are shared between RX and TX, so is IRQ
1952	 * moderation.  Otherwise, IRQ moderation is the same for all
1953	 * TX channels and is not adaptive.
1954	 */
1955	if (efx->tx_channel_offset == 0) {
1956		*tx_usecs = *rx_usecs;
1957	} else {
1958		struct ef4_channel *tx_channel;
1959
1960		tx_channel = efx->channel[efx->tx_channel_offset];
1961		*tx_usecs = tx_channel->irq_moderation_us;
1962	}
1963}
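/* Editor's note: a minimal usage sketch (not part of this driver) showing
 * how an ethtool-style coalesce handler might drive the pair above; the
 * 150/60 microsecond values are illustrative only:
 *
 *	unsigned int tx_usecs, rx_usecs;
 *	bool rx_adaptive;
 *	int rc;
 *
 *	rc = ef4_init_irq_moderation(efx, 150, 60, true, false);
 *	if (rc)
 *		return rc;
 *	ef4_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &rx_adaptive);
 */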
1964
1965/**************************************************************************
1966 *
1967 * Hardware monitor
1968 *
1969 **************************************************************************/
1970
1971/* Run periodically off the general workqueue */
1972static void ef4_monitor(struct work_struct *data)
1973{
1974	struct ef4_nic *efx = container_of(data, struct ef4_nic,
1975					   monitor_work.work);
1976
1977	netif_vdbg(efx, timer, efx->net_dev,
1978		   "hardware monitor executing on CPU %d\n",
1979		   raw_smp_processor_id());
1980	BUG_ON(efx->type->monitor == NULL);
1981
1982	/* If the mac_lock is already held then it is likely a port
1983	 * reconfiguration is already in place, which will likely do
1984	 * most of the work of monitor() anyway. */
1985	if (mutex_trylock(&efx->mac_lock)) {
1986		if (efx->port_enabled)
1987			efx->type->monitor(efx);
1988		mutex_unlock(&efx->mac_lock);
1989	}
1990
1991	queue_delayed_work(efx->workqueue, &efx->monitor_work,
1992			   ef4_monitor_interval);
1993}
1994
1995/**************************************************************************
1996 *
1997 * ioctls
1998 *
1999 *************************************************************************/
2000
2001/* Net device ioctl
2002 * Context: process, rtnl_lock() held.
2003 */
2004static int ef4_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
2005{
2006	struct ef4_nic *efx = netdev_priv(net_dev);
2007	struct mii_ioctl_data *data = if_mii(ifr);
2008
2009	/* Convert phy_id from older PRTAD/DEVAD format */
2010	if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
2011	    (data->phy_id & 0xfc00) == 0x0400)
2012		data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;
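	/* Editor's note: worked example of the conversion above.  The old
	 * encoding flagged clause 45 with 0x0400; the current flag is
	 * MDIO_PHY_ID_C45 (bit 15, 0x8000).  XOR-ing with (0x8000 | 0x0400)
	 * sets the new flag and clears the old one in a single step, e.g.
	 * 0x045a ^ 0x8400 = 0x805a, leaving the PRTAD/DEVAD bits intact.
	 */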
2013
2014	return mdio_mii_ioctl(&efx->mdio, data, cmd);
2015}
2016
2017/**************************************************************************
2018 *
2019 * NAPI interface
2020 *
2021 **************************************************************************/
2022
2023static void ef4_init_napi_channel(struct ef4_channel *channel)
2024{
2025	struct ef4_nic *efx = channel->efx;
2026
2027	channel->napi_dev = efx->net_dev;
2028	netif_napi_add(channel->napi_dev, &channel->napi_str,
2029		       ef4_poll, napi_weight);
2030}
2031
2032static void ef4_init_napi(struct ef4_nic *efx)
2033{
2034	struct ef4_channel *channel;
2035
2036	ef4_for_each_channel(channel, efx)
2037		ef4_init_napi_channel(channel);
2038}
2039
2040static void ef4_fini_napi_channel(struct ef4_channel *channel)
2041{
2042	if (channel->napi_dev)
2043		netif_napi_del(&channel->napi_str);
2044
2045	channel->napi_dev = NULL;
2046}
2047
2048static void ef4_fini_napi(struct ef4_nic *efx)
2049{
2050	struct ef4_channel *channel;
2051
2052	ef4_for_each_channel(channel, efx)
2053		ef4_fini_napi_channel(channel);
2054}
2055
2056/**************************************************************************
2057 *
2058 * Kernel netpoll interface
2059 *
2060 *************************************************************************/
2061
2062#ifdef CONFIG_NET_POLL_CONTROLLER
2063
2064/* Although in the common case interrupts will be disabled, this is not
2065 * guaranteed. However, all our work happens inside the NAPI callback,
2066 * so no locking is required.
2067 */
2068static void ef4_netpoll(struct net_device *net_dev)
2069{
2070	struct ef4_nic *efx = netdev_priv(net_dev);
2071	struct ef4_channel *channel;
2072
2073	ef4_for_each_channel(channel, efx)
2074		ef4_schedule_channel(channel);
2075}
2076
2077#endif
2078
2079/**************************************************************************
2080 *
2081 * Kernel net device interface
2082 *
2083 *************************************************************************/
2084
2085/* Context: process, rtnl_lock() held. */
2086int ef4_net_open(struct net_device *net_dev)
2087{
2088	struct ef4_nic *efx = netdev_priv(net_dev);
2089	int rc;
2090
2091	netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
2092		  raw_smp_processor_id());
2093
2094	rc = ef4_check_disabled(efx);
2095	if (rc)
2096		return rc;
2097	if (efx->phy_mode & PHY_MODE_SPECIAL)
2098		return -EBUSY;
2099
2100	/* Notify the kernel of the link state polled during driver load,
2101	 * before the monitor starts running */
2102	ef4_link_status_changed(efx);
2103
2104	ef4_start_all(efx);
2105	ef4_selftest_async_start(efx);
2106	return 0;
2107}
2108
2109/* Context: process, rtnl_lock() held.
2110 * Note that the kernel will ignore our return code; this method
2111 * should really be void.
2112 */
2113int ef4_net_stop(struct net_device *net_dev)
2114{
2115	struct ef4_nic *efx = netdev_priv(net_dev);
2116
2117	netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
2118		  raw_smp_processor_id());
2119
2120	/* Stop the device and flush all the channels */
2121	ef4_stop_all(efx);
2122
2123	return 0;
2124}
2125
2126/* Context: process, dev_base_lock or RTNL held, non-blocking. */
2127static void ef4_net_stats(struct net_device *net_dev,
2128			  struct rtnl_link_stats64 *stats)
2129{
2130	struct ef4_nic *efx = netdev_priv(net_dev);
2131
2132	spin_lock_bh(&efx->stats_lock);
2133	efx->type->update_stats(efx, NULL, stats);
2134	spin_unlock_bh(&efx->stats_lock);
2135}
2136
2137/* Context: netif_tx_lock held, BHs disabled. */
2138static void ef4_watchdog(struct net_device *net_dev)
2139{
2140	struct ef4_nic *efx = netdev_priv(net_dev);
2141
2142	netif_err(efx, tx_err, efx->net_dev,
2143		  "TX stuck with port_enabled=%d: resetting channels\n",
2144		  efx->port_enabled);
2145
2146	ef4_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
2147}
2148
2149
2150/* Context: process, rtnl_lock() held. */
2151static int ef4_change_mtu(struct net_device *net_dev, int new_mtu)
2152{
2153	struct ef4_nic *efx = netdev_priv(net_dev);
2154	int rc;
2155
2156	rc = ef4_check_disabled(efx);
2157	if (rc)
2158		return rc;
2159
2160	netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
2161
2162	ef4_device_detach_sync(efx);
2163	ef4_stop_all(efx);
2164
2165	mutex_lock(&efx->mac_lock);
2166	net_dev->mtu = new_mtu;
2167	ef4_mac_reconfigure(efx);
2168	mutex_unlock(&efx->mac_lock);
2169
2170	ef4_start_all(efx);
2171	netif_device_attach(efx->net_dev);
2172	return 0;
2173}
2174
2175static int ef4_set_mac_address(struct net_device *net_dev, void *data)
2176{
2177	struct ef4_nic *efx = netdev_priv(net_dev);
2178	struct sockaddr *addr = data;
2179	u8 *new_addr = addr->sa_data;
2180	u8 old_addr[ETH_ALEN];
2181	int rc;
2182
2183	if (!is_valid_ether_addr(new_addr)) {
2184		netif_err(efx, drv, efx->net_dev,
2185			  "invalid ethernet MAC address requested: %pM\n",
2186			  new_addr);
2187		return -EADDRNOTAVAIL;
2188	}
2189
2190	/* save old address */
2191	ether_addr_copy(old_addr, net_dev->dev_addr);
2192	ether_addr_copy(net_dev->dev_addr, new_addr);
2193	if (efx->type->set_mac_address) {
2194		rc = efx->type->set_mac_address(efx);
2195		if (rc) {
2196			ether_addr_copy(net_dev->dev_addr, old_addr);
2197			return rc;
2198		}
2199	}
2200
2201	/* Reconfigure the MAC */
2202	mutex_lock(&efx->mac_lock);
2203	ef4_mac_reconfigure(efx);
2204	mutex_unlock(&efx->mac_lock);
2205
2206	return 0;
2207}
2208
2209/* Context: netif_addr_lock held, BHs disabled. */
2210static void ef4_set_rx_mode(struct net_device *net_dev)
2211{
2212	struct ef4_nic *efx = netdev_priv(net_dev);
2213
2214	if (efx->port_enabled)
2215		queue_work(efx->workqueue, &efx->mac_work);
2216	/* Otherwise ef4_start_port() will do this */
2217}
2218
2219static int ef4_set_features(struct net_device *net_dev, netdev_features_t data)
2220{
2221	struct ef4_nic *efx = netdev_priv(net_dev);
2222	int rc;
2223
2224	/* If disabling RX n-tuple filtering, clear existing filters */
2225	if (net_dev->features & ~data & NETIF_F_NTUPLE) {
2226		rc = efx->type->filter_clear_rx(efx, EF4_FILTER_PRI_MANUAL);
2227		if (rc)
2228			return rc;
2229	}
2230
2231	/* If Rx VLAN filter is changed, update filters via mac_reconfigure */
2232	if ((net_dev->features ^ data) & NETIF_F_HW_VLAN_CTAG_FILTER) {
2233		/* ef4_set_rx_mode() will schedule MAC work to update filters
2234		 * when the new features are finally set in net_dev.
2235		 */
2236		ef4_set_rx_mode(net_dev);
2237	}
2238
2239	return 0;
2240}
2241
2242static const struct net_device_ops ef4_netdev_ops = {
2243	.ndo_open		= ef4_net_open,
2244	.ndo_stop		= ef4_net_stop,
2245	.ndo_get_stats64	= ef4_net_stats,
2246	.ndo_tx_timeout		= ef4_watchdog,
2247	.ndo_start_xmit		= ef4_hard_start_xmit,
2248	.ndo_validate_addr	= eth_validate_addr,
2249	.ndo_do_ioctl		= ef4_ioctl,
2250	.ndo_change_mtu		= ef4_change_mtu,
2251	.ndo_set_mac_address	= ef4_set_mac_address,
2252	.ndo_set_rx_mode	= ef4_set_rx_mode,
2253	.ndo_set_features	= ef4_set_features,
2254#ifdef CONFIG_NET_POLL_CONTROLLER
2255	.ndo_poll_controller = ef4_netpoll,
2256#endif
2257	.ndo_setup_tc		= ef4_setup_tc,
2258#ifdef CONFIG_RFS_ACCEL
2259	.ndo_rx_flow_steer	= ef4_filter_rfs,
2260#endif
2261};
2262
2263static void ef4_update_name(struct ef4_nic *efx)
2264{
2265	strcpy(efx->name, efx->net_dev->name);
2266	ef4_mtd_rename(efx);
2267	ef4_set_channel_names(efx);
2268}
2269
2270static int ef4_netdev_event(struct notifier_block *this,
2271			    unsigned long event, void *ptr)
2272{
2273	struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
2274
2275	if ((net_dev->netdev_ops == &ef4_netdev_ops) &&
2276	    event == NETDEV_CHANGENAME)
2277		ef4_update_name(netdev_priv(net_dev));
2278
2279	return NOTIFY_DONE;
2280}
2281
2282static struct notifier_block ef4_netdev_notifier = {
2283	.notifier_call = ef4_netdev_event,
2284};
2285
2286static ssize_t
2287show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
2288{
2289	struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev));
2290	return sprintf(buf, "%d\n", efx->phy_type);
2291}
2292static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
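/* Editor's note: the attribute above appears under the PCI device's sysfs
 * directory, so a hypothetical read would look like
 *   cat /sys/bus/pci/devices/<bdf>/phy_type
 * where <bdf> stands in for the NIC's bus/device/function address.
 */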
2293
2294static int ef4_register_netdev(struct ef4_nic *efx)
2295{
2296	struct net_device *net_dev = efx->net_dev;
2297	struct ef4_channel *channel;
2298	int rc;
2299
2300	net_dev->watchdog_timeo = 5 * HZ;
2301	net_dev->irq = efx->pci_dev->irq;
2302	net_dev->netdev_ops = &ef4_netdev_ops;
2303	net_dev->ethtool_ops = &ef4_ethtool_ops;
2304	net_dev->gso_max_segs = EF4_TSO_MAX_SEGS;
2305	net_dev->min_mtu = EF4_MIN_MTU;
2306	net_dev->max_mtu = EF4_MAX_MTU;
2307
2308	rtnl_lock();
2309
2310	/* Enable resets to be scheduled and check whether any were
2311	 * already requested.  If so, the NIC is probably hosed so we
2312	 * abort.
2313	 */
2314	efx->state = STATE_READY;
2315	smp_mb(); /* ensure we change state before checking reset_pending */
2316	if (efx->reset_pending) {
2317		netif_err(efx, probe, efx->net_dev,
2318			  "aborting probe due to scheduled reset\n");
2319		rc = -EIO;
2320		goto fail_locked;
2321	}
2322
2323	rc = dev_alloc_name(net_dev, net_dev->name);
2324	if (rc < 0)
2325		goto fail_locked;
2326	ef4_update_name(efx);
2327
2328	/* Always start with carrier off; PHY events will detect the link */
2329	netif_carrier_off(net_dev);
2330
2331	rc = register_netdevice(net_dev);
2332	if (rc)
2333		goto fail_locked;
2334
2335	ef4_for_each_channel(channel, efx) {
2336		struct ef4_tx_queue *tx_queue;
2337		ef4_for_each_channel_tx_queue(tx_queue, channel)
2338			ef4_init_tx_queue_core_txq(tx_queue);
2339	}
2340
2341	ef4_associate(efx);
2342
2343	rtnl_unlock();
2344
2345	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
2346	if (rc) {
2347		netif_err(efx, drv, efx->net_dev,
2348			  "failed to init net dev attributes\n");
2349		goto fail_registered;
2350	}
2351	return 0;
2352
2353fail_registered:
2354	rtnl_lock();
2355	ef4_dissociate(efx);
2356	unregister_netdevice(net_dev);
2357fail_locked:
2358	efx->state = STATE_UNINIT;
2359	rtnl_unlock();
2360	netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
2361	return rc;
2362}
2363
2364static void ef4_unregister_netdev(struct ef4_nic *efx)
2365{
2366	if (!efx->net_dev)
2367		return;
2368
2369	BUG_ON(netdev_priv(efx->net_dev) != efx);
2370
2371	if (ef4_dev_registered(efx)) {
2372		strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
2373		device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
2374		unregister_netdev(efx->net_dev);
2375	}
2376}
2377
2378/**************************************************************************
2379 *
2380 * Device reset and suspend
2381 *
2382 **************************************************************************/
2383
2384/* Tears down the entire software state and most of the hardware state
2385 * before reset.  */
2386void ef4_reset_down(struct ef4_nic *efx, enum reset_type method)
2387{
2388	EF4_ASSERT_RESET_SERIALISED(efx);
2389
2390	ef4_stop_all(efx);
2391	ef4_disable_interrupts(efx);
2392
2393	mutex_lock(&efx->mac_lock);
2394	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
2395	    method != RESET_TYPE_DATAPATH)
2396		efx->phy_op->fini(efx);
2397	efx->type->fini(efx);
2398}
2399
2400/* This function will always ensure that the locks acquired in
2401 * ef4_reset_down() are released. A failure return code indicates
2402 * that we were unable to reinitialise the hardware, and the
2403 * driver should be disabled. If ok is false, then the rx and tx
2404 * engines are not restarted, pending a RESET_DISABLE. */
2405int ef4_reset_up(struct ef4_nic *efx, enum reset_type method, bool ok)
2406{
2407	int rc;
2408
2409	EF4_ASSERT_RESET_SERIALISED(efx);
2410
2411	/* Ensure that SRAM is initialised even if we're disabling the device */
2412	rc = efx->type->init(efx);
2413	if (rc) {
2414		netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
2415		goto fail;
2416	}
2417
2418	if (!ok)
2419		goto fail;
2420
2421	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
2422	    method != RESET_TYPE_DATAPATH) {
2423		rc = efx->phy_op->init(efx);
2424		if (rc)
2425			goto fail;
2426		rc = efx->phy_op->reconfigure(efx);
2427		if (rc && rc != -EPERM)
2428			netif_err(efx, drv, efx->net_dev,
2429				  "could not restore PHY settings\n");
2430	}
2431
2432	rc = ef4_enable_interrupts(efx);
2433	if (rc)
2434		goto fail;
2435
2436	/* ef4_restore_filters() takes filter_sem itself; a second read
2437	 * lock here could deadlock against a queued writer. */
2438	ef4_restore_filters(efx);
2439
2440	mutex_unlock(&efx->mac_lock);
2441
2442	ef4_start_all(efx);
2443
2444	return 0;
2445
2446fail:
2447	efx->port_initialized = false;
2448
2449	mutex_unlock(&efx->mac_lock);
2450
2451	return rc;
2452}
2453
2454/* Reset the NIC using the specified method.  Note that the reset may
2455 * fail, in which case the card will be left in an unusable state.
2456 *
2457 * Caller must hold the rtnl_lock.
2458 */
2459int ef4_reset(struct ef4_nic *efx, enum reset_type method)
2460{
2461	int rc, rc2;
2462	bool disabled;
2463
2464	netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
2465		   RESET_TYPE(method));
2466
2467	ef4_device_detach_sync(efx);
2468	ef4_reset_down(efx, method);
2469
2470	rc = efx->type->reset(efx, method);
2471	if (rc) {
2472		netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
2473		goto out;
2474	}
2475
2476	/* Clear flags for the scopes we covered.  We assume the NIC and
2477	 * driver are now quiescent so that there is no race here.
2478	 */
2479	if (method < RESET_TYPE_MAX_METHOD)
2480		efx->reset_pending &= -(1 << (method + 1));
2481	else /* it doesn't fit into the well-ordered scope hierarchy */
2482		__clear_bit(method, &efx->reset_pending);
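	/* Editor's note: worked example of the mask above.  In two's
	 * complement, -(1 << (method + 1)) == ~((1 << (method + 1)) - 1);
	 * for method == 2 that is ...11111000, so the &= clears pending
	 * bits 0-2 (this scope and the narrower ones it subsumes) while
	 * leaving any wider-scoped requests queued.
	 */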
2483
2484	/* Reinitialise bus-mastering, which may have been turned off before
2485	 * the reset was scheduled. This is still appropriate even in the
2486	 * RESET_TYPE_DISABLE case, since this driver generally assumes the
2487	 * hardware can respond to requests. */
2488	pci_set_master(efx->pci_dev);
2489
2490out:
2491	/* Leave device stopped if necessary */
2492	disabled = rc ||
2493		method == RESET_TYPE_DISABLE ||
2494		method == RESET_TYPE_RECOVER_OR_DISABLE;
2495	rc2 = ef4_reset_up(efx, method, !disabled);
2496	if (rc2) {
2497		disabled = true;
2498		if (!rc)
2499			rc = rc2;
2500	}
2501
2502	if (disabled) {
2503		dev_close(efx->net_dev);
2504		netif_err(efx, drv, efx->net_dev, "has been disabled\n");
2505		efx->state = STATE_DISABLED;
2506	} else {
2507		netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
2508		netif_device_attach(efx->net_dev);
2509	}
2510	return rc;
2511}
2512
2513/* Try recovery mechanisms.
2514 * For now only EEH is supported.
2515 * Returns 0 if the recovery mechanisms are unsuccessful.
2516 * Returns a non-zero value otherwise.
2517 */
2518int ef4_try_recovery(struct ef4_nic *efx)
2519{
2520#ifdef CONFIG_EEH
2521	/* A PCI error can occur and not be seen by EEH because nothing
2522	 * happens on the PCI bus. In this case the driver may fail and
2523	 * schedule a 'recover or reset', leading to this recovery handler.
2524	 * Manually call the eeh failure check function.
2525	 */
2526	struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev);
2527	if (eeh_dev_check_failure(eehdev)) {
2528		/* The EEH mechanisms will handle the error and reset the
2529		 * device if necessary.
2530		 */
2531		return 1;
2532	}
2533#endif
2534	return 0;
2535}
2536
2537/* The worker thread exists so that code that cannot sleep can
2538 * schedule a reset for later.
2539 */
2540static void ef4_reset_work(struct work_struct *data)
2541{
2542	struct ef4_nic *efx = container_of(data, struct ef4_nic, reset_work);
2543	unsigned long pending;
2544	enum reset_type method;
2545
2546	pending = READ_ONCE(efx->reset_pending);
2547	method = fls(pending) - 1;
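	/* Editor's note: fls() returns the 1-based position of the highest
	 * set bit, so this selects the widest-scoped pending reset, e.g.
	 * pending == 0b0101 gives fls() == 3 and method == 2.  If pending
	 * is 0, method is -1, which is harmless: it matches neither
	 * recovery type and the !pending test below returns early.
	 */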
2548
2549	if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
2550	     method == RESET_TYPE_RECOVER_OR_ALL) &&
2551	    ef4_try_recovery(efx))
2552		return;
2553
2554	if (!pending)
2555		return;
2556
2557	rtnl_lock();
2558
2559	/* We checked the state in ef4_schedule_reset() but it may
2560	 * have changed by now.  Now that we have the RTNL lock,
2561	 * it cannot change again.
2562	 */
2563	if (efx->state == STATE_READY)
2564		(void)ef4_reset(efx, method);
2565
2566	rtnl_unlock();
2567}
2568
2569void ef4_schedule_reset(struct ef4_nic *efx, enum reset_type type)
2570{
2571	enum reset_type method;
2572
2573	if (efx->state == STATE_RECOVERY) {
2574		netif_dbg(efx, drv, efx->net_dev,
2575			  "recovering: skip scheduling %s reset\n",
2576			  RESET_TYPE(type));
2577		return;
2578	}
2579
2580	switch (type) {
2581	case RESET_TYPE_INVISIBLE:
2582	case RESET_TYPE_ALL:
2583	case RESET_TYPE_RECOVER_OR_ALL:
2584	case RESET_TYPE_WORLD:
2585	case RESET_TYPE_DISABLE:
2586	case RESET_TYPE_RECOVER_OR_DISABLE:
2587	case RESET_TYPE_DATAPATH:
2588		method = type;
2589		netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
2590			  RESET_TYPE(method));
2591		break;
2592	default:
2593		method = efx->type->map_reset_reason(type);
2594		netif_dbg(efx, drv, efx->net_dev,
2595			  "scheduling %s reset for %s\n",
2596			  RESET_TYPE(method), RESET_TYPE(type));
2597		break;
2598	}
2599
2600	set_bit(method, &efx->reset_pending);
2601	smp_mb(); /* ensure we change reset_pending before checking state */
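	/* Editor's note: this barrier pairs with the smp_mb() in
	 * ef4_register_netdev(), which sets STATE_READY before re-checking
	 * reset_pending; between them, at least one path is guaranteed to
	 * observe the other's update, so a reset cannot be lost.
	 */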
2602
2603	/* If we're not READY then just leave the flags set as the cue
2604	 * to abort probing or reschedule the reset later.
2605	 */
2606	if (READ_ONCE(efx->state) != STATE_READY)
2607		return;
2608
2609	queue_work(reset_workqueue, &efx->reset_work);
2610}
2611
2612/**************************************************************************
2613 *
2614 * List of NICs we support
2615 *
2616 **************************************************************************/
2617
2618/* PCI device ID table */
2619static const struct pci_device_id ef4_pci_table[] = {
2620	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
2621		    PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0),
2622	 .driver_data = (unsigned long) &falcon_a1_nic_type},
2623	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
2624		    PCI_DEVICE_ID_SOLARFLARE_SFC4000B),
2625	 .driver_data = (unsigned long) &falcon_b0_nic_type},
2626	{0}			/* end of list */
2627};
2628
2629/**************************************************************************
2630 *
2631 * Dummy PHY/MAC operations
2632 *
2633 * Can be used for some unimplemented operations
2634 * Needed so all function pointers are valid and do not have to be tested
2635 * before use
2636 *
2637 **************************************************************************/
2638int ef4_port_dummy_op_int(struct ef4_nic *efx)
2639{
2640	return 0;
2641}
2642void ef4_port_dummy_op_void(struct ef4_nic *efx) {}
2643
2644static bool ef4_port_dummy_op_poll(struct ef4_nic *efx)
2645{
2646	return false;
2647}
2648
2649static const struct ef4_phy_operations ef4_dummy_phy_operations = {
2650	.init		 = ef4_port_dummy_op_int,
2651	.reconfigure	 = ef4_port_dummy_op_int,
2652	.poll		 = ef4_port_dummy_op_poll,
2653	.fini		 = ef4_port_dummy_op_void,
2654};
2655
2656/**************************************************************************
2657 *
2658 * Data housekeeping
2659 *
2660 **************************************************************************/
2661
2662/* This zeroes out and then fills in the invariants in a struct
2663 * ef4_nic (including all sub-structures).
2664 */
2665static int ef4_init_struct(struct ef4_nic *efx,
2666			   struct pci_dev *pci_dev, struct net_device *net_dev)
2667{
2668	int i;
2669
2670	/* Initialise common structures */
2671	INIT_LIST_HEAD(&efx->node);
2672	INIT_LIST_HEAD(&efx->secondary_list);
2673	spin_lock_init(&efx->biu_lock);
2674#ifdef CONFIG_SFC_FALCON_MTD
2675	INIT_LIST_HEAD(&efx->mtd_list);
2676#endif
2677	INIT_WORK(&efx->reset_work, ef4_reset_work);
2678	INIT_DELAYED_WORK(&efx->monitor_work, ef4_monitor);
2679	INIT_DELAYED_WORK(&efx->selftest_work, ef4_selftest_async_work);
2680	efx->pci_dev = pci_dev;
2681	efx->msg_enable = debug;
2682	efx->state = STATE_UNINIT;
2683	strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
2684
2685	efx->net_dev = net_dev;
2686	efx->rx_prefix_size = efx->type->rx_prefix_size;
2687	efx->rx_ip_align =
2688		NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
2689	efx->rx_packet_hash_offset =
2690		efx->type->rx_hash_offset - efx->type->rx_prefix_size;
2691	efx->rx_packet_ts_offset =
2692		efx->type->rx_ts_offset - efx->type->rx_prefix_size;
2693	spin_lock_init(&efx->stats_lock);
2694	mutex_init(&efx->mac_lock);
2695	efx->phy_op = &ef4_dummy_phy_operations;
2696	efx->mdio.dev = net_dev;
2697	INIT_WORK(&efx->mac_work, ef4_mac_work);
2698	init_waitqueue_head(&efx->flush_wq);
2699
2700	for (i = 0; i < EF4_MAX_CHANNELS; i++) {
2701		efx->channel[i] = ef4_alloc_channel(efx, i, NULL);
2702		if (!efx->channel[i])
2703			goto fail;
2704		efx->msi_context[i].efx = efx;
2705		efx->msi_context[i].index = i;
2706	}
2707
2708	/* Higher numbered interrupt modes are less capable! */
2709	efx->interrupt_mode = max(efx->type->max_interrupt_mode,
2710				  interrupt_mode);
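	/* Editor's note: worked example of the clamp above.  Modes are
	 * ordered MSI-X (0) < MSI (1) < legacy (2), so if the user requests
	 * MSI-X but the NIC type reports max_interrupt_mode == 1 (MSI),
	 * max() falls back to MSI rather than an unsupported mode.
	 */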
2711
2712	/* Would be good to use the net_dev name, but we're too early */
2713	snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
2714		 pci_name(pci_dev));
2715	efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
2716	if (!efx->workqueue)
2717		goto fail;
2718
2719	return 0;
2720
2721fail:
2722	ef4_fini_struct(efx);
2723	return -ENOMEM;
2724}
2725
2726static void ef4_fini_struct(struct ef4_nic *efx)
2727{
2728	int i;
2729
2730	for (i = 0; i < EF4_MAX_CHANNELS; i++)
2731		kfree(efx->channel[i]);
2732
2733	kfree(efx->vpd_sn);
2734
2735	if (efx->workqueue) {
2736		destroy_workqueue(efx->workqueue);
2737		efx->workqueue = NULL;
2738	}
2739}
2740
2741void ef4_update_sw_stats(struct ef4_nic *efx, u64 *stats)
2742{
2743	u64 n_rx_nodesc_trunc = 0;
2744	struct ef4_channel *channel;
2745
2746	ef4_for_each_channel(channel, efx)
2747		n_rx_nodesc_trunc += channel->n_rx_nodesc_trunc;
2748	stats[GENERIC_STAT_rx_nodesc_trunc] = n_rx_nodesc_trunc;
2749	stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops);
2750}
2751
2752/**************************************************************************
2753 *
2754 * PCI interface
2755 *
2756 **************************************************************************/
2757
2758/* Main body of final NIC shutdown code
2759 * This is called only at module unload (or hotplug removal).
2760 */
2761static void ef4_pci_remove_main(struct ef4_nic *efx)
2762{
2763	/* Flush reset_work. It can no longer be scheduled since we
2764	 * are not READY.
2765	 */
2766	BUG_ON(efx->state == STATE_READY);
2767	cancel_work_sync(&efx->reset_work);
2768
2769	ef4_disable_interrupts(efx);
2770	ef4_nic_fini_interrupt(efx);
2771	ef4_fini_port(efx);
2772	efx->type->fini(efx);
2773	ef4_fini_napi(efx);
2774	ef4_remove_all(efx);
2775}
2776
2777/* Final NIC shutdown
2778 * This is called only at module unload (or hotplug removal).  A PF can call
2779 * this on its VFs to ensure they are unbound first.
2780 */
2781static void ef4_pci_remove(struct pci_dev *pci_dev)
2782{
2783	struct ef4_nic *efx;
2784
2785	efx = pci_get_drvdata(pci_dev);
2786	if (!efx)
2787		return;
2788
2789	/* Mark the NIC as being shut down, then stop the interface */
2790	rtnl_lock();
2791	ef4_dissociate(efx);
2792	dev_close(efx->net_dev);
2793	ef4_disable_interrupts(efx);
2794	efx->state = STATE_UNINIT;
2795	rtnl_unlock();
2796
2797	ef4_unregister_netdev(efx);
2798
2799	ef4_mtd_remove(efx);
2800
2801	ef4_pci_remove_main(efx);
2802
2803	ef4_fini_io(efx);
2804	netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
2805
2806	ef4_fini_struct(efx);
2807	free_netdev(efx->net_dev);
2808
2809	pci_disable_pcie_error_reporting(pci_dev);
2810}
2811
2812/* NIC VPD information
2813 * Called during probe to display the part number of the
2814 * installed NIC.  VPD is potentially very large but this should
2815 * always appear within the first 512 bytes.
2816 */
2817#define SFC_VPD_LEN 512
2818static void ef4_probe_vpd_strings(struct ef4_nic *efx)
2819{
2820	struct pci_dev *dev = efx->pci_dev;
2821	char vpd_data[SFC_VPD_LEN];
2822	ssize_t vpd_size;
2823	int ro_start, ro_size, i, j;
2824
2825	/* Get the vpd data from the device */
2826	vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
2827	if (vpd_size <= 0) {
2828		netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n");
2829		return;
2830	}
2831
2832	/* Get the Read only section */
2833	ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
2834	if (ro_start < 0) {
2835		netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
2836		return;
2837	}
2838
2839	ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
2840	j = ro_size;
2841	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
2842	if (i + j > vpd_size)
2843		j = vpd_size - i;
2844
2845	/* Get the Part number */
2846	i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN");
2847	if (i < 0) {
2848		netif_err(efx, drv, efx->net_dev, "Part number not found\n");
2849		return;
2850	}
2851
2852	j = pci_vpd_info_field_size(&vpd_data[i]);
2853	i += PCI_VPD_INFO_FLD_HDR_SIZE;
2854	if (i + j > vpd_size) {
2855		netif_err(efx, drv, efx->net_dev, "Incomplete part number\n");
2856		return;
2857	}
2858
2859	netif_info(efx, drv, efx->net_dev,
2860		   "Part Number : %.*s\n", j, &vpd_data[i]);
2861
2862	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
2863	j = ro_size;
2864	i = pci_vpd_find_info_keyword(vpd_data, i, j, "SN");
2865	if (i < 0) {
2866		netif_err(efx, drv, efx->net_dev, "Serial number not found\n");
2867		return;
2868	}
2869
2870	j = pci_vpd_info_field_size(&vpd_data[i]);
2871	i += PCI_VPD_INFO_FLD_HDR_SIZE;
2872	if (i + j > vpd_size) {
2873		netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n");
2874		return;
2875	}
2876
2877	efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL);
2878	if (!efx->vpd_sn)
2879		return;
2880
2881	snprintf(efx->vpd_sn, j + 1, "%s", &vpd_data[i]);
2882}
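/* Editor's note: sketch of the VPD layout the parser above walks, with
 * made-up contents:
 *   [0x82 len "Product name"] [0x90 len  PN 0x07 "SFE4001"  SN 0x0c ... ]
 * pci_vpd_find_tag() locates the 0x90 (large-resource read-only)
 * descriptor, and the keyword helpers then scan its "keyword, length,
 * data" fields for "PN" and "SN".
 */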
2883
2884
2885/* Main body of NIC initialisation
2886 * This is called at module load (or hotplug insertion, theoretically).
2887 */
2888static int ef4_pci_probe_main(struct ef4_nic *efx)
2889{
2890	int rc;
2891
2892	/* Do start-of-day initialisation */
2893	rc = ef4_probe_all(efx);
2894	if (rc)
2895		goto fail1;
2896
2897	ef4_init_napi(efx);
2898
2899	rc = efx->type->init(efx);
2900	if (rc) {
2901		netif_err(efx, probe, efx->net_dev,
2902			  "failed to initialise NIC\n");
2903		goto fail3;
2904	}
2905
2906	rc = ef4_init_port(efx);
2907	if (rc) {
2908		netif_err(efx, probe, efx->net_dev,
2909			  "failed to initialise port\n");
2910		goto fail4;
2911	}
2912
2913	rc = ef4_nic_init_interrupt(efx);
2914	if (rc)
2915		goto fail5;
2916	rc = ef4_enable_interrupts(efx);
2917	if (rc)
2918		goto fail6;
2919
2920	return 0;
2921
2922 fail6:
2923	ef4_nic_fini_interrupt(efx);
2924 fail5:
2925	ef4_fini_port(efx);
2926 fail4:
2927	efx->type->fini(efx);
2928 fail3:
2929	ef4_fini_napi(efx);
2930	ef4_remove_all(efx);
2931 fail1:
2932	return rc;
2933}
2934
2935/* NIC initialisation
2936 *
2937 * This is called at module load (or hotplug insertion,
2938 * theoretically).  It sets up PCI mappings, resets the NIC,
2939 * sets up and registers the network devices with the kernel and hooks
2940 * the interrupt service routine.  It does not prepare the device for
2941 * transmission; this is left to the first time one of the network
2942 * interfaces is brought up (i.e. ef4_net_open).
2943 */
2944static int ef4_pci_probe(struct pci_dev *pci_dev,
2945			 const struct pci_device_id *entry)
2946{
2947	struct net_device *net_dev;
2948	struct ef4_nic *efx;
2949	int rc;
2950
2951	/* Allocate and initialise a struct net_device and struct ef4_nic */
2952	net_dev = alloc_etherdev_mqs(sizeof(*efx), EF4_MAX_CORE_TX_QUEUES,
2953				     EF4_MAX_RX_QUEUES);
2954	if (!net_dev)
2955		return -ENOMEM;
2956	efx = netdev_priv(net_dev);
2957	efx->type = (const struct ef4_nic_type *) entry->driver_data;
2958	efx->fixed_features |= NETIF_F_HIGHDMA;
2959
2960	pci_set_drvdata(pci_dev, efx);
2961	SET_NETDEV_DEV(net_dev, &pci_dev->dev);
2962	rc = ef4_init_struct(efx, pci_dev, net_dev);
2963	if (rc)
2964		goto fail1;
2965
2966	netif_info(efx, probe, efx->net_dev,
2967		   "Solarflare NIC detected\n");
2968
2969	ef4_probe_vpd_strings(efx);
2970
2971	/* Set up basic I/O (BAR mappings etc) */
2972	rc = ef4_init_io(efx);
2973	if (rc)
2974		goto fail2;
2975
2976	rc = ef4_pci_probe_main(efx);
2977	if (rc)
2978		goto fail3;
2979
2980	net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
2981			      NETIF_F_RXCSUM);
2982	/* Mask for features that also apply to VLAN devices */
2983	net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
2984				   NETIF_F_HIGHDMA | NETIF_F_RXCSUM);
2985
2986	net_dev->hw_features = net_dev->features & ~efx->fixed_features;
2987
2988	/* Disable VLAN filtering by default.  It may be enforced if
2989	 * the feature is fixed (i.e. VLAN filters are required to
2990	 * receive VLAN tagged packets due to vPort restrictions).
2991	 */
2992	net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
2993	net_dev->features |= efx->fixed_features;
2994
2995	rc = ef4_register_netdev(efx);
2996	if (rc)
2997		goto fail4;
2998
2999	netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
3000
3001	/* Try to create MTDs, but allow this to fail */
3002	rtnl_lock();
3003	rc = ef4_mtd_probe(efx);
3004	rtnl_unlock();
3005	if (rc && rc != -EPERM)
3006		netif_warn(efx, probe, efx->net_dev,
3007			   "failed to create MTDs (%d)\n", rc);
3008
3009	rc = pci_enable_pcie_error_reporting(pci_dev);
3010	if (rc && rc != -EINVAL)
3011		netif_notice(efx, probe, efx->net_dev,
3012			     "PCIE error reporting unavailable (%d).\n",
3013			     rc);
3014
3015	return 0;
3016
3017 fail4:
3018	ef4_pci_remove_main(efx);
3019 fail3:
3020	ef4_fini_io(efx);
3021 fail2:
3022	ef4_fini_struct(efx);
3023 fail1:
3024	WARN_ON(rc > 0);
3025	netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
3026	free_netdev(net_dev);
3027	return rc;
3028}
3029
3030static int ef4_pm_freeze(struct device *dev)
3031{
3032	struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev));
3033
3034	rtnl_lock();
3035
3036	if (efx->state != STATE_DISABLED) {
3037		efx->state = STATE_UNINIT;
3038
3039		ef4_device_detach_sync(efx);
3040
3041		ef4_stop_all(efx);
3042		ef4_disable_interrupts(efx);
3043	}
3044
3045	rtnl_unlock();
3046
3047	return 0;
3048}
3049
3050static int ef4_pm_thaw(struct device *dev)
3051{
3052	int rc;
3053	struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev));
3054
3055	rtnl_lock();
3056
3057	if (efx->state != STATE_DISABLED) {
3058		rc = ef4_enable_interrupts(efx);
3059		if (rc)
3060			goto fail;
3061
3062		mutex_lock(&efx->mac_lock);
3063		efx->phy_op->reconfigure(efx);
3064		mutex_unlock(&efx->mac_lock);
3065
3066		ef4_start_all(efx);
3067
3068		netif_device_attach(efx->net_dev);
3069
3070		efx->state = STATE_READY;
3071
3072		efx->type->resume_wol(efx);
3073	}
3074
3075	rtnl_unlock();
3076
3077	/* Reschedule any quenched resets scheduled during ef4_pm_freeze() */
3078	queue_work(reset_workqueue, &efx->reset_work);
3079
3080	return 0;
3081
3082fail:
3083	rtnl_unlock();
3084
3085	return rc;
3086}
3087
3088static int ef4_pm_poweroff(struct device *dev)
3089{
3090	struct pci_dev *pci_dev = to_pci_dev(dev);
3091	struct ef4_nic *efx = pci_get_drvdata(pci_dev);
3092
3093	efx->type->fini(efx);
3094
3095	efx->reset_pending = 0;
3096
3097	pci_save_state(pci_dev);
3098	return pci_set_power_state(pci_dev, PCI_D3hot);
3099}
3100
3101/* Used for both resume and restore */
3102static int ef4_pm_resume(struct device *dev)
3103{
3104	struct pci_dev *pci_dev = to_pci_dev(dev);
3105	struct ef4_nic *efx = pci_get_drvdata(pci_dev);
3106	int rc;
3107
3108	rc = pci_set_power_state(pci_dev, PCI_D0);
3109	if (rc)
3110		return rc;
3111	pci_restore_state(pci_dev);
3112	rc = pci_enable_device(pci_dev);
3113	if (rc)
3114		return rc;
3115	pci_set_master(efx->pci_dev);
3116	rc = efx->type->reset(efx, RESET_TYPE_ALL);
3117	if (rc)
3118		return rc;
3119	rc = efx->type->init(efx);
3120	if (rc)
3121		return rc;
3122	rc = ef4_pm_thaw(dev);
3123	return rc;
3124}
3125
3126static int ef4_pm_suspend(struct device *dev)
3127{
3128	int rc;
3129
3130	ef4_pm_freeze(dev);
3131	rc = ef4_pm_poweroff(dev);
3132	if (rc)
3133		ef4_pm_resume(dev);
3134	return rc;
3135}
3136
3137static const struct dev_pm_ops ef4_pm_ops = {
3138	.suspend	= ef4_pm_suspend,
3139	.resume		= ef4_pm_resume,
3140	.freeze		= ef4_pm_freeze,
3141	.thaw		= ef4_pm_thaw,
3142	.poweroff	= ef4_pm_poweroff,
3143	.restore	= ef4_pm_resume,
3144};
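/* Editor's note: the kernel invokes these callbacks roughly as follows:
 * suspend-to-RAM uses .suspend/.resume, while hibernation uses .freeze,
 * then .poweroff on the saving kernel, and .restore on the resumed
 * kernel.  That is why ef4_pm_suspend() is simply freeze + poweroff,
 * and why .restore reuses ef4_pm_resume().
 */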
3145
3146/* A PCI error affecting this device was detected.
3147 * At this point MMIO and DMA may be disabled.
3148 * Stop the software path and request a slot reset.
3149 */
3150static pci_ers_result_t ef4_io_error_detected(struct pci_dev *pdev,
3151					      enum pci_channel_state state)
3152{
3153	pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
3154	struct ef4_nic *efx = pci_get_drvdata(pdev);
3155
3156	if (state == pci_channel_io_perm_failure)
3157		return PCI_ERS_RESULT_DISCONNECT;
3158
3159	rtnl_lock();
3160
3161	if (efx->state != STATE_DISABLED) {
3162		efx->state = STATE_RECOVERY;
3163		efx->reset_pending = 0;
3164
3165		ef4_device_detach_sync(efx);
3166
3167		ef4_stop_all(efx);
3168		ef4_disable_interrupts(efx);
3169
3170		status = PCI_ERS_RESULT_NEED_RESET;
3171	} else {
3172		/* If the interface is disabled we don't want to do anything
3173		 * with it.
3174		 */
3175		status = PCI_ERS_RESULT_RECOVERED;
3176	}
3177
3178	rtnl_unlock();
3179
3180	pci_disable_device(pdev);
3181
3182	return status;
3183}
3184
3185/* Fake a successful reset, which will be performed later in ef4_io_resume. */
3186static pci_ers_result_t ef4_io_slot_reset(struct pci_dev *pdev)
3187{
3188	struct ef4_nic *efx = pci_get_drvdata(pdev);
3189	pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
3190	int rc;
3191
3192	if (pci_enable_device(pdev)) {
3193		netif_err(efx, hw, efx->net_dev,
3194			  "Cannot re-enable PCI device after reset.\n");
3195		status = PCI_ERS_RESULT_DISCONNECT;
3196	}
3197
3198	rc = pci_cleanup_aer_uncorrect_error_status(pdev);
3199	if (rc) {
3200		netif_err(efx, hw, efx->net_dev,
3201		"pci_cleanup_aer_uncorrect_error_status failed (%d)\n", rc);
3202		/* Non-fatal error. Continue. */
3203	}
3204
3205	return status;
3206}
3207
3208/* Perform the actual reset and resume I/O operations. */
3209static void ef4_io_resume(struct pci_dev *pdev)
3210{
3211	struct ef4_nic *efx = pci_get_drvdata(pdev);
3212	int rc;
3213
3214	rtnl_lock();
3215
3216	if (efx->state == STATE_DISABLED)
3217		goto out;
3218
3219	rc = ef4_reset(efx, RESET_TYPE_ALL);
3220	if (rc) {
3221		netif_err(efx, hw, efx->net_dev,
3222			  "ef4_reset failed after PCI error (%d)\n", rc);
3223	} else {
3224		efx->state = STATE_READY;
3225		netif_dbg(efx, hw, efx->net_dev,
3226			  "Done resetting and resuming IO after PCI error.\n");
3227	}
3228
3229out:
3230	rtnl_unlock();
3231}
3232
3233/* For simplicity and reliability, we always require a slot reset and try to
3234 * reset the hardware when a pci error affecting the device is detected.
3235 * We leave both the link_reset and mmio_enabled callback unimplemented:
3236 * with our request for slot reset the mmio_enabled callback will never be
3237 * called, and the link_reset callback is not used by AER or EEH mechanisms.
3238 */
3239static const struct pci_error_handlers ef4_err_handlers = {
3240	.error_detected = ef4_io_error_detected,
3241	.slot_reset	= ef4_io_slot_reset,
3242	.resume		= ef4_io_resume,
3243};
3244
3245static struct pci_driver ef4_pci_driver = {
3246	.name		= KBUILD_MODNAME,
3247	.id_table	= ef4_pci_table,
3248	.probe		= ef4_pci_probe,
3249	.remove		= ef4_pci_remove,
3250	.driver.pm	= &ef4_pm_ops,
3251	.err_handler	= &ef4_err_handlers,
3252};
3253
3254/**************************************************************************
3255 *
3256 * Kernel module interface
3257 *
3258 *************************************************************************/
3259
3260module_param(interrupt_mode, uint, 0444);
3261MODULE_PARM_DESC(interrupt_mode,
3262		 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
3263
3264static int __init ef4_init_module(void)
3265{
3266	int rc;
3267
3268	printk(KERN_INFO "Solarflare Falcon driver v" EF4_DRIVER_VERSION "\n");
3269
3270	rc = register_netdevice_notifier(&ef4_netdev_notifier);
3271	if (rc)
3272		goto err_notifier;
3273
3274	reset_workqueue = create_singlethread_workqueue("sfc_reset");
3275	if (!reset_workqueue) {
3276		rc = -ENOMEM;
3277		goto err_reset;
3278	}
3279
3280	rc = pci_register_driver(&ef4_pci_driver);
3281	if (rc < 0)
3282		goto err_pci;
3283
3284	return 0;
3285
3286 err_pci:
3287	destroy_workqueue(reset_workqueue);
3288 err_reset:
3289	unregister_netdevice_notifier(&ef4_netdev_notifier);
3290 err_notifier:
3291	return rc;
3292}
3293
3294static void __exit ef4_exit_module(void)
3295{
3296	printk(KERN_INFO "Solarflare Falcon driver unloading\n");
3297
3298	pci_unregister_driver(&ef4_pci_driver);
3299	destroy_workqueue(reset_workqueue);
3300	unregister_netdevice_notifier(&ef4_netdev_notifier);
3301
3302}
3303
3304module_init(ef4_init_module);
3305module_exit(ef4_exit_module);
3306
3307MODULE_AUTHOR("Solarflare Communications and "
3308	      "Michael Brown <mbrown@fensystems.co.uk>");
3309MODULE_DESCRIPTION("Solarflare Falcon network driver");
3310MODULE_LICENSE("GPL");
3311MODULE_DEVICE_TABLE(pci, ef4_pci_table);
3312MODULE_VERSION(EF4_DRIVER_VERSION);
v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/****************************************************************************
   3 * Driver for Solarflare network controllers and boards
   4 * Copyright 2005-2006 Fen Systems Ltd.
   5 * Copyright 2005-2013 Solarflare Communications Inc.
 
 
 
 
   6 */
   7
   8#include <linux/module.h>
   9#include <linux/pci.h>
  10#include <linux/netdevice.h>
  11#include <linux/etherdevice.h>
  12#include <linux/delay.h>
  13#include <linux/notifier.h>
  14#include <linux/ip.h>
  15#include <linux/tcp.h>
  16#include <linux/in.h>
  17#include <linux/ethtool.h>
  18#include <linux/topology.h>
  19#include <linux/gfp.h>
 
  20#include <linux/interrupt.h>
  21#include "net_driver.h"
  22#include "efx.h"
  23#include "nic.h"
  24#include "selftest.h"
  25
  26#include "workarounds.h"
  27
  28/**************************************************************************
  29 *
  30 * Type name strings
  31 *
  32 **************************************************************************
  33 */
  34
  35/* Loopback mode names (see LOOPBACK_MODE()) */
  36const unsigned int ef4_loopback_mode_max = LOOPBACK_MAX;
  37const char *const ef4_loopback_mode_names[] = {
  38	[LOOPBACK_NONE]		= "NONE",
  39	[LOOPBACK_DATA]		= "DATAPATH",
  40	[LOOPBACK_GMAC]		= "GMAC",
  41	[LOOPBACK_XGMII]	= "XGMII",
  42	[LOOPBACK_XGXS]		= "XGXS",
  43	[LOOPBACK_XAUI]		= "XAUI",
  44	[LOOPBACK_GMII]		= "GMII",
  45	[LOOPBACK_SGMII]	= "SGMII",
  46	[LOOPBACK_XGBR]		= "XGBR",
  47	[LOOPBACK_XFI]		= "XFI",
  48	[LOOPBACK_XAUI_FAR]	= "XAUI_FAR",
  49	[LOOPBACK_GMII_FAR]	= "GMII_FAR",
  50	[LOOPBACK_SGMII_FAR]	= "SGMII_FAR",
  51	[LOOPBACK_XFI_FAR]	= "XFI_FAR",
  52	[LOOPBACK_GPHY]		= "GPHY",
  53	[LOOPBACK_PHYXS]	= "PHYXS",
  54	[LOOPBACK_PCS]		= "PCS",
  55	[LOOPBACK_PMAPMD]	= "PMA/PMD",
  56	[LOOPBACK_XPORT]	= "XPORT",
  57	[LOOPBACK_XGMII_WS]	= "XGMII_WS",
  58	[LOOPBACK_XAUI_WS]	= "XAUI_WS",
  59	[LOOPBACK_XAUI_WS_FAR]  = "XAUI_WS_FAR",
  60	[LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR",
  61	[LOOPBACK_GMII_WS]	= "GMII_WS",
  62	[LOOPBACK_XFI_WS]	= "XFI_WS",
  63	[LOOPBACK_XFI_WS_FAR]	= "XFI_WS_FAR",
  64	[LOOPBACK_PHYXS_WS]	= "PHYXS_WS",
  65};
  66
  67const unsigned int ef4_reset_type_max = RESET_TYPE_MAX;
  68const char *const ef4_reset_type_names[] = {
  69	[RESET_TYPE_INVISIBLE]          = "INVISIBLE",
  70	[RESET_TYPE_ALL]                = "ALL",
  71	[RESET_TYPE_RECOVER_OR_ALL]     = "RECOVER_OR_ALL",
  72	[RESET_TYPE_WORLD]              = "WORLD",
  73	[RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
  74	[RESET_TYPE_DATAPATH]           = "DATAPATH",
  75	[RESET_TYPE_DISABLE]            = "DISABLE",
  76	[RESET_TYPE_TX_WATCHDOG]        = "TX_WATCHDOG",
  77	[RESET_TYPE_INT_ERROR]          = "INT_ERROR",
  78	[RESET_TYPE_RX_RECOVERY]        = "RX_RECOVERY",
  79	[RESET_TYPE_DMA_ERROR]          = "DMA_ERROR",
  80	[RESET_TYPE_TX_SKIP]            = "TX_SKIP",
  81};
  82
  83/* Reset workqueue. If any NIC has a hardware failure then a reset will be
  84 * queued onto this work queue. This is not a per-nic work queue, because
  85 * ef4_reset_work() acquires the rtnl lock, so resets are naturally serialised.
  86 */
  87static struct workqueue_struct *reset_workqueue;
  88
  89/* How often and how many times to poll for a reset while waiting for a
  90 * BIST that another function started to complete.
  91 */
  92#define BIST_WAIT_DELAY_MS	100
  93#define BIST_WAIT_DELAY_COUNT	100
  94
  95/**************************************************************************
  96 *
  97 * Configurable values
  98 *
  99 *************************************************************************/
 100
 101/*
 102 * Use separate channels for TX and RX events
 103 *
 104 * Set this to 1 to use separate channels for TX and RX. It allows us
 105 * to control interrupt affinity separately for TX and RX.
 106 *
 107 * This is only used in MSI-X interrupt mode
 108 */
 109bool ef4_separate_tx_channels;
 110module_param(ef4_separate_tx_channels, bool, 0444);
 111MODULE_PARM_DESC(ef4_separate_tx_channels,
 112		 "Use separate channels for TX and RX");
 113
 
 
 
 
 
 114/* This is the time (in jiffies) between invocations of the hardware
 115 * monitor.
 116 * On Falcon-based NICs, this will:
 117 * - Check the on-board hardware monitor;
 118 * - Poll the link state and reconfigure the hardware as necessary.
 119 * On Siena-based NICs for power systems with EEH support, this will give EEH a
 120 * chance to start.
 121 */
 122static unsigned int ef4_monitor_interval = 1 * HZ;
 123
 124/* Initial interrupt moderation settings.  They can be modified after
 125 * module load with ethtool.
 126 *
 127 * The default for RX should strike a balance between increasing the
 128 * round-trip latency and reducing overhead.
 129 */
 130static unsigned int rx_irq_mod_usec = 60;
 131
 132/* Initial interrupt moderation settings.  They can be modified after
 133 * module load with ethtool.
 134 *
 135 * This default is chosen to ensure that a 10G link does not go idle
 136 * while a TX queue is stopped after it has become full.  A queue is
 137 * restarted when it drops below half full.  The time this takes (assuming
 138 * worst case 3 descriptors per packet and 1024 descriptors) is
 139 *   512 / 3 * 1.2 = 205 usec.
 140 */
 141static unsigned int tx_irq_mod_usec = 150;
 142
 143/* This is the first interrupt mode to try out of:
 144 * 0 => MSI-X
 145 * 1 => MSI
 146 * 2 => legacy
 147 */
 148static unsigned int interrupt_mode;
 149
 150/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 151 * i.e. the number of CPUs among which we may distribute simultaneous
 152 * interrupt handling.
 153 *
 154 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 155 * The default (0) means to assign an interrupt to each core.
 156 */
 157static unsigned int rss_cpus;
 158module_param(rss_cpus, uint, 0444);
 159MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
 160
 161static bool phy_flash_cfg;
 162module_param(phy_flash_cfg, bool, 0644);
 163MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");
 164
 165static unsigned irq_adapt_low_thresh = 8000;
 166module_param(irq_adapt_low_thresh, uint, 0644);
 167MODULE_PARM_DESC(irq_adapt_low_thresh,
 168		 "Threshold score for reducing IRQ moderation");
 169
 170static unsigned irq_adapt_high_thresh = 16000;
 171module_param(irq_adapt_high_thresh, uint, 0644);
 172MODULE_PARM_DESC(irq_adapt_high_thresh,
 173		 "Threshold score for increasing IRQ moderation");
 174
 175static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
 176			 NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
 177			 NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
 178			 NETIF_MSG_TX_ERR | NETIF_MSG_HW);
 179module_param(debug, uint, 0);
 180MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
 181
 182/**************************************************************************
 183 *
 184 * Utility functions and prototypes
 185 *
 186 *************************************************************************/
 187
 188static int ef4_soft_enable_interrupts(struct ef4_nic *efx);
 189static void ef4_soft_disable_interrupts(struct ef4_nic *efx);
 190static void ef4_remove_channel(struct ef4_channel *channel);
 191static void ef4_remove_channels(struct ef4_nic *efx);
 192static const struct ef4_channel_type ef4_default_channel_type;
 193static void ef4_remove_port(struct ef4_nic *efx);
 194static void ef4_init_napi_channel(struct ef4_channel *channel);
 195static void ef4_fini_napi(struct ef4_nic *efx);
 196static void ef4_fini_napi_channel(struct ef4_channel *channel);
 197static void ef4_fini_struct(struct ef4_nic *efx);
 198static void ef4_start_all(struct ef4_nic *efx);
 199static void ef4_stop_all(struct ef4_nic *efx);
 200
 201#define EF4_ASSERT_RESET_SERIALISED(efx)		\
 202	do {						\
 203		if ((efx->state == STATE_READY) ||	\
 204		    (efx->state == STATE_RECOVERY) ||	\
 205		    (efx->state == STATE_DISABLED))	\
 206			ASSERT_RTNL();			\
 207	} while (0)
 208
 209static int ef4_check_disabled(struct ef4_nic *efx)
 210{
 211	if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
 212		netif_err(efx, drv, efx->net_dev,
 213			  "device is disabled due to earlier errors\n");
 214		return -EIO;
 215	}
 216	return 0;
 217}
 218
 219/**************************************************************************
 220 *
 221 * Event queue processing
 222 *
 223 *************************************************************************/
 224
 225/* Process channel's event queue
 226 *
 227 * This function is responsible for processing the event queue of a
 228 * single channel.  The caller must guarantee that this function will
 229 * never be concurrently called more than once on the same channel,
 230 * though different channels may be being processed concurrently.
 231 */
 232static int ef4_process_channel(struct ef4_channel *channel, int budget)
 233{
 234	struct ef4_tx_queue *tx_queue;
 235	int spent;
 236
 237	if (unlikely(!channel->enabled))
 238		return 0;
 239
 240	ef4_for_each_channel_tx_queue(tx_queue, channel) {
 241		tx_queue->pkts_compl = 0;
 242		tx_queue->bytes_compl = 0;
 243	}
 244
 245	spent = ef4_nic_process_eventq(channel, budget);
 246	if (spent && ef4_channel_has_rx_queue(channel)) {
 247		struct ef4_rx_queue *rx_queue =
 248			ef4_channel_get_rx_queue(channel);
 249
 250		ef4_rx_flush_packet(channel);
 251		ef4_fast_push_rx_descriptors(rx_queue, true);
 252	}
 253
 254	/* Update BQL */
 255	ef4_for_each_channel_tx_queue(tx_queue, channel) {
 256		if (tx_queue->bytes_compl) {
 257			netdev_tx_completed_queue(tx_queue->core_txq,
 258				tx_queue->pkts_compl, tx_queue->bytes_compl);
 259		}
 260	}
 261
 262	return spent;
 263}
 264
 265/* NAPI poll handler
 266 *
 267 * NAPI guarantees serialisation of polls of the same device, which
 268 * provides the guarantee required by ef4_process_channel().
 269 */
 270static void ef4_update_irq_mod(struct ef4_nic *efx, struct ef4_channel *channel)
 271{
 272	int step = efx->irq_mod_step_us;
 273
 274	if (channel->irq_mod_score < irq_adapt_low_thresh) {
 275		if (channel->irq_moderation_us > step) {
 276			channel->irq_moderation_us -= step;
 277			efx->type->push_irq_moderation(channel);
 278		}
 279	} else if (channel->irq_mod_score > irq_adapt_high_thresh) {
 280		if (channel->irq_moderation_us <
 281		    efx->irq_rx_moderation_us) {
 282			channel->irq_moderation_us += step;
 283			efx->type->push_irq_moderation(channel);
 284		}
 285	}
 286
 287	channel->irq_count = 0;
 288	channel->irq_mod_score = 0;
 289}
 290
 291static int ef4_poll(struct napi_struct *napi, int budget)
 292{
 293	struct ef4_channel *channel =
 294		container_of(napi, struct ef4_channel, napi_str);
 295	struct ef4_nic *efx = channel->efx;
 296	int spent;
 297
 298	netif_vdbg(efx, intr, efx->net_dev,
 299		   "channel %d NAPI poll executing on CPU %d\n",
 300		   channel->channel, raw_smp_processor_id());
 301
 302	spent = ef4_process_channel(channel, budget);
 303
 304	if (spent < budget) {
 305		if (ef4_channel_has_rx_queue(channel) &&
 306		    efx->irq_rx_adaptive &&
 307		    unlikely(++channel->irq_count == 1000)) {
 308			ef4_update_irq_mod(efx, channel);
 309		}
 310
 311		ef4_filter_rfs_expire(channel);
 312
 313		/* There is no race here; although napi_disable() will
 314		 * only wait for napi_complete(), this isn't a problem
 315		 * since ef4_nic_eventq_read_ack() will have no effect if
 316		 * interrupts have already been disabled.
 317		 */
 318		napi_complete_done(napi, spent);
 319		ef4_nic_eventq_read_ack(channel);
 320	}
 321
 322	return spent;
 323}
 324
 325/* Create event queue
 326 * Event queue memory allocations are done only once.  If the channel
 327 * is reset, the memory buffer will be reused; this guards against
 328 * errors during channel reset and also simplifies interrupt handling.
 329 */
 330static int ef4_probe_eventq(struct ef4_channel *channel)
 331{
 332	struct ef4_nic *efx = channel->efx;
 333	unsigned long entries;
 334
 335	netif_dbg(efx, probe, efx->net_dev,
 336		  "chan %d create event queue\n", channel->channel);
 337
 338	/* Build an event queue with room for one event per tx and rx buffer,
 339	 * plus some extra for link state events and MCDI completions. */
 340	entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
 341	EF4_BUG_ON_PARANOID(entries > EF4_MAX_EVQ_SIZE);
 342	channel->eventq_mask = max(entries, EF4_MIN_EVQ_SIZE) - 1;
 343
 344	return ef4_nic_probe_eventq(channel);
 345}
 346
 347/* Prepare channel's event queue */
 348static int ef4_init_eventq(struct ef4_channel *channel)
 349{
 350	struct ef4_nic *efx = channel->efx;
 351	int rc;
 352
 353	EF4_WARN_ON_PARANOID(channel->eventq_init);
 354
 355	netif_dbg(efx, drv, efx->net_dev,
 356		  "chan %d init event queue\n", channel->channel);
 357
 358	rc = ef4_nic_init_eventq(channel);
 359	if (rc == 0) {
 360		efx->type->push_irq_moderation(channel);
 361		channel->eventq_read_ptr = 0;
 362		channel->eventq_init = true;
 363	}
 364	return rc;
 365}
 366
 367/* Enable event queue processing and NAPI */
 368void ef4_start_eventq(struct ef4_channel *channel)
 369{
 370	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
 371		  "chan %d start event queue\n", channel->channel);
 372
 373	/* Make sure the NAPI handler sees the enabled flag set */
 374	channel->enabled = true;
 375	smp_wmb();
 376
 377	napi_enable(&channel->napi_str);
 378	ef4_nic_eventq_read_ack(channel);
 379}
 380
 381/* Disable event queue processing and NAPI */
 382void ef4_stop_eventq(struct ef4_channel *channel)
 383{
 384	if (!channel->enabled)
 385		return;
 386
 387	napi_disable(&channel->napi_str);
 388	channel->enabled = false;
 389}
 390
 391static void ef4_fini_eventq(struct ef4_channel *channel)
 392{
 393	if (!channel->eventq_init)
 394		return;
 395
 396	netif_dbg(channel->efx, drv, channel->efx->net_dev,
 397		  "chan %d fini event queue\n", channel->channel);
 398
 399	ef4_nic_fini_eventq(channel);
 400	channel->eventq_init = false;
 401}
 402
 403static void ef4_remove_eventq(struct ef4_channel *channel)
 404{
 405	netif_dbg(channel->efx, drv, channel->efx->net_dev,
 406		  "chan %d remove event queue\n", channel->channel);
 407
 408	ef4_nic_remove_eventq(channel);
 409}
 410
 411/**************************************************************************
 412 *
 413 * Channel handling
 414 *
 415 *************************************************************************/
 416
 417/* Allocate and initialise a channel structure. */
 418static struct ef4_channel *
 419ef4_alloc_channel(struct ef4_nic *efx, int i, struct ef4_channel *old_channel)
 420{
 421	struct ef4_channel *channel;
 422	struct ef4_rx_queue *rx_queue;
 423	struct ef4_tx_queue *tx_queue;
 424	int j;
 425
 426	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
 427	if (!channel)
 428		return NULL;
 429
 430	channel->efx = efx;
 431	channel->channel = i;
 432	channel->type = &ef4_default_channel_type;
 433
 434	for (j = 0; j < EF4_TXQ_TYPES; j++) {
 435		tx_queue = &channel->tx_queue[j];
 436		tx_queue->efx = efx;
 437		tx_queue->queue = i * EF4_TXQ_TYPES + j;
 438		tx_queue->channel = channel;
 439	}
 440
 441	rx_queue = &channel->rx_queue;
 442	rx_queue->efx = efx;
 443	timer_setup(&rx_queue->slow_fill, ef4_rx_slow_fill, 0);
 444
 445	return channel;
 446}
 447
 448/* Allocate and initialise a channel structure, copying parameters
 449 * (but not resources) from an old channel structure.
 450 */
 451static struct ef4_channel *
 452ef4_copy_channel(const struct ef4_channel *old_channel)
 453{
 454	struct ef4_channel *channel;
 455	struct ef4_rx_queue *rx_queue;
 456	struct ef4_tx_queue *tx_queue;
 457	int j;
 458
 459	channel = kmalloc(sizeof(*channel), GFP_KERNEL);
 460	if (!channel)
 461		return NULL;
 462
 463	*channel = *old_channel;
 464
 465	channel->napi_dev = NULL;
 466	INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
 467	channel->napi_str.napi_id = 0;
 468	channel->napi_str.state = 0;
 469	memset(&channel->eventq, 0, sizeof(channel->eventq));
 470
 471	for (j = 0; j < EF4_TXQ_TYPES; j++) {
 472		tx_queue = &channel->tx_queue[j];
 473		if (tx_queue->channel)
 474			tx_queue->channel = channel;
 475		tx_queue->buffer = NULL;
 476		memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
 477	}
 478
 479	rx_queue = &channel->rx_queue;
 480	rx_queue->buffer = NULL;
 481	memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
 482	timer_setup(&rx_queue->slow_fill, ef4_rx_slow_fill, 0);
 483
 484	return channel;
 485}
 486
 487static int ef4_probe_channel(struct ef4_channel *channel)
 488{
 489	struct ef4_tx_queue *tx_queue;
 490	struct ef4_rx_queue *rx_queue;
 491	int rc;
 492
 493	netif_dbg(channel->efx, probe, channel->efx->net_dev,
 494		  "creating channel %d\n", channel->channel);
 495
 496	rc = channel->type->pre_probe(channel);
 497	if (rc)
 498		goto fail;
 499
 500	rc = ef4_probe_eventq(channel);
 501	if (rc)
 502		goto fail;
 503
 504	ef4_for_each_channel_tx_queue(tx_queue, channel) {
 505		rc = ef4_probe_tx_queue(tx_queue);
 506		if (rc)
 507			goto fail;
 508	}
 509
 510	ef4_for_each_channel_rx_queue(rx_queue, channel) {
 511		rc = ef4_probe_rx_queue(rx_queue);
 512		if (rc)
 513			goto fail;
 514	}
 515
 516	return 0;
 517
 518fail:
 519	ef4_remove_channel(channel);
 520	return rc;
 521}
 522
 523static void
 524ef4_get_channel_name(struct ef4_channel *channel, char *buf, size_t len)
 525{
 526	struct ef4_nic *efx = channel->efx;
 527	const char *type;
 528	int number;
 529
 530	number = channel->channel;
 531	if (efx->tx_channel_offset == 0) {
 532		type = "";
 533	} else if (channel->channel < efx->tx_channel_offset) {
 534		type = "-rx";
 535	} else {
 536		type = "-tx";
 537		number -= efx->tx_channel_offset;
 538	}
 539	snprintf(buf, len, "%s%s-%d", efx->name, type, number);
 540}
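
/* Illustration of the scheme above: with efx->name "eth0" and
 * tx_channel_offset == 4 (separate TX channels), channel 2 is named
 * "eth0-rx-2" and channel 5 "eth0-tx-1"; with tx_channel_offset == 0
 * (shared channels), channel 3 is simply "eth0-3".
 */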
 541
 542static void ef4_set_channel_names(struct ef4_nic *efx)
 543{
 544	struct ef4_channel *channel;
 545
 546	ef4_for_each_channel(channel, efx)
 547		channel->type->get_name(channel,
 548					efx->msi_context[channel->channel].name,
 549					sizeof(efx->msi_context[0].name));
 550}
 551
 552static int ef4_probe_channels(struct ef4_nic *efx)
 553{
 554	struct ef4_channel *channel;
 555	int rc;
 556
 557	/* Restart special buffer allocation */
 558	efx->next_buffer_table = 0;
 559
 560	/* Probe channels in reverse, so that any 'extra' channels
 561	 * use the start of the buffer table. This allows the traffic
 562	 * channels to be resized without moving them or wasting the
 563	 * entries before them.
 564	 */
 565	ef4_for_each_channel_rev(channel, efx) {
 566		rc = ef4_probe_channel(channel);
 567		if (rc) {
 568			netif_err(efx, probe, efx->net_dev,
 569				  "failed to create channel %d\n",
 570				  channel->channel);
 571			goto fail;
 572		}
 573	}
 574	ef4_set_channel_names(efx);
 575
 576	return 0;
 577
 578fail:
 579	ef4_remove_channels(efx);
 580	return rc;
 581}
 582
  583	/* Channels are shut down and reinitialised whilst the NIC is running
 584 * to propagate configuration changes (mtu, checksum offload), or
 585 * to clear hardware error conditions
 586 */
 587static void ef4_start_datapath(struct ef4_nic *efx)
 588{
 589	netdev_features_t old_features = efx->net_dev->features;
 590	bool old_rx_scatter = efx->rx_scatter;
 591	struct ef4_tx_queue *tx_queue;
 592	struct ef4_rx_queue *rx_queue;
 593	struct ef4_channel *channel;
 594	size_t rx_buf_len;
 595
 596	/* Calculate the rx buffer allocation parameters required to
 597	 * support the current MTU, including padding for header
 598	 * alignment and overruns.
 599	 */
 600	efx->rx_dma_len = (efx->rx_prefix_size +
 601			   EF4_MAX_FRAME_LEN(efx->net_dev->mtu) +
 602			   efx->type->rx_buffer_padding);
 603	rx_buf_len = (sizeof(struct ef4_rx_page_state) +
 604		      efx->rx_ip_align + efx->rx_dma_len);
 605	if (rx_buf_len <= PAGE_SIZE) {
 606		efx->rx_scatter = efx->type->always_rx_scatter;
 607		efx->rx_buffer_order = 0;
 608	} else if (efx->type->can_rx_scatter) {
 609		BUILD_BUG_ON(EF4_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
 610		BUILD_BUG_ON(sizeof(struct ef4_rx_page_state) +
 611			     2 * ALIGN(NET_IP_ALIGN + EF4_RX_USR_BUF_SIZE,
 612				       EF4_RX_BUF_ALIGNMENT) >
 613			     PAGE_SIZE);
 614		efx->rx_scatter = true;
 615		efx->rx_dma_len = EF4_RX_USR_BUF_SIZE;
 616		efx->rx_buffer_order = 0;
 617	} else {
 618		efx->rx_scatter = false;
 619		efx->rx_buffer_order = get_order(rx_buf_len);
 620	}
 621
 622	ef4_rx_config_page_split(efx);
 623	if (efx->rx_buffer_order)
 624		netif_dbg(efx, drv, efx->net_dev,
 625			  "RX buf len=%u; page order=%u batch=%u\n",
 626			  efx->rx_dma_len, efx->rx_buffer_order,
 627			  efx->rx_pages_per_batch);
 628	else
 629		netif_dbg(efx, drv, efx->net_dev,
 630			  "RX buf len=%u step=%u bpp=%u; page batch=%u\n",
 631			  efx->rx_dma_len, efx->rx_page_buf_step,
 632			  efx->rx_bufs_per_page, efx->rx_pages_per_batch);
 633
 634	/* Restore previously fixed features in hw_features and remove
 635	 * features which are fixed now
 636	 */
 637	efx->net_dev->hw_features |= efx->net_dev->features;
 638	efx->net_dev->hw_features &= ~efx->fixed_features;
 639	efx->net_dev->features |= efx->fixed_features;
 640	if (efx->net_dev->features != old_features)
 641		netdev_features_change(efx->net_dev);
 642
 643	/* RX filters may also have scatter-enabled flags */
 644	if (efx->rx_scatter != old_rx_scatter)
 645		efx->type->filter_update_rx_scatter(efx);
 646
 647	/* We must keep at least one descriptor in a TX ring empty.
 648	 * We could avoid this when the queue size does not exactly
 649	 * match the hardware ring size, but it's not that important.
 650	 * Therefore we stop the queue when one more skb might fill
 651	 * the ring completely.  We wake it when half way back to
 652	 * empty.
 653	 */
 654	efx->txq_stop_thresh = efx->txq_entries - ef4_tx_max_skb_descs(efx);
 655	efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
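
	/* Purely illustrative numbers: if txq_entries were 1024 and
	 * ef4_tx_max_skb_descs() returned, say, 18, the queue would be
	 * stopped at 1006 occupied descriptors and woken again at 503.
	 */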
 656
 657	/* Initialise the channels */
 658	ef4_for_each_channel(channel, efx) {
 659		ef4_for_each_channel_tx_queue(tx_queue, channel) {
 660			ef4_init_tx_queue(tx_queue);
 661			atomic_inc(&efx->active_queues);
 662		}
 663
 664		ef4_for_each_channel_rx_queue(rx_queue, channel) {
 665			ef4_init_rx_queue(rx_queue);
 666			atomic_inc(&efx->active_queues);
 667			ef4_stop_eventq(channel);
 668			ef4_fast_push_rx_descriptors(rx_queue, false);
 669			ef4_start_eventq(channel);
 670		}
 671
 672		WARN_ON(channel->rx_pkt_n_frags);
 673	}
 674
 675	if (netif_device_present(efx->net_dev))
 676		netif_tx_wake_all_queues(efx->net_dev);
 677}
 678
 679static void ef4_stop_datapath(struct ef4_nic *efx)
 680{
 681	struct ef4_channel *channel;
 682	struct ef4_tx_queue *tx_queue;
 683	struct ef4_rx_queue *rx_queue;
 684	int rc;
 685
 686	EF4_ASSERT_RESET_SERIALISED(efx);
 687	BUG_ON(efx->port_enabled);
 688
 689	/* Stop RX refill */
 690	ef4_for_each_channel(channel, efx) {
 691		ef4_for_each_channel_rx_queue(rx_queue, channel)
 692			rx_queue->refill_enabled = false;
 693	}
 694
 695	ef4_for_each_channel(channel, efx) {
 696		/* RX packet processing is pipelined, so wait for the
 697		 * NAPI handler to complete.  At least event queue 0
 698		 * might be kept active by non-data events, so don't
 699		 * use napi_synchronize() but actually disable NAPI
 700		 * temporarily.
 701		 */
 702		if (ef4_channel_has_rx_queue(channel)) {
 703			ef4_stop_eventq(channel);
 704			ef4_start_eventq(channel);
 705		}
 706	}
 707
 708	rc = efx->type->fini_dmaq(efx);
 709	if (rc && EF4_WORKAROUND_7803(efx)) {
 710		/* Schedule a reset to recover from the flush failure. The
 711		 * descriptor caches reference memory we're about to free,
 712		 * but falcon_reconfigure_mac_wrapper() won't reconnect
 713		 * the MACs because of the pending reset.
 714		 */
 715		netif_err(efx, drv, efx->net_dev,
 716			  "Resetting to recover from flush failure\n");
 717		ef4_schedule_reset(efx, RESET_TYPE_ALL);
 718	} else if (rc) {
 719		netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
 720	} else {
 721		netif_dbg(efx, drv, efx->net_dev,
 722			  "successfully flushed all queues\n");
 723	}
 724
 725	ef4_for_each_channel(channel, efx) {
 726		ef4_for_each_channel_rx_queue(rx_queue, channel)
 727			ef4_fini_rx_queue(rx_queue);
 728		ef4_for_each_possible_channel_tx_queue(tx_queue, channel)
 729			ef4_fini_tx_queue(tx_queue);
 730	}
 731}
 732
 733static void ef4_remove_channel(struct ef4_channel *channel)
 734{
 735	struct ef4_tx_queue *tx_queue;
 736	struct ef4_rx_queue *rx_queue;
 737
 738	netif_dbg(channel->efx, drv, channel->efx->net_dev,
 739		  "destroy chan %d\n", channel->channel);
 740
 741	ef4_for_each_channel_rx_queue(rx_queue, channel)
 742		ef4_remove_rx_queue(rx_queue);
 743	ef4_for_each_possible_channel_tx_queue(tx_queue, channel)
 744		ef4_remove_tx_queue(tx_queue);
 745	ef4_remove_eventq(channel);
 746	channel->type->post_remove(channel);
 747}
 748
 749static void ef4_remove_channels(struct ef4_nic *efx)
 750{
 751	struct ef4_channel *channel;
 752
 753	ef4_for_each_channel(channel, efx)
 754		ef4_remove_channel(channel);
 755}
 756
 757int
 758ef4_realloc_channels(struct ef4_nic *efx, u32 rxq_entries, u32 txq_entries)
 759{
 760	struct ef4_channel *other_channel[EF4_MAX_CHANNELS], *channel;
 761	u32 old_rxq_entries, old_txq_entries;
 762	unsigned i, next_buffer_table = 0;
 763	int rc, rc2;
 764
 765	rc = ef4_check_disabled(efx);
 766	if (rc)
 767		return rc;
 768
 769	/* Not all channels should be reallocated. We must avoid
 770	 * reallocating their buffer table entries.
 771	 */
 772	ef4_for_each_channel(channel, efx) {
 773		struct ef4_rx_queue *rx_queue;
 774		struct ef4_tx_queue *tx_queue;
 775
 776		if (channel->type->copy)
 777			continue;
 778		next_buffer_table = max(next_buffer_table,
 779					channel->eventq.index +
 780					channel->eventq.entries);
 781		ef4_for_each_channel_rx_queue(rx_queue, channel)
 782			next_buffer_table = max(next_buffer_table,
 783						rx_queue->rxd.index +
 784						rx_queue->rxd.entries);
 785		ef4_for_each_channel_tx_queue(tx_queue, channel)
 786			next_buffer_table = max(next_buffer_table,
 787						tx_queue->txd.index +
 788						tx_queue->txd.entries);
 789	}
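
	/* Example: if a preserved channel's event queue occupies buffer
	 * table entries 0-15 and its RX ring entries 16-79, then
	 * next_buffer_table becomes 80, so the reallocated channels are
	 * probed from entry 80 onwards and cannot trample the rings we
	 * are keeping.
	 */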
 790
 791	ef4_device_detach_sync(efx);
 792	ef4_stop_all(efx);
 793	ef4_soft_disable_interrupts(efx);
 794
 795	/* Clone channels (where possible) */
 796	memset(other_channel, 0, sizeof(other_channel));
 797	for (i = 0; i < efx->n_channels; i++) {
 798		channel = efx->channel[i];
 799		if (channel->type->copy)
 800			channel = channel->type->copy(channel);
 801		if (!channel) {
 802			rc = -ENOMEM;
 803			goto out;
 804		}
 805		other_channel[i] = channel;
 806	}
 807
 808	/* Swap entry counts and channel pointers */
 809	old_rxq_entries = efx->rxq_entries;
 810	old_txq_entries = efx->txq_entries;
 811	efx->rxq_entries = rxq_entries;
 812	efx->txq_entries = txq_entries;
 813	for (i = 0; i < efx->n_channels; i++) {
 814		swap(efx->channel[i], other_channel[i]);
 815	}
 816
 817	/* Restart buffer table allocation */
 818	efx->next_buffer_table = next_buffer_table;
 819
 820	for (i = 0; i < efx->n_channels; i++) {
 821		channel = efx->channel[i];
 822		if (!channel->type->copy)
 823			continue;
 824		rc = ef4_probe_channel(channel);
 825		if (rc)
 826			goto rollback;
 827		ef4_init_napi_channel(efx->channel[i]);
 828	}
 829
 830out:
 831	/* Destroy unused channel structures */
 832	for (i = 0; i < efx->n_channels; i++) {
 833		channel = other_channel[i];
 834		if (channel && channel->type->copy) {
 835			ef4_fini_napi_channel(channel);
 836			ef4_remove_channel(channel);
 837			kfree(channel);
 838		}
 839	}
 840
 841	rc2 = ef4_soft_enable_interrupts(efx);
 842	if (rc2) {
 843		rc = rc ? rc : rc2;
 844		netif_err(efx, drv, efx->net_dev,
 845			  "unable to restart interrupts on channel reallocation\n");
 846		ef4_schedule_reset(efx, RESET_TYPE_DISABLE);
 847	} else {
 848		ef4_start_all(efx);
 849		netif_device_attach(efx->net_dev);
 850	}
 851	return rc;
 852
 853rollback:
 854	/* Swap back */
 855	efx->rxq_entries = old_rxq_entries;
 856	efx->txq_entries = old_txq_entries;
 857	for (i = 0; i < efx->n_channels; i++) {
 858		swap(efx->channel[i], other_channel[i]);
 859	}
 860	goto out;
 861}
 862
 863void ef4_schedule_slow_fill(struct ef4_rx_queue *rx_queue)
 864{
 865	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
 866}
 867
 868static const struct ef4_channel_type ef4_default_channel_type = {
 869	.pre_probe		= ef4_channel_dummy_op_int,
 870	.post_remove		= ef4_channel_dummy_op_void,
 871	.get_name		= ef4_get_channel_name,
 872	.copy			= ef4_copy_channel,
 873	.keep_eventq		= false,
 874};
 875
 876int ef4_channel_dummy_op_int(struct ef4_channel *channel)
 877{
 878	return 0;
 879}
 880
 881void ef4_channel_dummy_op_void(struct ef4_channel *channel)
 882{
 883}
 884
 885/**************************************************************************
 886 *
 887 * Port handling
 888 *
 889 **************************************************************************/
 890
 891/* This ensures that the kernel is kept informed (via
  892 * netif_carrier_on/off) of the link status, and also keeps the
  893 * port's TX queue stopped while the link is down.
 894 */
 895void ef4_link_status_changed(struct ef4_nic *efx)
 896{
 897	struct ef4_link_state *link_state = &efx->link_state;
 898
 899	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
 900	 * that no events are triggered between unregister_netdev() and the
 901	 * driver unloading. A more general condition is that NETDEV_CHANGE
 902	 * can only be generated between NETDEV_UP and NETDEV_DOWN */
 903	if (!netif_running(efx->net_dev))
 904		return;
 905
 906	if (link_state->up != netif_carrier_ok(efx->net_dev)) {
 907		efx->n_link_state_changes++;
 908
 909		if (link_state->up)
 910			netif_carrier_on(efx->net_dev);
 911		else
 912			netif_carrier_off(efx->net_dev);
 913	}
 914
 915	/* Status message for kernel log */
 916	if (link_state->up)
 917		netif_info(efx, link, efx->net_dev,
 918			   "link up at %uMbps %s-duplex (MTU %d)\n",
 919			   link_state->speed, link_state->fd ? "full" : "half",
 920			   efx->net_dev->mtu);
 921	else
 922		netif_info(efx, link, efx->net_dev, "link down\n");
 923}
 924
 925void ef4_link_set_advertising(struct ef4_nic *efx, u32 advertising)
 926{
 927	efx->link_advertising = advertising;
 928	if (advertising) {
 929		if (advertising & ADVERTISED_Pause)
 930			efx->wanted_fc |= (EF4_FC_TX | EF4_FC_RX);
 931		else
 932			efx->wanted_fc &= ~(EF4_FC_TX | EF4_FC_RX);
 933		if (advertising & ADVERTISED_Asym_Pause)
 934			efx->wanted_fc ^= EF4_FC_TX;
 935	}
 936}
 937
 938void ef4_link_set_wanted_fc(struct ef4_nic *efx, u8 wanted_fc)
 939{
 940	efx->wanted_fc = wanted_fc;
 941	if (efx->link_advertising) {
 942		if (wanted_fc & EF4_FC_RX)
 943			efx->link_advertising |= (ADVERTISED_Pause |
 944						  ADVERTISED_Asym_Pause);
 945		else
 946			efx->link_advertising &= ~(ADVERTISED_Pause |
 947						   ADVERTISED_Asym_Pause);
 948		if (wanted_fc & EF4_FC_TX)
 949			efx->link_advertising ^= ADVERTISED_Asym_Pause;
 950	}
 951}
 952
 953static void ef4_fini_port(struct ef4_nic *efx);
 954
 955/* We assume that efx->type->reconfigure_mac will always try to sync RX
 956 * filters and therefore needs to read-lock the filter table against freeing
 957 */
 958void ef4_mac_reconfigure(struct ef4_nic *efx)
 959{
 960	down_read(&efx->filter_sem);
 961	efx->type->reconfigure_mac(efx);
 962	up_read(&efx->filter_sem);
 963}
 964
 965/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
 966 * the MAC appropriately. All other PHY configuration changes are pushed
 967 * through phy_op->set_link_ksettings(), and pushed asynchronously to the MAC
 968 * through ef4_monitor().
 969 *
 970 * Callers must hold the mac_lock
 971 */
 972int __ef4_reconfigure_port(struct ef4_nic *efx)
 973{
 974	enum ef4_phy_mode phy_mode;
 975	int rc;
 976
 977	WARN_ON(!mutex_is_locked(&efx->mac_lock));
 978
 979	/* Disable PHY transmit in mac level loopbacks */
 980	phy_mode = efx->phy_mode;
 981	if (LOOPBACK_INTERNAL(efx))
 982		efx->phy_mode |= PHY_MODE_TX_DISABLED;
 983	else
 984		efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
 985
 986	rc = efx->type->reconfigure_port(efx);
 987
 988	if (rc)
 989		efx->phy_mode = phy_mode;
 990
 991	return rc;
 992}
 993
 994/* Reinitialise the MAC to pick up new PHY settings, even if the port is
 995 * disabled. */
 996int ef4_reconfigure_port(struct ef4_nic *efx)
 997{
 998	int rc;
 999
1000	EF4_ASSERT_RESET_SERIALISED(efx);
1001
1002	mutex_lock(&efx->mac_lock);
1003	rc = __ef4_reconfigure_port(efx);
1004	mutex_unlock(&efx->mac_lock);
1005
1006	return rc;
1007}
1008
1009/* Asynchronous work item for changing MAC promiscuity and multicast
1010 * hash.  Avoid a drain/rx_ingress enable by reconfiguring the current
1011 * MAC directly. */
1012static void ef4_mac_work(struct work_struct *data)
1013{
1014	struct ef4_nic *efx = container_of(data, struct ef4_nic, mac_work);
1015
1016	mutex_lock(&efx->mac_lock);
1017	if (efx->port_enabled)
1018		ef4_mac_reconfigure(efx);
1019	mutex_unlock(&efx->mac_lock);
1020}
1021
1022static int ef4_probe_port(struct ef4_nic *efx)
1023{
1024	int rc;
1025
1026	netif_dbg(efx, probe, efx->net_dev, "create port\n");
1027
1028	if (phy_flash_cfg)
1029		efx->phy_mode = PHY_MODE_SPECIAL;
1030
1031	/* Connect up MAC/PHY operations table */
1032	rc = efx->type->probe_port(efx);
1033	if (rc)
1034		return rc;
1035
1036	/* Initialise MAC address to permanent address */
1037	eth_hw_addr_set(efx->net_dev, efx->net_dev->perm_addr);
1038
1039	return 0;
1040}
1041
1042static int ef4_init_port(struct ef4_nic *efx)
1043{
1044	int rc;
1045
1046	netif_dbg(efx, drv, efx->net_dev, "init port\n");
1047
1048	mutex_lock(&efx->mac_lock);
1049
1050	rc = efx->phy_op->init(efx);
1051	if (rc)
1052		goto fail1;
1053
1054	efx->port_initialized = true;
1055
1056	/* Reconfigure the MAC before creating dma queues (required for
1057	 * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
1058	ef4_mac_reconfigure(efx);
1059
1060	/* Ensure the PHY advertises the correct flow control settings */
1061	rc = efx->phy_op->reconfigure(efx);
1062	if (rc && rc != -EPERM)
1063		goto fail2;
1064
1065	mutex_unlock(&efx->mac_lock);
1066	return 0;
1067
1068fail2:
1069	efx->phy_op->fini(efx);
1070fail1:
1071	mutex_unlock(&efx->mac_lock);
1072	return rc;
1073}
1074
1075static void ef4_start_port(struct ef4_nic *efx)
1076{
1077	netif_dbg(efx, ifup, efx->net_dev, "start port\n");
1078	BUG_ON(efx->port_enabled);
1079
1080	mutex_lock(&efx->mac_lock);
1081	efx->port_enabled = true;
1082
1083	/* Ensure MAC ingress/egress is enabled */
1084	ef4_mac_reconfigure(efx);
1085
1086	mutex_unlock(&efx->mac_lock);
1087}
1088
1089/* Cancel work for MAC reconfiguration, periodic hardware monitoring
1090 * and the async self-test, wait for them to finish and prevent them
1091 * being scheduled again.  This doesn't cover online resets, which
1092 * should only be cancelled when removing the device.
1093 */
1094static void ef4_stop_port(struct ef4_nic *efx)
1095{
1096	netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");
1097
1098	EF4_ASSERT_RESET_SERIALISED(efx);
1099
1100	mutex_lock(&efx->mac_lock);
1101	efx->port_enabled = false;
1102	mutex_unlock(&efx->mac_lock);
1103
1104	/* Serialise against ef4_set_multicast_list() */
1105	netif_addr_lock_bh(efx->net_dev);
1106	netif_addr_unlock_bh(efx->net_dev);
1107
1108	cancel_delayed_work_sync(&efx->monitor_work);
1109	ef4_selftest_async_cancel(efx);
1110	cancel_work_sync(&efx->mac_work);
1111}
1112
1113static void ef4_fini_port(struct ef4_nic *efx)
1114{
1115	netif_dbg(efx, drv, efx->net_dev, "shut down port\n");
1116
1117	if (!efx->port_initialized)
1118		return;
1119
1120	efx->phy_op->fini(efx);
1121	efx->port_initialized = false;
1122
1123	efx->link_state.up = false;
1124	ef4_link_status_changed(efx);
1125}
1126
1127static void ef4_remove_port(struct ef4_nic *efx)
1128{
1129	netif_dbg(efx, drv, efx->net_dev, "destroying port\n");
1130
1131	efx->type->remove_port(efx);
1132}
1133
1134/**************************************************************************
1135 *
1136 * NIC handling
1137 *
1138 **************************************************************************/
1139
1140static LIST_HEAD(ef4_primary_list);
1141static LIST_HEAD(ef4_unassociated_list);
1142
1143static bool ef4_same_controller(struct ef4_nic *left, struct ef4_nic *right)
1144{
1145	return left->type == right->type &&
1146		left->vpd_sn && right->vpd_sn &&
1147		!strcmp(left->vpd_sn, right->vpd_sn);
1148}
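
/* Example: the two PCI functions of a dual-port board typically report
 * the same VPD serial number, so ef4_associate() below pairs them,
 * making one the primary and putting the other on its secondary_list.
 */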
1149
1150static void ef4_associate(struct ef4_nic *efx)
1151{
1152	struct ef4_nic *other, *next;
1153
1154	if (efx->primary == efx) {
1155		/* Adding primary function; look for secondaries */
1156
1157		netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n");
1158		list_add_tail(&efx->node, &ef4_primary_list);
1159
1160		list_for_each_entry_safe(other, next, &ef4_unassociated_list,
1161					 node) {
1162			if (ef4_same_controller(efx, other)) {
1163				list_del(&other->node);
1164				netif_dbg(other, probe, other->net_dev,
1165					  "moving to secondary list of %s %s\n",
1166					  pci_name(efx->pci_dev),
1167					  efx->net_dev->name);
1168				list_add_tail(&other->node,
1169					      &efx->secondary_list);
1170				other->primary = efx;
1171			}
1172		}
1173	} else {
1174		/* Adding secondary function; look for primary */
1175
1176		list_for_each_entry(other, &ef4_primary_list, node) {
1177			if (ef4_same_controller(efx, other)) {
1178				netif_dbg(efx, probe, efx->net_dev,
1179					  "adding to secondary list of %s %s\n",
1180					  pci_name(other->pci_dev),
1181					  other->net_dev->name);
1182				list_add_tail(&efx->node,
1183					      &other->secondary_list);
1184				efx->primary = other;
1185				return;
1186			}
1187		}
1188
1189		netif_dbg(efx, probe, efx->net_dev,
1190			  "adding to unassociated list\n");
1191		list_add_tail(&efx->node, &ef4_unassociated_list);
1192	}
1193}
1194
1195static void ef4_dissociate(struct ef4_nic *efx)
1196{
1197	struct ef4_nic *other, *next;
1198
1199	list_del(&efx->node);
1200	efx->primary = NULL;
1201
1202	list_for_each_entry_safe(other, next, &efx->secondary_list, node) {
1203		list_del(&other->node);
1204		netif_dbg(other, probe, other->net_dev,
1205			  "moving to unassociated list\n");
1206		list_add_tail(&other->node, &ef4_unassociated_list);
1207		other->primary = NULL;
1208	}
1209}
1210
1211/* This configures the PCI device to enable I/O and DMA. */
1212static int ef4_init_io(struct ef4_nic *efx)
1213{
1214	struct pci_dev *pci_dev = efx->pci_dev;
1215	dma_addr_t dma_mask = efx->type->max_dma_mask;
1216	unsigned int mem_map_size = efx->type->mem_map_size(efx);
1217	int rc, bar;
1218
1219	netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
1220
1221	bar = efx->type->mem_bar;
1222
1223	rc = pci_enable_device(pci_dev);
1224	if (rc) {
1225		netif_err(efx, probe, efx->net_dev,
1226			  "failed to enable PCI device\n");
1227		goto fail1;
1228	}
1229
1230	pci_set_master(pci_dev);
1231
1232	/* Set the PCI DMA mask.  Try all possibilities from our genuine mask
 1233	 * down to 32 bits, because some architectures will allow 40 bit
 1234	 * masks even though they reject 46 bit masks.
1235	 */
1236	while (dma_mask > 0x7fffffffUL) {
1237		rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
1238		if (rc == 0)
1239			break;
1240		dma_mask >>= 1;
1241	}
1242	if (rc) {
1243		netif_err(efx, probe, efx->net_dev,
1244			  "could not find a suitable DMA mask\n");
1245		goto fail2;
1246	}
1247	netif_dbg(efx, probe, efx->net_dev,
1248		  "using DMA mask %llx\n", (unsigned long long) dma_mask);
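
	/* E.g. starting from Falcon's 46-bit mask, the loop above tries
	 * 46, 45, ... bits in turn; the "> 0x7fffffffUL" bound means the
	 * narrowest mask attempted is the 32-bit 0xffffffff, after which
	 * the probe fails rather than fall back below 32 bits.
	 */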
1249
1250	efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
1251	rc = pci_request_region(pci_dev, bar, "sfc");
1252	if (rc) {
1253		netif_err(efx, probe, efx->net_dev,
1254			  "request for memory BAR failed\n");
1255		rc = -EIO;
1256		goto fail3;
1257	}
1258	efx->membase = ioremap(efx->membase_phys, mem_map_size);
1259	if (!efx->membase) {
1260		netif_err(efx, probe, efx->net_dev,
1261			  "could not map memory BAR at %llx+%x\n",
1262			  (unsigned long long)efx->membase_phys, mem_map_size);
1263		rc = -ENOMEM;
1264		goto fail4;
1265	}
1266	netif_dbg(efx, probe, efx->net_dev,
1267		  "memory BAR at %llx+%x (virtual %p)\n",
1268		  (unsigned long long)efx->membase_phys, mem_map_size,
1269		  efx->membase);
1270
1271	return 0;
1272
1273 fail4:
1274	pci_release_region(efx->pci_dev, bar);
1275 fail3:
1276	efx->membase_phys = 0;
1277 fail2:
1278	pci_disable_device(efx->pci_dev);
1279 fail1:
1280	return rc;
1281}
1282
1283static void ef4_fini_io(struct ef4_nic *efx)
1284{
1285	int bar;
1286
1287	netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
1288
1289	if (efx->membase) {
1290		iounmap(efx->membase);
1291		efx->membase = NULL;
1292	}
1293
1294	if (efx->membase_phys) {
1295		bar = efx->type->mem_bar;
1296		pci_release_region(efx->pci_dev, bar);
1297		efx->membase_phys = 0;
1298	}
1299
1300	/* Don't disable bus-mastering if VFs are assigned */
1301	if (!pci_vfs_assigned(efx->pci_dev))
1302		pci_disable_device(efx->pci_dev);
1303}
1304
1305void ef4_set_default_rx_indir_table(struct ef4_nic *efx)
1306{
1307	size_t i;
1308
1309	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
1310		efx->rx_indir_table[i] =
1311			ethtool_rxfh_indir_default(i, efx->rss_spread);
1312}
1313
1314static unsigned int ef4_wanted_parallelism(struct ef4_nic *efx)
1315{
1316	cpumask_var_t thread_mask;
1317	unsigned int count;
1318	int cpu;
1319
1320	if (rss_cpus) {
1321		count = rss_cpus;
1322	} else {
1323		if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
1324			netif_warn(efx, probe, efx->net_dev,
1325				   "RSS disabled due to allocation failure\n");
1326			return 1;
1327		}
1328
1329		count = 0;
1330		for_each_online_cpu(cpu) {
1331			if (!cpumask_test_cpu(cpu, thread_mask)) {
1332				++count;
1333				cpumask_or(thread_mask, thread_mask,
1334					   topology_sibling_cpumask(cpu));
1335			}
1336		}
1337
1338		free_cpumask_var(thread_mask);
1339	}
1340
1341	if (count > EF4_MAX_RX_QUEUES) {
1342		netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
1343			       "Reducing number of rx queues from %u to %u.\n",
1344			       count, EF4_MAX_RX_QUEUES);
1345		count = EF4_MAX_RX_QUEUES;
1346	}
1347
1348	return count;
1349}
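
/* Example: on a hyper-threaded system with 8 online logical CPUs and
 * two threads per core, each sibling pair shares a cpumask, so the loop
 * above counts 4, i.e. one RX queue per physical core, subject to the
 * EF4_MAX_RX_QUEUES clamp and the rss_cpus override.
 */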
1350
1351/* Probe the number and type of interrupts we are able to obtain, and
1352 * the resulting numbers of channels and RX queues.
1353 */
1354static int ef4_probe_interrupts(struct ef4_nic *efx)
1355{
1356	unsigned int extra_channels = 0;
1357	unsigned int i, j;
1358	int rc;
1359
1360	for (i = 0; i < EF4_MAX_EXTRA_CHANNELS; i++)
1361		if (efx->extra_channel_type[i])
1362			++extra_channels;
1363
1364	if (efx->interrupt_mode == EF4_INT_MODE_MSIX) {
1365		struct msix_entry xentries[EF4_MAX_CHANNELS];
1366		unsigned int n_channels;
1367
1368		n_channels = ef4_wanted_parallelism(efx);
1369		if (ef4_separate_tx_channels)
1370			n_channels *= 2;
1371		n_channels += extra_channels;
1372		n_channels = min(n_channels, efx->max_channels);
1373
1374		for (i = 0; i < n_channels; i++)
1375			xentries[i].entry = i;
1376		rc = pci_enable_msix_range(efx->pci_dev,
1377					   xentries, 1, n_channels);
1378		if (rc < 0) {
1379			/* Fall back to single channel MSI */
1380			efx->interrupt_mode = EF4_INT_MODE_MSI;
1381			netif_err(efx, drv, efx->net_dev,
1382				  "could not enable MSI-X\n");
1383		} else if (rc < n_channels) {
1384			netif_err(efx, drv, efx->net_dev,
1385				  "WARNING: Insufficient MSI-X vectors"
1386				  " available (%d < %u).\n", rc, n_channels);
1387			netif_err(efx, drv, efx->net_dev,
1388				  "WARNING: Performance may be reduced.\n");
1389			n_channels = rc;
1390		}
1391
1392		if (rc > 0) {
1393			efx->n_channels = n_channels;
1394			if (n_channels > extra_channels)
1395				n_channels -= extra_channels;
1396			if (ef4_separate_tx_channels) {
1397				efx->n_tx_channels = min(max(n_channels / 2,
1398							     1U),
1399							 efx->max_tx_channels);
1400				efx->n_rx_channels = max(n_channels -
1401							 efx->n_tx_channels,
1402							 1U);
1403			} else {
1404				efx->n_tx_channels = min(n_channels,
1405							 efx->max_tx_channels);
1406				efx->n_rx_channels = n_channels;
1407			}
1408			for (i = 0; i < efx->n_channels; i++)
1409				ef4_get_channel(efx, i)->irq =
1410					xentries[i].vector;
1411		}
1412	}
1413
1414	/* Try single interrupt MSI */
1415	if (efx->interrupt_mode == EF4_INT_MODE_MSI) {
1416		efx->n_channels = 1;
1417		efx->n_rx_channels = 1;
1418		efx->n_tx_channels = 1;
1419		rc = pci_enable_msi(efx->pci_dev);
1420		if (rc == 0) {
1421			ef4_get_channel(efx, 0)->irq = efx->pci_dev->irq;
1422		} else {
1423			netif_err(efx, drv, efx->net_dev,
1424				  "could not enable MSI\n");
1425			efx->interrupt_mode = EF4_INT_MODE_LEGACY;
1426		}
1427	}
1428
1429	/* Assume legacy interrupts */
1430	if (efx->interrupt_mode == EF4_INT_MODE_LEGACY) {
1431		efx->n_channels = 1 + (ef4_separate_tx_channels ? 1 : 0);
1432		efx->n_rx_channels = 1;
1433		efx->n_tx_channels = 1;
1434		efx->legacy_irq = efx->pci_dev->irq;
1435	}
1436
1437	/* Assign extra channels if possible */
1438	j = efx->n_channels;
1439	for (i = 0; i < EF4_MAX_EXTRA_CHANNELS; i++) {
1440		if (!efx->extra_channel_type[i])
1441			continue;
1442		if (efx->interrupt_mode != EF4_INT_MODE_MSIX ||
1443		    efx->n_channels <= extra_channels) {
1444			efx->extra_channel_type[i]->handle_no_channel(efx);
1445		} else {
1446			--j;
1447			ef4_get_channel(efx, j)->type =
1448				efx->extra_channel_type[i];
1449		}
1450	}
1451
1452	efx->rss_spread = efx->n_rx_channels;
1453
1454	return 0;
1455}
1456
1457static int ef4_soft_enable_interrupts(struct ef4_nic *efx)
1458{
1459	struct ef4_channel *channel, *end_channel;
1460	int rc;
1461
1462	BUG_ON(efx->state == STATE_DISABLED);
1463
1464	efx->irq_soft_enabled = true;
1465	smp_wmb();
1466
1467	ef4_for_each_channel(channel, efx) {
1468		if (!channel->type->keep_eventq) {
1469			rc = ef4_init_eventq(channel);
1470			if (rc)
1471				goto fail;
1472		}
1473		ef4_start_eventq(channel);
1474	}
1475
1476	return 0;
1477fail:
1478	end_channel = channel;
1479	ef4_for_each_channel(channel, efx) {
1480		if (channel == end_channel)
1481			break;
1482		ef4_stop_eventq(channel);
1483		if (!channel->type->keep_eventq)
1484			ef4_fini_eventq(channel);
1485	}
1486
1487	return rc;
1488}
1489
1490static void ef4_soft_disable_interrupts(struct ef4_nic *efx)
1491{
1492	struct ef4_channel *channel;
1493
1494	if (efx->state == STATE_DISABLED)
1495		return;
1496
1497	efx->irq_soft_enabled = false;
1498	smp_wmb();
1499
1500	if (efx->legacy_irq)
1501		synchronize_irq(efx->legacy_irq);
1502
1503	ef4_for_each_channel(channel, efx) {
1504		if (channel->irq)
1505			synchronize_irq(channel->irq);
1506
1507		ef4_stop_eventq(channel);
1508		if (!channel->type->keep_eventq)
1509			ef4_fini_eventq(channel);
1510	}
1511}
1512
1513static int ef4_enable_interrupts(struct ef4_nic *efx)
1514{
1515	struct ef4_channel *channel, *end_channel;
1516	int rc;
1517
1518	BUG_ON(efx->state == STATE_DISABLED);
1519
1520	if (efx->eeh_disabled_legacy_irq) {
1521		enable_irq(efx->legacy_irq);
1522		efx->eeh_disabled_legacy_irq = false;
1523	}
1524
1525	efx->type->irq_enable_master(efx);
1526
1527	ef4_for_each_channel(channel, efx) {
1528		if (channel->type->keep_eventq) {
1529			rc = ef4_init_eventq(channel);
1530			if (rc)
1531				goto fail;
1532		}
1533	}
1534
1535	rc = ef4_soft_enable_interrupts(efx);
1536	if (rc)
1537		goto fail;
1538
1539	return 0;
1540
1541fail:
1542	end_channel = channel;
1543	ef4_for_each_channel(channel, efx) {
1544		if (channel == end_channel)
1545			break;
1546		if (channel->type->keep_eventq)
1547			ef4_fini_eventq(channel);
1548	}
1549
1550	efx->type->irq_disable_non_ev(efx);
1551
1552	return rc;
1553}
1554
1555static void ef4_disable_interrupts(struct ef4_nic *efx)
1556{
1557	struct ef4_channel *channel;
1558
1559	ef4_soft_disable_interrupts(efx);
1560
1561	ef4_for_each_channel(channel, efx) {
1562		if (channel->type->keep_eventq)
1563			ef4_fini_eventq(channel);
1564	}
1565
1566	efx->type->irq_disable_non_ev(efx);
1567}
1568
1569static void ef4_remove_interrupts(struct ef4_nic *efx)
1570{
1571	struct ef4_channel *channel;
1572
1573	/* Remove MSI/MSI-X interrupts */
1574	ef4_for_each_channel(channel, efx)
1575		channel->irq = 0;
1576	pci_disable_msi(efx->pci_dev);
1577	pci_disable_msix(efx->pci_dev);
1578
1579	/* Remove legacy interrupt */
1580	efx->legacy_irq = 0;
1581}
1582
1583static void ef4_set_channels(struct ef4_nic *efx)
1584{
1585	struct ef4_channel *channel;
1586	struct ef4_tx_queue *tx_queue;
1587
1588	efx->tx_channel_offset =
1589		ef4_separate_tx_channels ?
1590		efx->n_channels - efx->n_tx_channels : 0;
1591
1592	/* We need to mark which channels really have RX and TX
1593	 * queues, and adjust the TX queue numbers if we have separate
1594	 * RX-only and TX-only channels.
1595	 */
1596	ef4_for_each_channel(channel, efx) {
1597		if (channel->channel < efx->n_rx_channels)
1598			channel->rx_queue.core_index = channel->channel;
1599		else
1600			channel->rx_queue.core_index = -1;
1601
1602		ef4_for_each_channel_tx_queue(tx_queue, channel)
1603			tx_queue->queue -= (efx->tx_channel_offset *
1604					    EF4_TXQ_TYPES);
1605	}
1606}
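
/* Renumbering example: with ef4_separate_tx_channels, n_channels == 8
 * and n_tx_channels == 4 give tx_channel_offset == 4.  Channel 4's
 * queues were numbered from 4 * EF4_TXQ_TYPES by ef4_alloc_channel();
 * subtracting tx_channel_offset * EF4_TXQ_TYPES renumbers them from 0,
 * keeping the hardware TX queue numbers dense.
 */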
1607
1608static int ef4_probe_nic(struct ef4_nic *efx)
1609{
1610	int rc;
1611
1612	netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");
1613
1614	/* Carry out hardware-type specific initialisation */
1615	rc = efx->type->probe(efx);
1616	if (rc)
1617		return rc;
1618
1619	do {
1620		if (!efx->max_channels || !efx->max_tx_channels) {
1621			netif_err(efx, drv, efx->net_dev,
1622				  "Insufficient resources to allocate"
1623				  " any channels\n");
1624			rc = -ENOSPC;
1625			goto fail1;
1626		}
1627
1628		/* Determine the number of channels and queues by trying
1629		 * to hook in MSI-X interrupts.
1630		 */
1631		rc = ef4_probe_interrupts(efx);
1632		if (rc)
1633			goto fail1;
1634
1635		ef4_set_channels(efx);
1636
1637		/* dimension_resources can fail with EAGAIN */
1638		rc = efx->type->dimension_resources(efx);
1639		if (rc != 0 && rc != -EAGAIN)
1640			goto fail2;
1641
1642		if (rc == -EAGAIN)
1643			/* try again with new max_channels */
1644			ef4_remove_interrupts(efx);
1645
1646	} while (rc == -EAGAIN);
1647
1648	if (efx->n_channels > 1)
1649		netdev_rss_key_fill(&efx->rx_hash_key,
1650				    sizeof(efx->rx_hash_key));
1651	ef4_set_default_rx_indir_table(efx);
1652
1653	netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
1654	netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
1655
1656	/* Initialise the interrupt moderation settings */
1657	efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000);
1658	ef4_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
1659				true);
1660
1661	return 0;
1662
1663fail2:
1664	ef4_remove_interrupts(efx);
1665fail1:
1666	efx->type->remove(efx);
1667	return rc;
1668}
1669
1670static void ef4_remove_nic(struct ef4_nic *efx)
1671{
1672	netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");
1673
1674	ef4_remove_interrupts(efx);
1675	efx->type->remove(efx);
1676}
1677
1678static int ef4_probe_filters(struct ef4_nic *efx)
1679{
1680	int rc;
1681
1682	spin_lock_init(&efx->filter_lock);
1683	init_rwsem(&efx->filter_sem);
1684	mutex_lock(&efx->mac_lock);
1685	down_write(&efx->filter_sem);
1686	rc = efx->type->filter_table_probe(efx);
1687	if (rc)
1688		goto out_unlock;
1689
1690#ifdef CONFIG_RFS_ACCEL
1691	if (efx->type->offload_features & NETIF_F_NTUPLE) {
1692		struct ef4_channel *channel;
1693		int i, success = 1;
1694
1695		ef4_for_each_channel(channel, efx) {
1696			channel->rps_flow_id =
1697				kcalloc(efx->type->max_rx_ip_filters,
1698					sizeof(*channel->rps_flow_id),
1699					GFP_KERNEL);
1700			if (!channel->rps_flow_id)
1701				success = 0;
1702			else
1703				for (i = 0;
1704				     i < efx->type->max_rx_ip_filters;
1705				     ++i)
1706					channel->rps_flow_id[i] =
1707						RPS_FLOW_ID_INVALID;
1708		}
1709
1710		if (!success) {
1711			ef4_for_each_channel(channel, efx)
1712				kfree(channel->rps_flow_id);
1713			efx->type->filter_table_remove(efx);
1714			rc = -ENOMEM;
1715			goto out_unlock;
1716		}
1717
1718		efx->rps_expire_index = efx->rps_expire_channel = 0;
1719	}
1720#endif
1721out_unlock:
1722	up_write(&efx->filter_sem);
1723	mutex_unlock(&efx->mac_lock);
1724	return rc;
1725}
1726
1727static void ef4_remove_filters(struct ef4_nic *efx)
1728{
1729#ifdef CONFIG_RFS_ACCEL
1730	struct ef4_channel *channel;
1731
1732	ef4_for_each_channel(channel, efx)
1733		kfree(channel->rps_flow_id);
1734#endif
1735	down_write(&efx->filter_sem);
1736	efx->type->filter_table_remove(efx);
1737	up_write(&efx->filter_sem);
1738}
1739
1740static void ef4_restore_filters(struct ef4_nic *efx)
1741{
1742	down_read(&efx->filter_sem);
1743	efx->type->filter_table_restore(efx);
1744	up_read(&efx->filter_sem);
1745}
1746
1747/**************************************************************************
1748 *
1749 * NIC startup/shutdown
1750 *
1751 *************************************************************************/
1752
1753static int ef4_probe_all(struct ef4_nic *efx)
1754{
1755	int rc;
1756
1757	rc = ef4_probe_nic(efx);
1758	if (rc) {
1759		netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
1760		goto fail1;
1761	}
1762
1763	rc = ef4_probe_port(efx);
1764	if (rc) {
1765		netif_err(efx, probe, efx->net_dev, "failed to create port\n");
1766		goto fail2;
1767	}
1768
1769	BUILD_BUG_ON(EF4_DEFAULT_DMAQ_SIZE < EF4_RXQ_MIN_ENT);
1770	if (WARN_ON(EF4_DEFAULT_DMAQ_SIZE < EF4_TXQ_MIN_ENT(efx))) {
1771		rc = -EINVAL;
1772		goto fail3;
1773	}
1774	efx->rxq_entries = efx->txq_entries = EF4_DEFAULT_DMAQ_SIZE;
1775
1776	rc = ef4_probe_filters(efx);
1777	if (rc) {
1778		netif_err(efx, probe, efx->net_dev,
1779			  "failed to create filter tables\n");
1780		goto fail4;
1781	}
1782
1783	rc = ef4_probe_channels(efx);
1784	if (rc)
1785		goto fail5;
1786
1787	return 0;
1788
1789 fail5:
1790	ef4_remove_filters(efx);
1791 fail4:
1792 fail3:
1793	ef4_remove_port(efx);
1794 fail2:
1795	ef4_remove_nic(efx);
1796 fail1:
1797	return rc;
1798}
1799
1800/* If the interface is supposed to be running but is not, start
1801 * the hardware and software data path, regular activity for the port
1802 * (MAC statistics, link polling, etc.) and schedule the port to be
1803 * reconfigured.  Interrupts must already be enabled.  This function
1804 * is safe to call multiple times, so long as the NIC is not disabled.
1805 * Requires the RTNL lock.
1806 */
1807static void ef4_start_all(struct ef4_nic *efx)
1808{
1809	EF4_ASSERT_RESET_SERIALISED(efx);
1810	BUG_ON(efx->state == STATE_DISABLED);
1811
1812	/* Check that it is appropriate to restart the interface. All
1813	 * of these flags are safe to read under just the rtnl lock */
1814	if (efx->port_enabled || !netif_running(efx->net_dev) ||
1815	    efx->reset_pending)
1816		return;
1817
1818	ef4_start_port(efx);
1819	ef4_start_datapath(efx);
1820
1821	/* Start the hardware monitor if there is one */
1822	if (efx->type->monitor != NULL)
1823		queue_delayed_work(efx->workqueue, &efx->monitor_work,
1824				   ef4_monitor_interval);
1825
1826	efx->type->start_stats(efx);
1827	efx->type->pull_stats(efx);
1828	spin_lock_bh(&efx->stats_lock);
1829	efx->type->update_stats(efx, NULL, NULL);
1830	spin_unlock_bh(&efx->stats_lock);
1831}
1832
1833/* Quiesce the hardware and software data path, and regular activity
1834 * for the port without bringing the link down.  Safe to call multiple
1835 * times with the NIC in almost any state, but interrupts should be
1836 * enabled.  Requires the RTNL lock.
1837 */
1838static void ef4_stop_all(struct ef4_nic *efx)
1839{
1840	EF4_ASSERT_RESET_SERIALISED(efx);
1841
1842	/* port_enabled can be read safely under the rtnl lock */
1843	if (!efx->port_enabled)
1844		return;
1845
1846	/* update stats before we go down so we can accurately count
1847	 * rx_nodesc_drops
1848	 */
1849	efx->type->pull_stats(efx);
1850	spin_lock_bh(&efx->stats_lock);
1851	efx->type->update_stats(efx, NULL, NULL);
1852	spin_unlock_bh(&efx->stats_lock);
1853	efx->type->stop_stats(efx);
1854	ef4_stop_port(efx);
1855
1856	/* Stop the kernel transmit interface.  This is only valid if
1857	 * the device is stopped or detached; otherwise the watchdog
1858	 * may fire immediately.
1859	 */
1860	WARN_ON(netif_running(efx->net_dev) &&
1861		netif_device_present(efx->net_dev));
1862	netif_tx_disable(efx->net_dev);
1863
1864	ef4_stop_datapath(efx);
1865}
1866
1867static void ef4_remove_all(struct ef4_nic *efx)
1868{
1869	ef4_remove_channels(efx);
1870	ef4_remove_filters(efx);
1871	ef4_remove_port(efx);
1872	ef4_remove_nic(efx);
1873}
1874
1875/**************************************************************************
1876 *
1877 * Interrupt moderation
1878 *
1879 **************************************************************************/
1880unsigned int ef4_usecs_to_ticks(struct ef4_nic *efx, unsigned int usecs)
1881{
1882	if (usecs == 0)
1883		return 0;
1884	if (usecs * 1000 < efx->timer_quantum_ns)
1885		return 1; /* never round down to 0 */
1886	return usecs * 1000 / efx->timer_quantum_ns;
1887}
1888
1889unsigned int ef4_ticks_to_usecs(struct ef4_nic *efx, unsigned int ticks)
1890{
1891	/* We must round up when converting ticks to microseconds
1892	 * because we round down when converting the other way.
1893	 */
1894	return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000);
1895}
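
/* Round-trip example, assuming an illustrative timer_quantum_ns of
 * 4096: ef4_usecs_to_ticks(efx, 20) = 20000 / 4096 = 4 ticks (rounded
 * down), and ef4_ticks_to_usecs(efx, 4) = DIV_ROUND_UP(16384, 1000) =
 * 17 us (rounded up), so the round trip never exceeds the requested
 * value.  The one exception is the 1-tick clamp: a request of 2 us
 * still yields 1 tick, which reads back as 5 us.
 */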
1896
1897/* Set interrupt moderation parameters */
1898int ef4_init_irq_moderation(struct ef4_nic *efx, unsigned int tx_usecs,
1899			    unsigned int rx_usecs, bool rx_adaptive,
1900			    bool rx_may_override_tx)
1901{
1902	struct ef4_channel *channel;
1903	unsigned int timer_max_us;
1904
1905	EF4_ASSERT_RESET_SERIALISED(efx);
1906
1907	timer_max_us = efx->timer_max_ns / 1000;
1908
1909	if (tx_usecs > timer_max_us || rx_usecs > timer_max_us)
1910		return -EINVAL;
1911
1912	if (tx_usecs != rx_usecs && efx->tx_channel_offset == 0 &&
1913	    !rx_may_override_tx) {
1914		netif_err(efx, drv, efx->net_dev, "Channels are shared. "
1915			  "RX and TX IRQ moderation must be equal\n");
1916		return -EINVAL;
1917	}
1918
1919	efx->irq_rx_adaptive = rx_adaptive;
1920	efx->irq_rx_moderation_us = rx_usecs;
1921	ef4_for_each_channel(channel, efx) {
1922		if (ef4_channel_has_rx_queue(channel))
1923			channel->irq_moderation_us = rx_usecs;
1924		else if (ef4_channel_has_tx_queues(channel))
1925			channel->irq_moderation_us = tx_usecs;
1926	}
1927
1928	return 0;
1929}
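
/* Example: with shared channels (tx_channel_offset == 0), requesting
 * tx_usecs != rx_usecs fails with -EINVAL unless rx_may_override_tx is
 * set, in which case every channel has an RX queue and the RX value
 * simply wins in the loop above.
 */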
1930
1931void ef4_get_irq_moderation(struct ef4_nic *efx, unsigned int *tx_usecs,
1932			    unsigned int *rx_usecs, bool *rx_adaptive)
1933{
1934	*rx_adaptive = efx->irq_rx_adaptive;
1935	*rx_usecs = efx->irq_rx_moderation_us;
1936
1937	/* If channels are shared between RX and TX, so is IRQ
1938	 * moderation.  Otherwise, IRQ moderation is the same for all
1939	 * TX channels and is not adaptive.
1940	 */
1941	if (efx->tx_channel_offset == 0) {
1942		*tx_usecs = *rx_usecs;
1943	} else {
1944		struct ef4_channel *tx_channel;
1945
1946		tx_channel = efx->channel[efx->tx_channel_offset];
1947		*tx_usecs = tx_channel->irq_moderation_us;
1948	}
1949}
1950
1951/**************************************************************************
1952 *
1953 * Hardware monitor
1954 *
1955 **************************************************************************/
1956
1957/* Run periodically off the general workqueue */
1958static void ef4_monitor(struct work_struct *data)
1959{
1960	struct ef4_nic *efx = container_of(data, struct ef4_nic,
1961					   monitor_work.work);
1962
1963	netif_vdbg(efx, timer, efx->net_dev,
1964		   "hardware monitor executing on CPU %d\n",
1965		   raw_smp_processor_id());
1966	BUG_ON(efx->type->monitor == NULL);
1967
 1968	/* If the mac_lock is already held then a port reconfiguration
 1969	 * is probably in progress, and that will do most of the work
 1970	 * of monitor() anyway. */
1971	if (mutex_trylock(&efx->mac_lock)) {
1972		if (efx->port_enabled)
1973			efx->type->monitor(efx);
1974		mutex_unlock(&efx->mac_lock);
1975	}
1976
1977	queue_delayed_work(efx->workqueue, &efx->monitor_work,
1978			   ef4_monitor_interval);
1979}
1980
1981/**************************************************************************
1982 *
1983 * ioctls
1984 *
1985 *************************************************************************/
1986
1987/* Net device ioctl
1988 * Context: process, rtnl_lock() held.
1989 */
1990static int ef4_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
1991{
1992	struct ef4_nic *efx = netdev_priv(net_dev);
1993	struct mii_ioctl_data *data = if_mii(ifr);
1994
1995	/* Convert phy_id from older PRTAD/DEVAD format */
1996	if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
1997	    (data->phy_id & 0xfc00) == 0x0400)
1998		data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;
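
	/* E.g. a legacy phy_id of 0x0445 becomes 0x0445 ^ 0x8400 = 0x8045
	 * (MDIO_PHY_ID_C45 being 0x8000): the XOR clears the old 0x0400
	 * marker and sets the clause-45 flag, leaving the PRTAD/DEVAD
	 * fields in the low bits untouched.
	 */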
1999
2000	return mdio_mii_ioctl(&efx->mdio, data, cmd);
2001}
2002
2003/**************************************************************************
2004 *
2005 * NAPI interface
2006 *
2007 **************************************************************************/
2008
2009static void ef4_init_napi_channel(struct ef4_channel *channel)
2010{
2011	struct ef4_nic *efx = channel->efx;
2012
2013	channel->napi_dev = efx->net_dev;
2014	netif_napi_add(channel->napi_dev, &channel->napi_str, ef4_poll);
2015}
2016
2017static void ef4_init_napi(struct ef4_nic *efx)
2018{
2019	struct ef4_channel *channel;
2020
2021	ef4_for_each_channel(channel, efx)
2022		ef4_init_napi_channel(channel);
2023}
2024
2025static void ef4_fini_napi_channel(struct ef4_channel *channel)
2026{
2027	if (channel->napi_dev)
2028		netif_napi_del(&channel->napi_str);
2029
2030	channel->napi_dev = NULL;
2031}
2032
2033static void ef4_fini_napi(struct ef4_nic *efx)
2034{
2035	struct ef4_channel *channel;
2036
2037	ef4_for_each_channel(channel, efx)
2038		ef4_fini_napi_channel(channel);
2039}
2040
2041/**************************************************************************
2042 *
2043 * Kernel net device interface
2044 *
2045 *************************************************************************/
2046
2047/* Context: process, rtnl_lock() held. */
2048int ef4_net_open(struct net_device *net_dev)
2049{
2050	struct ef4_nic *efx = netdev_priv(net_dev);
2051	int rc;
2052
2053	netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
2054		  raw_smp_processor_id());
2055
2056	rc = ef4_check_disabled(efx);
2057	if (rc)
2058		return rc;
2059	if (efx->phy_mode & PHY_MODE_SPECIAL)
2060		return -EBUSY;
2061
2062	/* Notify the kernel of the link state polled during driver load,
2063	 * before the monitor starts running */
2064	ef4_link_status_changed(efx);
2065
2066	ef4_start_all(efx);
2067	ef4_selftest_async_start(efx);
2068	return 0;
2069}
2070
2071/* Context: process, rtnl_lock() held.
2072 * Note that the kernel will ignore our return code; this method
2073 * should really be a void.
2074 */
2075int ef4_net_stop(struct net_device *net_dev)
2076{
2077	struct ef4_nic *efx = netdev_priv(net_dev);
2078
2079	netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
2080		  raw_smp_processor_id());
2081
2082	/* Stop the device and flush all the channels */
2083	ef4_stop_all(efx);
2084
2085	return 0;
2086}
2087
2088/* Context: process, dev_base_lock or RTNL held, non-blocking. */
2089static void ef4_net_stats(struct net_device *net_dev,
2090			  struct rtnl_link_stats64 *stats)
2091{
2092	struct ef4_nic *efx = netdev_priv(net_dev);
2093
2094	spin_lock_bh(&efx->stats_lock);
2095	efx->type->update_stats(efx, NULL, stats);
2096	spin_unlock_bh(&efx->stats_lock);
2097}
2098
2099/* Context: netif_tx_lock held, BHs disabled. */
2100static void ef4_watchdog(struct net_device *net_dev, unsigned int txqueue)
2101{
2102	struct ef4_nic *efx = netdev_priv(net_dev);
2103
2104	netif_err(efx, tx_err, efx->net_dev,
2105		  "TX stuck with port_enabled=%d: resetting channels\n",
2106		  efx->port_enabled);
2107
2108	ef4_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
2109}
2110
2111
2112/* Context: process, rtnl_lock() held. */
2113static int ef4_change_mtu(struct net_device *net_dev, int new_mtu)
2114{
2115	struct ef4_nic *efx = netdev_priv(net_dev);
2116	int rc;
2117
2118	rc = ef4_check_disabled(efx);
2119	if (rc)
2120		return rc;
2121
2122	netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
2123
2124	ef4_device_detach_sync(efx);
2125	ef4_stop_all(efx);
2126
2127	mutex_lock(&efx->mac_lock);
2128	net_dev->mtu = new_mtu;
2129	ef4_mac_reconfigure(efx);
2130	mutex_unlock(&efx->mac_lock);
2131
2132	ef4_start_all(efx);
2133	netif_device_attach(efx->net_dev);
2134	return 0;
2135}
2136
2137static int ef4_set_mac_address(struct net_device *net_dev, void *data)
2138{
2139	struct ef4_nic *efx = netdev_priv(net_dev);
2140	struct sockaddr *addr = data;
2141	u8 *new_addr = addr->sa_data;
2142	u8 old_addr[6];
2143	int rc;
2144
2145	if (!is_valid_ether_addr(new_addr)) {
2146		netif_err(efx, drv, efx->net_dev,
2147			  "invalid ethernet MAC address requested: %pM\n",
2148			  new_addr);
2149		return -EADDRNOTAVAIL;
2150	}
2151
2152	/* save old address */
2153	ether_addr_copy(old_addr, net_dev->dev_addr);
2154	eth_hw_addr_set(net_dev, new_addr);
2155	if (efx->type->set_mac_address) {
2156		rc = efx->type->set_mac_address(efx);
2157		if (rc) {
2158			eth_hw_addr_set(net_dev, old_addr);
2159			return rc;
2160		}
2161	}
2162
2163	/* Reconfigure the MAC */
2164	mutex_lock(&efx->mac_lock);
2165	ef4_mac_reconfigure(efx);
2166	mutex_unlock(&efx->mac_lock);
2167
2168	return 0;
2169}
2170
2171/* Context: netif_addr_lock held, BHs disabled. */
2172static void ef4_set_rx_mode(struct net_device *net_dev)
2173{
2174	struct ef4_nic *efx = netdev_priv(net_dev);
2175
2176	if (efx->port_enabled)
2177		queue_work(efx->workqueue, &efx->mac_work);
2178	/* Otherwise ef4_start_port() will do this */
2179}
2180
2181static int ef4_set_features(struct net_device *net_dev, netdev_features_t data)
2182{
2183	struct ef4_nic *efx = netdev_priv(net_dev);
2184	int rc;
2185
2186	/* If disabling RX n-tuple filtering, clear existing filters */
2187	if (net_dev->features & ~data & NETIF_F_NTUPLE) {
2188		rc = efx->type->filter_clear_rx(efx, EF4_FILTER_PRI_MANUAL);
2189		if (rc)
2190			return rc;
2191	}
2192
2193	/* If Rx VLAN filter is changed, update filters via mac_reconfigure */
2194	if ((net_dev->features ^ data) & NETIF_F_HW_VLAN_CTAG_FILTER) {
2195		/* ef4_set_rx_mode() will schedule MAC work to update filters
 2196		 * when the new features are finally set in net_dev.
2197		 */
2198		ef4_set_rx_mode(net_dev);
2199	}
2200
2201	return 0;
2202}
2203
2204static const struct net_device_ops ef4_netdev_ops = {
2205	.ndo_open		= ef4_net_open,
2206	.ndo_stop		= ef4_net_stop,
2207	.ndo_get_stats64	= ef4_net_stats,
2208	.ndo_tx_timeout		= ef4_watchdog,
2209	.ndo_start_xmit		= ef4_hard_start_xmit,
2210	.ndo_validate_addr	= eth_validate_addr,
2211	.ndo_eth_ioctl		= ef4_ioctl,
2212	.ndo_change_mtu		= ef4_change_mtu,
2213	.ndo_set_mac_address	= ef4_set_mac_address,
2214	.ndo_set_rx_mode	= ef4_set_rx_mode,
2215	.ndo_set_features	= ef4_set_features,
2216	.ndo_setup_tc		= ef4_setup_tc,
2217#ifdef CONFIG_RFS_ACCEL
2218	.ndo_rx_flow_steer	= ef4_filter_rfs,
2219#endif
2220};
2221
2222static void ef4_update_name(struct ef4_nic *efx)
2223{
2224	strcpy(efx->name, efx->net_dev->name);
2225	ef4_mtd_rename(efx);
2226	ef4_set_channel_names(efx);
2227}
2228
2229static int ef4_netdev_event(struct notifier_block *this,
2230			    unsigned long event, void *ptr)
2231{
2232	struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
2233
2234	if ((net_dev->netdev_ops == &ef4_netdev_ops) &&
2235	    event == NETDEV_CHANGENAME)
2236		ef4_update_name(netdev_priv(net_dev));
2237
2238	return NOTIFY_DONE;
2239}
2240
2241static struct notifier_block ef4_netdev_notifier = {
2242	.notifier_call = ef4_netdev_event,
2243};
2244
2245static ssize_t
2246phy_type_show(struct device *dev, struct device_attribute *attr, char *buf)
2247{
2248	struct ef4_nic *efx = dev_get_drvdata(dev);
2249	return sprintf(buf, "%d\n", efx->phy_type);
2250}
2251static DEVICE_ATTR_RO(phy_type);
2252
2253static int ef4_register_netdev(struct ef4_nic *efx)
2254{
2255	struct net_device *net_dev = efx->net_dev;
2256	struct ef4_channel *channel;
2257	int rc;
2258
2259	net_dev->watchdog_timeo = 5 * HZ;
2260	net_dev->irq = efx->pci_dev->irq;
2261	net_dev->netdev_ops = &ef4_netdev_ops;
2262	net_dev->ethtool_ops = &ef4_ethtool_ops;
2263	netif_set_tso_max_segs(net_dev, EF4_TSO_MAX_SEGS);
2264	net_dev->min_mtu = EF4_MIN_MTU;
2265	net_dev->max_mtu = EF4_MAX_MTU;
2266
2267	rtnl_lock();
2268
2269	/* Enable resets to be scheduled and check whether any were
2270	 * already requested.  If so, the NIC is probably hosed so we
2271	 * abort.
2272	 */
2273	efx->state = STATE_READY;
2274	smp_mb(); /* ensure we change state before checking reset_pending */
2275	if (efx->reset_pending) {
2276		netif_err(efx, probe, efx->net_dev,
2277			  "aborting probe due to scheduled reset\n");
2278		rc = -EIO;
2279		goto fail_locked;
2280	}
2281
2282	rc = dev_alloc_name(net_dev, net_dev->name);
2283	if (rc < 0)
2284		goto fail_locked;
2285	ef4_update_name(efx);
2286
2287	/* Always start with carrier off; PHY events will detect the link */
2288	netif_carrier_off(net_dev);
2289
2290	rc = register_netdevice(net_dev);
2291	if (rc)
2292		goto fail_locked;
2293
2294	ef4_for_each_channel(channel, efx) {
2295		struct ef4_tx_queue *tx_queue;
2296		ef4_for_each_channel_tx_queue(tx_queue, channel)
2297			ef4_init_tx_queue_core_txq(tx_queue);
2298	}
2299
2300	ef4_associate(efx);
2301
2302	rtnl_unlock();
2303
2304	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
2305	if (rc) {
2306		netif_err(efx, drv, efx->net_dev,
2307			  "failed to init net dev attributes\n");
2308		goto fail_registered;
2309	}
2310	return 0;
2311
2312fail_registered:
2313	rtnl_lock();
2314	ef4_dissociate(efx);
2315	unregister_netdevice(net_dev);
2316fail_locked:
2317	efx->state = STATE_UNINIT;
2318	rtnl_unlock();
2319	netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
2320	return rc;
2321}
2322
2323static void ef4_unregister_netdev(struct ef4_nic *efx)
2324{
2325	if (!efx->net_dev)
2326		return;
2327
2328	BUG_ON(netdev_priv(efx->net_dev) != efx);
2329
2330	if (ef4_dev_registered(efx)) {
2331		strscpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
2332		device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
2333		unregister_netdev(efx->net_dev);
2334	}
2335}
2336
2337/**************************************************************************
2338 *
2339 * Device reset and suspend
2340 *
2341 **************************************************************************/
2342
2343/* Tears down the entire software state and most of the hardware state
2344 * before reset.  */
2345void ef4_reset_down(struct ef4_nic *efx, enum reset_type method)
2346{
2347	EF4_ASSERT_RESET_SERIALISED(efx);
2348
2349	ef4_stop_all(efx);
2350	ef4_disable_interrupts(efx);
2351
2352	mutex_lock(&efx->mac_lock);
2353	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
2354	    method != RESET_TYPE_DATAPATH)
2355		efx->phy_op->fini(efx);
2356	efx->type->fini(efx);
2357}
2358
2359/* This function will always ensure that the locks acquired in
2360 * ef4_reset_down() are released. A failure return code indicates
2361 * that we were unable to reinitialise the hardware, and the
2362 * driver should be disabled. If ok is false, then the rx and tx
2363 * engines are not restarted, pending a RESET_TYPE_DISABLE. */
2364int ef4_reset_up(struct ef4_nic *efx, enum reset_type method, bool ok)
2365{
2366	int rc;
2367
2368	EF4_ASSERT_RESET_SERIALISED(efx);
2369
2370	/* Ensure that SRAM is initialised even if we're disabling the device */
2371	rc = efx->type->init(efx);
2372	if (rc) {
2373		netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
2374		goto fail;
2375	}
2376
2377	if (!ok)
2378		goto fail;
2379
2380	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
2381	    method != RESET_TYPE_DATAPATH) {
2382		rc = efx->phy_op->init(efx);
2383		if (rc)
2384			goto fail;
2385		rc = efx->phy_op->reconfigure(efx);
2386		if (rc && rc != -EPERM)
2387			netif_err(efx, drv, efx->net_dev,
2388				  "could not restore PHY settings\n");
2389	}
2390
2391	rc = ef4_enable_interrupts(efx);
2392	if (rc)
2393		goto fail;
2394
2395	down_read(&efx->filter_sem);
2396	ef4_restore_filters(efx);
2397	up_read(&efx->filter_sem);
2398
2399	mutex_unlock(&efx->mac_lock);
2400
2401	ef4_start_all(efx);
2402
2403	return 0;
2404
2405fail:
2406	efx->port_initialized = false;
2407
2408	mutex_unlock(&efx->mac_lock);
2409
2410	return rc;
2411}
2412
2413/* Reset the NIC using the specified method.  Note that the reset may
2414 * fail, in which case the card will be left in an unusable state.
2415 *
2416 * Caller must hold the rtnl_lock.
2417 */
2418int ef4_reset(struct ef4_nic *efx, enum reset_type method)
2419{
2420	int rc, rc2;
2421	bool disabled;
2422
2423	netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
2424		   RESET_TYPE(method));
2425
2426	ef4_device_detach_sync(efx);
2427	ef4_reset_down(efx, method);
2428
2429	rc = efx->type->reset(efx, method);
2430	if (rc) {
2431		netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
2432		goto out;
2433	}
2434
2435	/* Clear flags for the scopes we covered.  We assume the NIC and
2436	 * driver are now quiescent so that there is no race here.
2437	 */
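	/* Methods below RESET_TYPE_MAX_METHOD form an ordered scope
	 * hierarchy, so a reset of scope 'method' also covers every
	 * narrower one: -(1 << (method + 1)) has exactly the bits above
	 * 'method' set, clearing the flags for all scopes just handled.
	 */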
2438	if (method < RESET_TYPE_MAX_METHOD)
2439		efx->reset_pending &= -(1 << (method + 1));
2440	else /* it doesn't fit into the well-ordered scope hierarchy */
2441		__clear_bit(method, &efx->reset_pending);
2442
2443	/* Reinitialise bus-mastering, which may have been turned off before
2444	 * the reset was scheduled. This is still appropriate, even in the
2445	 * RESET_TYPE_DISABLE since this driver generally assumes the hardware
2446	 * can respond to requests. */
2447	pci_set_master(efx->pci_dev);
2448
2449out:
2450	/* Leave device stopped if necessary */
2451	disabled = rc ||
2452		method == RESET_TYPE_DISABLE ||
2453		method == RESET_TYPE_RECOVER_OR_DISABLE;
2454	rc2 = ef4_reset_up(efx, method, !disabled);
2455	if (rc2) {
2456		disabled = true;
2457		if (!rc)
2458			rc = rc2;
2459	}
2460
2461	if (disabled) {
2462		dev_close(efx->net_dev);
2463		netif_err(efx, drv, efx->net_dev, "has been disabled\n");
2464		efx->state = STATE_DISABLED;
2465	} else {
2466		netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
2467		netif_device_attach(efx->net_dev);
2468	}
2469	return rc;
2470}
2471
2472/* Try recovery mechanisms.
2473 * For now only EEH is supported.
2474 * Returns 0 if the recovery mechanisms are unsuccessful.
2475 * Returns a non-zero value otherwise.
2476 */
2477int ef4_try_recovery(struct ef4_nic *efx)
2478{
2479#ifdef CONFIG_EEH
2480	/* A PCI error can occur and not be seen by EEH because nothing
2481	 * happens on the PCI bus. In this case the driver may fail and
2482	 * schedule a 'recover or reset', leading to this recovery handler.
2483	 * Manually call the eeh failure check function.
2484	 */
2485	struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev);
2486	if (eeh_dev_check_failure(eehdev)) {
2487		/* The EEH mechanisms will handle the error and reset the
2488		 * device if necessary.
2489		 */
2490		return 1;
2491	}
2492#endif
2493	return 0;
2494}
2495
2496/* The worker thread exists so that code that cannot sleep can
2497 * schedule a reset for later.
2498 */
2499static void ef4_reset_work(struct work_struct *data)
2500{
2501	struct ef4_nic *efx = container_of(data, struct ef4_nic, reset_work);
2502	unsigned long pending;
2503	enum reset_type method;
2504
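	/* fls() returns the 1-based index of the highest set bit, so this
	 * selects the widest-scope reset currently pending; fls(0) is 0,
	 * giving method == -1, which the !pending check below rejects.
	 */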
2505	pending = READ_ONCE(efx->reset_pending);
2506	method = fls(pending) - 1;
2507
2508	if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
2509	     method == RESET_TYPE_RECOVER_OR_ALL) &&
2510	    ef4_try_recovery(efx))
2511		return;
2512
2513	if (!pending)
2514		return;
2515
2516	rtnl_lock();
2517
2518	/* We checked the state in ef4_schedule_reset() but it may
2519	 * have changed by now.  Now that we have the RTNL lock,
2520	 * it cannot change again.
2521	 */
2522	if (efx->state == STATE_READY)
2523		(void)ef4_reset(efx, method);
2524
2525	rtnl_unlock();
2526}
2527
2528void ef4_schedule_reset(struct ef4_nic *efx, enum reset_type type)
2529{
2530	enum reset_type method;
2531
2532	if (efx->state == STATE_RECOVERY) {
2533		netif_dbg(efx, drv, efx->net_dev,
2534			  "recovering: skip scheduling %s reset\n",
2535			  RESET_TYPE(type));
2536		return;
2537	}
2538
2539	switch (type) {
2540	case RESET_TYPE_INVISIBLE:
2541	case RESET_TYPE_ALL:
2542	case RESET_TYPE_RECOVER_OR_ALL:
2543	case RESET_TYPE_WORLD:
2544	case RESET_TYPE_DISABLE:
2545	case RESET_TYPE_RECOVER_OR_DISABLE:
2546	case RESET_TYPE_DATAPATH:
2547		method = type;
2548		netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
2549			  RESET_TYPE(method));
2550		break;
2551	default:
2552		method = efx->type->map_reset_reason(type);
2553		netif_dbg(efx, drv, efx->net_dev,
2554			  "scheduling %s reset for %s\n",
2555			  RESET_TYPE(method), RESET_TYPE(type));
2556		break;
2557	}
2558
2559	set_bit(method, &efx->reset_pending);
2560	smp_mb(); /* ensure we change reset_pending before checking state */
2561
2562	/* If we're not READY then just leave the flags set as the cue
2563	 * to abort probing or reschedule the reset later.
2564	 */
2565	if (READ_ONCE(efx->state) != STATE_READY)
2566		return;
2567
2568	queue_work(reset_workqueue, &efx->reset_work);
2569}
2570
2571/**************************************************************************
2572 *
2573 * List of NICs we support
2574 *
2575 **************************************************************************/
2576
2577/* PCI device ID table */
2578static const struct pci_device_id ef4_pci_table[] = {
2579	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
2580		    PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0),
2581	 .driver_data = (unsigned long) &falcon_a1_nic_type},
2582	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
2583		    PCI_DEVICE_ID_SOLARFLARE_SFC4000B),
2584	 .driver_data = (unsigned long) &falcon_b0_nic_type},
2585	{0}			/* end of list */
2586};
2587
2588/**************************************************************************
2589 *
2590 * Dummy PHY/MAC operations
2591 *
2592 * Can be used for some unimplemented operations
2593 * Needed so all function pointers are valid and do not have to be tested
2594 * before use
2595 *
2596 **************************************************************************/
2597int ef4_port_dummy_op_int(struct ef4_nic *efx)
2598{
2599	return 0;
2600}
2601void ef4_port_dummy_op_void(struct ef4_nic *efx) {}
2602
2603static bool ef4_port_dummy_op_poll(struct ef4_nic *efx)
2604{
2605	return false;
2606}
2607
2608static const struct ef4_phy_operations ef4_dummy_phy_operations = {
2609	.init		 = ef4_port_dummy_op_int,
2610	.reconfigure	 = ef4_port_dummy_op_int,
2611	.poll		 = ef4_port_dummy_op_poll,
2612	.fini		 = ef4_port_dummy_op_void,
2613};
2614
2615/**************************************************************************
2616 *
2617 * Data housekeeping
2618 *
2619 **************************************************************************/
2620
2621/* This zeroes out and then fills in the invariants in a struct
2622 * ef4_nic (including all sub-structures).
2623 */
2624static int ef4_init_struct(struct ef4_nic *efx,
2625			   struct pci_dev *pci_dev, struct net_device *net_dev)
2626{
2627	int i;
2628
2629	/* Initialise common structures */
2630	INIT_LIST_HEAD(&efx->node);
2631	INIT_LIST_HEAD(&efx->secondary_list);
2632	spin_lock_init(&efx->biu_lock);
2633#ifdef CONFIG_SFC_FALCON_MTD
2634	INIT_LIST_HEAD(&efx->mtd_list);
2635#endif
2636	INIT_WORK(&efx->reset_work, ef4_reset_work);
2637	INIT_DELAYED_WORK(&efx->monitor_work, ef4_monitor);
2638	INIT_DELAYED_WORK(&efx->selftest_work, ef4_selftest_async_work);
2639	efx->pci_dev = pci_dev;
2640	efx->msg_enable = debug;
2641	efx->state = STATE_UNINIT;
2642	strscpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
2643
2644	efx->net_dev = net_dev;
2645	efx->rx_prefix_size = efx->type->rx_prefix_size;
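	/* rx_ip_align is the extra offset (mod 4) applied to RX buffers on
	 * platforms with a non-zero NET_IP_ALIGN, chosen so the IP header
	 * stays 4-byte aligned once the hardware prefix is included.
	 */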
2646	efx->rx_ip_align =
2647		NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
2648	efx->rx_packet_hash_offset =
2649		efx->type->rx_hash_offset - efx->type->rx_prefix_size;
2650	efx->rx_packet_ts_offset =
2651		efx->type->rx_ts_offset - efx->type->rx_prefix_size;
2652	spin_lock_init(&efx->stats_lock);
2653	mutex_init(&efx->mac_lock);
2654	efx->phy_op = &ef4_dummy_phy_operations;
2655	efx->mdio.dev = net_dev;
2656	INIT_WORK(&efx->mac_work, ef4_mac_work);
2657	init_waitqueue_head(&efx->flush_wq);
2658
2659	for (i = 0; i < EF4_MAX_CHANNELS; i++) {
2660		efx->channel[i] = ef4_alloc_channel(efx, i, NULL);
2661		if (!efx->channel[i])
2662			goto fail;
2663		efx->msi_context[i].efx = efx;
2664		efx->msi_context[i].index = i;
2665	}
2666
2667	/* Higher numbered interrupt modes are less capable! */
2668	efx->interrupt_mode = max(efx->type->max_interrupt_mode,
2669				  interrupt_mode);
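	/* i.e. max() picks the more restrictive (higher-numbered) of the
	 * NIC's best supported mode and the interrupt_mode parameter.
	 */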
2670
2671	/* Would be good to use the net_dev name, but we're too early */
2672	snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
2673		 pci_name(pci_dev));
2674	efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
2675	if (!efx->workqueue)
2676		goto fail;
2677
2678	return 0;
2679
2680fail:
2681	ef4_fini_struct(efx);
2682	return -ENOMEM;
2683}
2684
2685static void ef4_fini_struct(struct ef4_nic *efx)
2686{
2687	int i;
2688
2689	for (i = 0; i < EF4_MAX_CHANNELS; i++)
2690		kfree(efx->channel[i]);
2691
2692	kfree(efx->vpd_sn);
2693
2694	if (efx->workqueue) {
2695		destroy_workqueue(efx->workqueue);
2696		efx->workqueue = NULL;
2697	}
2698}
2699
2700void ef4_update_sw_stats(struct ef4_nic *efx, u64 *stats)
2701{
2702	u64 n_rx_nodesc_trunc = 0;
2703	struct ef4_channel *channel;
2704
2705	ef4_for_each_channel(channel, efx)
2706		n_rx_nodesc_trunc += channel->n_rx_nodesc_trunc;
2707	stats[GENERIC_STAT_rx_nodesc_trunc] = n_rx_nodesc_trunc;
2708	stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops);
2709}
2710
2711/**************************************************************************
2712 *
2713 * PCI interface
2714 *
2715 **************************************************************************/
2716
2717/* Main body of final NIC shutdown code
2718 * This is called only at module unload (or hotplug removal).
2719 */
2720static void ef4_pci_remove_main(struct ef4_nic *efx)
2721{
2722	/* Cancel reset_work and wait for any running instance to finish.
2723	 * It can no longer be scheduled since we are not READY.
2724	 */
2725	BUG_ON(efx->state == STATE_READY);
2726	cancel_work_sync(&efx->reset_work);
2727
2728	ef4_disable_interrupts(efx);
2729	ef4_nic_fini_interrupt(efx);
2730	ef4_fini_port(efx);
2731	efx->type->fini(efx);
2732	ef4_fini_napi(efx);
2733	ef4_remove_all(efx);
2734}
2735
2736/* Final NIC shutdown
2737 * This is called only at module unload (or hotplug removal).  A PF can call
2738 * this on its VFs to ensure they are unbound first.
2739 */
2740static void ef4_pci_remove(struct pci_dev *pci_dev)
2741{
2742	struct ef4_nic *efx;
2743
2744	efx = pci_get_drvdata(pci_dev);
2745	if (!efx)
2746		return;
2747
2748	/* Mark the NIC as fini, then stop the interface */
2749	rtnl_lock();
2750	ef4_dissociate(efx);
2751	dev_close(efx->net_dev);
2752	ef4_disable_interrupts(efx);
2753	efx->state = STATE_UNINIT;
2754	rtnl_unlock();
2755
2756	ef4_unregister_netdev(efx);
2757
2758	ef4_mtd_remove(efx);
2759
2760	ef4_pci_remove_main(efx);
2761
2762	ef4_fini_io(efx);
2763	netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
2764
2765	ef4_fini_struct(efx);
2766	free_netdev(efx->net_dev);
2767}
2768
2769/* NIC VPD information
2770 * Called during probe to display the part number and to stash the
 * serial number of the installed NIC.
2771 */
2772static void ef4_probe_vpd_strings(struct ef4_nic *efx)
2773{
2774	struct pci_dev *dev = efx->pci_dev;
2775	unsigned int vpd_size, kw_len;
2776	u8 *vpd_data;
2777	int start;
2778
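	/* pci_vpd_alloc() returns the device's VPD image (or an ERR_PTR),
	 * and pci_vpd_find_ro_info_keyword() yields the offset of a
	 * read-only keyword's data, with its length stored in kw_len.
	 */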
2779	vpd_data = pci_vpd_alloc(dev, &vpd_size);
2780	if (IS_ERR(vpd_data)) {
2781		pci_warn(dev, "Unable to read VPD\n");
2782		return;
2783	}
2784
2785	start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
2786					     PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
2787	if (start < 0)
2788		pci_warn(dev, "Part number not found or incomplete\n");
2789	else
2790		pci_info(dev, "Part Number : %.*s\n", kw_len, vpd_data + start);
2791
2792	start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
2793					     PCI_VPD_RO_KEYWORD_SERIALNO, &kw_len);
2794	if (start < 0)
2795		pci_warn(dev, "Serial number not found or incomplete\n");
2796	else
2797		efx->vpd_sn = kmemdup_nul(vpd_data + start, kw_len, GFP_KERNEL);
2798
2799	kfree(vpd_data);
2800}
2801
2802
2803/* Main body of NIC initialisation
2804 * This is called at module load (or hotplug insertion, theoretically).
2805 */
2806static int ef4_pci_probe_main(struct ef4_nic *efx)
2807{
2808	int rc;
2809
2810	/* Do start-of-day initialisation */
2811	rc = ef4_probe_all(efx);
2812	if (rc)
2813		goto fail1;
2814
2815	ef4_init_napi(efx);
2816
2817	rc = efx->type->init(efx);
2818	if (rc) {
2819		netif_err(efx, probe, efx->net_dev,
2820			  "failed to initialise NIC\n");
2821		goto fail3;
2822	}
2823
2824	rc = ef4_init_port(efx);
2825	if (rc) {
2826		netif_err(efx, probe, efx->net_dev,
2827			  "failed to initialise port\n");
2828		goto fail4;
2829	}
2830
2831	rc = ef4_nic_init_interrupt(efx);
2832	if (rc)
2833		goto fail5;
2834	rc = ef4_enable_interrupts(efx);
2835	if (rc)
2836		goto fail6;
2837
2838	return 0;
2839
2840 fail6:
2841	ef4_nic_fini_interrupt(efx);
2842 fail5:
2843	ef4_fini_port(efx);
2844 fail4:
2845	efx->type->fini(efx);
2846 fail3:
2847	ef4_fini_napi(efx);
2848	ef4_remove_all(efx);
2849 fail1:
2850	return rc;
2851}
2852
2853/* NIC initialisation
2854 *
2855 * This is called at module load (or hotplug insertion,
2856 * theoretically).  It sets up PCI mappings, resets the NIC,
2857 * sets up and registers the network device with the kernel and hooks
2858 * the interrupt service routine.  It does not prepare the device for
2859 * transmission; this is left to the first time one of the network
2860 * interfaces is brought up (i.e. ef4_net_open).
2861 */
2862static int ef4_pci_probe(struct pci_dev *pci_dev,
2863			 const struct pci_device_id *entry)
2864{
2865	struct net_device *net_dev;
2866	struct ef4_nic *efx;
2867	int rc;
2868
2869	/* Allocate and initialise a struct net_device and struct ef4_nic */
2870	net_dev = alloc_etherdev_mqs(sizeof(*efx), EF4_MAX_CORE_TX_QUEUES,
2871				     EF4_MAX_RX_QUEUES);
2872	if (!net_dev)
2873		return -ENOMEM;
2874	efx = netdev_priv(net_dev);
2875	efx->type = (const struct ef4_nic_type *) entry->driver_data;
2876	efx->fixed_features |= NETIF_F_HIGHDMA;
2877
2878	pci_set_drvdata(pci_dev, efx);
2879	SET_NETDEV_DEV(net_dev, &pci_dev->dev);
2880	rc = ef4_init_struct(efx, pci_dev, net_dev);
2881	if (rc)
2882		goto fail1;
2883
2884	netif_info(efx, probe, efx->net_dev,
2885		   "Solarflare NIC detected\n");
2886
2887	ef4_probe_vpd_strings(efx);
2888
2889	/* Set up basic I/O (BAR mappings etc) */
2890	rc = ef4_init_io(efx);
2891	if (rc)
2892		goto fail2;
2893
2894	rc = ef4_pci_probe_main(efx);
2895	if (rc)
2896		goto fail3;
2897
2898	net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
2899			      NETIF_F_RXCSUM);
2900	/* Mask for features that also apply to VLAN devices */
2901	net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
2902				   NETIF_F_HIGHDMA | NETIF_F_RXCSUM);
2903
2904	net_dev->hw_features = net_dev->features & ~efx->fixed_features;
2905
2906	/* Disable VLAN filtering by default.  It may be enforced if
2907	 * the feature is fixed (i.e. VLAN filters are required to
2908	 * receive VLAN tagged packets due to vPort restrictions).
2909	 */
2910	net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
2911	net_dev->features |= efx->fixed_features;
2912
2913	rc = ef4_register_netdev(efx);
2914	if (rc)
2915		goto fail4;
2916
2917	netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
2918
2919	/* Try to create MTDs, but allow this to fail */
2920	rtnl_lock();
2921	rc = ef4_mtd_probe(efx);
2922	rtnl_unlock();
2923	if (rc && rc != -EPERM)
2924		netif_warn(efx, probe, efx->net_dev,
2925			   "failed to create MTDs (%d)\n", rc);
2926
2927	return 0;
2928
2929 fail4:
2930	ef4_pci_remove_main(efx);
2931 fail3:
2932	ef4_fini_io(efx);
2933 fail2:
2934	ef4_fini_struct(efx);
2935 fail1:
2936	WARN_ON(rc > 0);
2937	netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
2938	free_netdev(net_dev);
2939	return rc;
2940}
2941
2942static int ef4_pm_freeze(struct device *dev)
2943{
2944	struct ef4_nic *efx = dev_get_drvdata(dev);
2945
2946	rtnl_lock();
2947
2948	if (efx->state != STATE_DISABLED) {
2949		efx->state = STATE_UNINIT;
2950
2951		ef4_device_detach_sync(efx);
2952
2953		ef4_stop_all(efx);
2954		ef4_disable_interrupts(efx);
2955	}
2956
2957	rtnl_unlock();
2958
2959	return 0;
2960}
2961
2962static int ef4_pm_thaw(struct device *dev)
2963{
2964	int rc;
2965	struct ef4_nic *efx = dev_get_drvdata(dev);
2966
2967	rtnl_lock();
2968
2969	if (efx->state != STATE_DISABLED) {
2970		rc = ef4_enable_interrupts(efx);
2971		if (rc)
2972			goto fail;
2973
2974		mutex_lock(&efx->mac_lock);
2975		efx->phy_op->reconfigure(efx);
2976		mutex_unlock(&efx->mac_lock);
2977
2978		ef4_start_all(efx);
2979
2980		netif_device_attach(efx->net_dev);
2981
2982		efx->state = STATE_READY;
2983
2984		efx->type->resume_wol(efx);
2985	}
2986
2987	rtnl_unlock();
2988
2989	/* Reschedule any quenched resets scheduled during ef4_pm_freeze() */
2990	queue_work(reset_workqueue, &efx->reset_work);
2991
2992	return 0;
2993
2994fail:
2995	rtnl_unlock();
2996
2997	return rc;
2998}
2999
3000static int ef4_pm_poweroff(struct device *dev)
3001{
3002	struct pci_dev *pci_dev = to_pci_dev(dev);
3003	struct ef4_nic *efx = pci_get_drvdata(pci_dev);
3004
3005	efx->type->fini(efx);
3006
3007	efx->reset_pending = 0;
3008
3009	pci_save_state(pci_dev);
3010	return pci_set_power_state(pci_dev, PCI_D3hot);
3011}
3012
3013/* Used for both resume and restore */
3014static int ef4_pm_resume(struct device *dev)
3015{
3016	struct pci_dev *pci_dev = to_pci_dev(dev);
3017	struct ef4_nic *efx = pci_get_drvdata(pci_dev);
3018	int rc;
3019
3020	rc = pci_set_power_state(pci_dev, PCI_D0);
3021	if (rc)
3022		return rc;
3023	pci_restore_state(pci_dev);
3024	rc = pci_enable_device(pci_dev);
3025	if (rc)
3026		return rc;
3027	pci_set_master(efx->pci_dev);
3028	rc = efx->type->reset(efx, RESET_TYPE_ALL);
3029	if (rc)
3030		return rc;
3031	rc = efx->type->init(efx);
3032	if (rc)
3033		return rc;
3034	rc = ef4_pm_thaw(dev);
3035	return rc;
3036}
3037
3038static int ef4_pm_suspend(struct device *dev)
3039{
3040	int rc;
3041
3042	ef4_pm_freeze(dev);
3043	rc = ef4_pm_poweroff(dev);
3044	if (rc)
3045		ef4_pm_resume(dev);
3046	return rc;
3047}
3048
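/* suspend/resume implement suspend-to-RAM; freeze/thaw/poweroff/restore
 * are the hibernation phases, with ef4_pm_resume doubling as the restore
 * handler.
 */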
3049static const struct dev_pm_ops ef4_pm_ops = {
3050	.suspend	= ef4_pm_suspend,
3051	.resume		= ef4_pm_resume,
3052	.freeze		= ef4_pm_freeze,
3053	.thaw		= ef4_pm_thaw,
3054	.poweroff	= ef4_pm_poweroff,
3055	.restore	= ef4_pm_resume,
3056};
3057
3058/* A PCI error affecting this device was detected.
3059 * At this point MMIO and DMA may be disabled.
3060 * Stop the software path and request a slot reset.
3061 */
3062static pci_ers_result_t ef4_io_error_detected(struct pci_dev *pdev,
3063					      pci_channel_state_t state)
3064{
3065	pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
3066	struct ef4_nic *efx = pci_get_drvdata(pdev);
3067
3068	if (state == pci_channel_io_perm_failure)
3069		return PCI_ERS_RESULT_DISCONNECT;
3070
3071	rtnl_lock();
3072
3073	if (efx->state != STATE_DISABLED) {
3074		efx->state = STATE_RECOVERY;
3075		efx->reset_pending = 0;
3076
3077		ef4_device_detach_sync(efx);
3078
3079		ef4_stop_all(efx);
3080		ef4_disable_interrupts(efx);
3081
3082		status = PCI_ERS_RESULT_NEED_RESET;
3083	} else {
3084		/* If the interface is disabled we don't want to do anything
3085		 * with it.
3086		 */
3087		status = PCI_ERS_RESULT_RECOVERED;
3088	}
3089
3090	rtnl_unlock();
3091
3092	pci_disable_device(pdev);
3093
3094	return status;
3095}
3096
3097/* Fake a successful reset, which will be performed later in ef4_io_resume. */
3098static pci_ers_result_t ef4_io_slot_reset(struct pci_dev *pdev)
3099{
3100	struct ef4_nic *efx = pci_get_drvdata(pdev);
3101	pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
3102
3103	if (pci_enable_device(pdev)) {
3104		netif_err(efx, hw, efx->net_dev,
3105			  "Cannot re-enable PCI device after reset.\n");
3106		status = PCI_ERS_RESULT_DISCONNECT;
3107	}
3108
3109	return status;
3110}
3111
3112/* Perform the actual reset and resume I/O operations. */
3113static void ef4_io_resume(struct pci_dev *pdev)
3114{
3115	struct ef4_nic *efx = pci_get_drvdata(pdev);
3116	int rc;
3117
3118	rtnl_lock();
3119
3120	if (efx->state == STATE_DISABLED)
3121		goto out;
3122
3123	rc = ef4_reset(efx, RESET_TYPE_ALL);
3124	if (rc) {
3125		netif_err(efx, hw, efx->net_dev,
3126			  "ef4_reset failed after PCI error (%d)\n", rc);
3127	} else {
3128		efx->state = STATE_READY;
3129		netif_dbg(efx, hw, efx->net_dev,
3130			  "Done resetting and resuming IO after PCI error.\n");
3131	}
3132
3133out:
3134	rtnl_unlock();
3135}
3136
3137/* For simplicity and reliability, we always require a slot reset and try to
3138 * reset the hardware when a pci error affecting the device is detected.
3139 * We leave both the link_reset and mmio_enabled callbacks unimplemented:
3140 * with our request for slot reset the mmio_enabled callback will never be
3141 * called, and the link_reset callback is not used by AER or EEH mechanisms.
3142 */
3143static const struct pci_error_handlers ef4_err_handlers = {
3144	.error_detected = ef4_io_error_detected,
3145	.slot_reset	= ef4_io_slot_reset,
3146	.resume		= ef4_io_resume,
3147};
3148
3149static struct pci_driver ef4_pci_driver = {
3150	.name		= KBUILD_MODNAME,
3151	.id_table	= ef4_pci_table,
3152	.probe		= ef4_pci_probe,
3153	.remove		= ef4_pci_remove,
3154	.driver.pm	= &ef4_pm_ops,
3155	.err_handler	= &ef4_err_handlers,
3156};
3157
3158/**************************************************************************
3159 *
3160 * Kernel module interface
3161 *
3162 *************************************************************************/
3163
3164module_param(interrupt_mode, uint, 0444);
3165MODULE_PARM_DESC(interrupt_mode,
3166		 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
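/* For example (assuming the module is built as sfc-falcon):
 *   modprobe sfc-falcon interrupt_mode=2    # force legacy interrupts
 */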
3167
3168static int __init ef4_init_module(void)
3169{
3170	int rc;
3171
3172	printk(KERN_INFO "Solarflare Falcon driver v" EF4_DRIVER_VERSION "\n");
3173
3174	rc = register_netdevice_notifier(&ef4_netdev_notifier);
3175	if (rc)
3176		goto err_notifier;
3177
3178	reset_workqueue = create_singlethread_workqueue("sfc_reset");
3179	if (!reset_workqueue) {
3180		rc = -ENOMEM;
3181		goto err_reset;
3182	}
3183
3184	rc = pci_register_driver(&ef4_pci_driver);
3185	if (rc < 0)
3186		goto err_pci;
3187
3188	return 0;
3189
3190 err_pci:
3191	destroy_workqueue(reset_workqueue);
3192 err_reset:
3193	unregister_netdevice_notifier(&ef4_netdev_notifier);
3194 err_notifier:
3195	return rc;
3196}
3197
3198static void __exit ef4_exit_module(void)
3199{
3200	printk(KERN_INFO "Solarflare Falcon driver unloading\n");
3201
3202	pci_unregister_driver(&ef4_pci_driver);
3203	destroy_workqueue(reset_workqueue);
3204	unregister_netdevice_notifier(&ef4_netdev_notifier);
3206}
3207
3208module_init(ef4_init_module);
3209module_exit(ef4_exit_module);
3210
3211MODULE_AUTHOR("Solarflare Communications and "
3212	      "Michael Brown <mbrown@fensystems.co.uk>");
3213MODULE_DESCRIPTION("Solarflare Falcon network driver");
3214MODULE_LICENSE("GPL");
3215MODULE_DEVICE_TABLE(pci, ef4_pci_table);
3216MODULE_VERSION(EF4_DRIVER_VERSION);