/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include <linux/gfp.h>
#include <linux/aer.h>
#include <linux/interrupt.h>
#include "net_driver.h"
#include <net/gre.h>
#include <net/udp_tunnel.h>
#include "efx.h"
#include "nic.h"
#include "io.h"
#include "selftest.h"
#include "sriov.h"

#include "mcdi.h"
#include "mcdi_pcol.h"
#include "workarounds.h"

/**************************************************************************
 *
 * Type name strings
 *
 **************************************************************************
 */

/* Loopback mode names (see LOOPBACK_MODE()) */
const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
const char *const efx_loopback_mode_names[] = {
	[LOOPBACK_NONE]		= "NONE",
	[LOOPBACK_DATA]		= "DATAPATH",
	[LOOPBACK_GMAC]		= "GMAC",
	[LOOPBACK_XGMII]	= "XGMII",
	[LOOPBACK_XGXS]		= "XGXS",
	[LOOPBACK_XAUI]		= "XAUI",
	[LOOPBACK_GMII]		= "GMII",
	[LOOPBACK_SGMII]	= "SGMII",
	[LOOPBACK_XGBR]		= "XGBR",
	[LOOPBACK_XFI]		= "XFI",
	[LOOPBACK_XAUI_FAR]	= "XAUI_FAR",
	[LOOPBACK_GMII_FAR]	= "GMII_FAR",
	[LOOPBACK_SGMII_FAR]	= "SGMII_FAR",
	[LOOPBACK_XFI_FAR]	= "XFI_FAR",
	[LOOPBACK_GPHY]		= "GPHY",
	[LOOPBACK_PHYXS]	= "PHYXS",
	[LOOPBACK_PCS]		= "PCS",
	[LOOPBACK_PMAPMD]	= "PMA/PMD",
	[LOOPBACK_XPORT]	= "XPORT",
	[LOOPBACK_XGMII_WS]	= "XGMII_WS",
	[LOOPBACK_XAUI_WS]	= "XAUI_WS",
	[LOOPBACK_XAUI_WS_FAR]	= "XAUI_WS_FAR",
	[LOOPBACK_XAUI_WS_NEAR]	= "XAUI_WS_NEAR",
	[LOOPBACK_GMII_WS]	= "GMII_WS",
	[LOOPBACK_XFI_WS]	= "XFI_WS",
	[LOOPBACK_XFI_WS_FAR]	= "XFI_WS_FAR",
	[LOOPBACK_PHYXS_WS]	= "PHYXS_WS",
};

const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
const char *const efx_reset_type_names[] = {
	[RESET_TYPE_INVISIBLE]		= "INVISIBLE",
	[RESET_TYPE_ALL]		= "ALL",
	[RESET_TYPE_RECOVER_OR_ALL]	= "RECOVER_OR_ALL",
	[RESET_TYPE_WORLD]		= "WORLD",
	[RESET_TYPE_RECOVER_OR_DISABLE]	= "RECOVER_OR_DISABLE",
	[RESET_TYPE_DATAPATH]		= "DATAPATH",
	[RESET_TYPE_MC_BIST]		= "MC_BIST",
	[RESET_TYPE_DISABLE]		= "DISABLE",
	[RESET_TYPE_TX_WATCHDOG]	= "TX_WATCHDOG",
	[RESET_TYPE_INT_ERROR]		= "INT_ERROR",
	[RESET_TYPE_DMA_ERROR]		= "DMA_ERROR",
	[RESET_TYPE_TX_SKIP]		= "TX_SKIP",
	[RESET_TYPE_MC_FAILURE]		= "MC_FAILURE",
	[RESET_TYPE_MCDI_TIMEOUT]	= "MCDI_TIMEOUT (FLR)",
};

/* UDP tunnel type names */
static const char *const efx_udp_tunnel_type_names[] = {
	[TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN] = "vxlan",
	[TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE] = "geneve",
};

void efx_get_udp_tunnel_type_name(u16 type, char *buf, size_t buflen)
{
	if (type < ARRAY_SIZE(efx_udp_tunnel_type_names) &&
	    efx_udp_tunnel_type_names[type] != NULL)
		snprintf(buf, buflen, "%s", efx_udp_tunnel_type_names[type]);
	else
		snprintf(buf, buflen, "type %d", type);
}

/* Reset workqueue. If any NIC has a hardware failure then a reset will be
 * queued onto this work queue. This is not a per-nic work queue, because
 * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
 */
static struct workqueue_struct *reset_workqueue;

/* How often and how many times to poll for a reset while waiting for a
 * BIST that another function started to complete.
 */
#define BIST_WAIT_DELAY_MS	100
#define BIST_WAIT_DELAY_COUNT	100
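/* Worked out from the constants above: the longest we will wait for a
 * BIST started by another function is BIST_WAIT_DELAY_COUNT polls of
 * BIST_WAIT_DELAY_MS each, i.e. 100 * 100 ms = 10 seconds.
 */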

/**************************************************************************
 *
 * Configurable values
 *
 *************************************************************************/

/*
 * Use separate channels for TX and RX events
 *
 * Set this to 1 to use separate channels for TX and RX. It allows us
 * to control interrupt affinity separately for TX and RX.
 *
 * This is only used in MSI-X interrupt mode
 */
bool efx_separate_tx_channels;
module_param(efx_separate_tx_channels, bool, 0444);
MODULE_PARM_DESC(efx_separate_tx_channels,
		 "Use separate channels for TX and RX");

/* This is the weight assigned to each of the (per-channel) virtual
 * NAPI devices.
 */
static int napi_weight = 64;

/* This is the time (in jiffies) between invocations of the hardware
 * monitor.
 * On Falcon-based NICs, this will:
 * - Check the on-board hardware monitor;
 * - Poll the link state and reconfigure the hardware as necessary.
 * On Siena-based NICs for power systems with EEH support, this will give EEH a
 * chance to start.
 */
static unsigned int efx_monitor_interval = 1 * HZ;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * The default for RX should strike a balance between increasing the
 * round-trip latency and reducing overhead.
 */
static unsigned int rx_irq_mod_usec = 60;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * This default is chosen to ensure that a 10G link does not go idle
 * while a TX queue is stopped after it has become full. A queue is
 * restarted when it drops below half full. The time this takes (assuming
 * worst case 3 descriptors per packet and 1024 descriptors) is
 * 512 / 3 * 1.2 = 205 usec.
 */
static unsigned int tx_irq_mod_usec = 150;
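/* Unpacking the arithmetic above (an annotation; the per-packet time is
 * an estimate, not something the hardware reports): the queue wakes after
 * 512 of its 1024 descriptors complete, which at a worst case of 3
 * descriptors per packet is ~170 packets; a full-sized frame takes
 * roughly 1.2 us on the wire at 10Gb/s, giving 512 / 3 * 1.2 ~= 205 us.
 * The 150 us TX moderation default therefore fires before the queue can
 * drain completely and let the link go idle.
 */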

/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
static unsigned int interrupt_mode;

/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each core.
 */
static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");

static bool phy_flash_cfg;
module_param(phy_flash_cfg, bool, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");

static unsigned irq_adapt_low_thresh = 8000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
		 "Threshold score for reducing IRQ moderation");

static unsigned irq_adapt_high_thresh = 16000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
		 "Threshold score for increasing IRQ moderation");

static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
			 NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
			 NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
			 NETIF_MSG_TX_ERR | NETIF_MSG_HW);
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");

/**************************************************************************
 *
 * Utility functions and prototypes
 *
 *************************************************************************/

static int efx_soft_enable_interrupts(struct efx_nic *efx);
static void efx_soft_disable_interrupts(struct efx_nic *efx);
static void efx_remove_channel(struct efx_channel *channel);
static void efx_remove_channels(struct efx_nic *efx);
static const struct efx_channel_type efx_default_channel_type;
static void efx_remove_port(struct efx_nic *efx);
static void efx_init_napi_channel(struct efx_channel *channel);
static void efx_fini_napi(struct efx_nic *efx);
static void efx_fini_napi_channel(struct efx_channel *channel);
static void efx_fini_struct(struct efx_nic *efx);
static void efx_start_all(struct efx_nic *efx);
static void efx_stop_all(struct efx_nic *efx);

#define EFX_ASSERT_RESET_SERIALISED(efx)		\
	do {						\
		if ((efx->state == STATE_READY) ||	\
		    (efx->state == STATE_RECOVERY) ||	\
		    (efx->state == STATE_DISABLED))	\
			ASSERT_RTNL();			\
	} while (0)

static int efx_check_disabled(struct efx_nic *efx)
{
	if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
		netif_err(efx, drv, efx->net_dev,
			  "device is disabled due to earlier errors\n");
		return -EIO;
	}
	return 0;
}

/**************************************************************************
 *
 * Event queue processing
 *
 *************************************************************************/

/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel. The caller must guarantee that this function will
 * never be called concurrently more than once on the same channel,
 * though different channels may be processed concurrently.
 */
static int efx_process_channel(struct efx_channel *channel, int budget)
{
	struct efx_tx_queue *tx_queue;
	int spent;

	if (unlikely(!channel->enabled))
		return 0;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		tx_queue->pkts_compl = 0;
		tx_queue->bytes_compl = 0;
	}

	spent = efx_nic_process_eventq(channel, budget);
	if (spent && efx_channel_has_rx_queue(channel)) {
		struct efx_rx_queue *rx_queue =
			efx_channel_get_rx_queue(channel);

		efx_rx_flush_packet(channel);
		efx_fast_push_rx_descriptors(rx_queue, true);
	}

	/* Update BQL */
	efx_for_each_channel_tx_queue(tx_queue, channel) {
		if (tx_queue->bytes_compl) {
			netdev_tx_completed_queue(tx_queue->core_txq,
				tx_queue->pkts_compl, tx_queue->bytes_compl);
		}
	}

	return spent;
}

/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 */
static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel)
{
	int step = efx->irq_mod_step_us;

	if (channel->irq_mod_score < irq_adapt_low_thresh) {
		if (channel->irq_moderation_us > step) {
			channel->irq_moderation_us -= step;
			efx->type->push_irq_moderation(channel);
		}
	} else if (channel->irq_mod_score > irq_adapt_high_thresh) {
		if (channel->irq_moderation_us <
		    efx->irq_rx_moderation_us) {
			channel->irq_moderation_us += step;
			efx->type->push_irq_moderation(channel);
		}
	}

	channel->irq_count = 0;
	channel->irq_mod_score = 0;
}
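/* A rough sketch of the feedback loop above: efx_poll() calls this once
 * per 1000 interrupts on an adaptive RX channel, with irq_mod_score
 * acting as an activity score for those interrupts. A score below
 * irq_adapt_low_thresh (default 8000) means the channel is lightly
 * loaded, so moderation is stepped down for lower latency; a score above
 * irq_adapt_high_thresh (default 16000) steps it up, capped at the
 * configured RX moderation, to cut interrupt overhead.
 */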

static int efx_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	struct efx_nic *efx = channel->efx;
	int spent;

	netif_vdbg(efx, intr, efx->net_dev,
		   "channel %d NAPI poll executing on CPU %d\n",
		   channel->channel, raw_smp_processor_id());

	spent = efx_process_channel(channel, budget);

	if (spent < budget) {
		if (efx_channel_has_rx_queue(channel) &&
		    efx->irq_rx_adaptive &&
		    unlikely(++channel->irq_count == 1000)) {
			efx_update_irq_mod(efx, channel);
		}

#ifdef CONFIG_RFS_ACCEL
		/* Perhaps expire some ARFS filters */
		schedule_work(&channel->filter_work);
#endif

		/* There is no race here; although napi_disable() will
		 * only wait for napi_complete(), this isn't a problem
		 * since efx_nic_eventq_read_ack() will have no effect if
		 * interrupts have already been disabled.
		 */
		if (napi_complete_done(napi, spent))
			efx_nic_eventq_read_ack(channel);
	}

	return spent;
}

/* Create event queue
 * Event queue memory allocations are done only once. If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
static int efx_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned long entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "chan %d create event queue\n", channel->channel);

	/* Build an event queue with room for one event per tx and rx buffer,
	 * plus some extra for link state events and MCDI completions. */
	entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
	channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;

	return efx_nic_probe_eventq(channel);
}
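/* Worked example, assuming the default ring sizes set up in
 * efx_probe_all() (1024 RX and 1024 TX entries): 1024 + 1024 + 128 =
 * 2176, which roundup_pow_of_two() takes to 4096, so eventq_mask becomes
 * 4095 and the event queue read pointer can wrap with a simple AND.
 */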

/* Prepare channel's event queue */
static int efx_init_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	int rc;

	EFX_WARN_ON_PARANOID(channel->eventq_init);

	netif_dbg(efx, drv, efx->net_dev,
		  "chan %d init event queue\n", channel->channel);

	rc = efx_nic_init_eventq(channel);
	if (rc == 0) {
		efx->type->push_irq_moderation(channel);
		channel->eventq_read_ptr = 0;
		channel->eventq_init = true;
	}
	return rc;
}

/* Enable event queue processing and NAPI */
void efx_start_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
		  "chan %d start event queue\n", channel->channel);

	/* Make sure the NAPI handler sees the enabled flag set */
	channel->enabled = true;
	smp_wmb();

	napi_enable(&channel->napi_str);
	efx_nic_eventq_read_ack(channel);
}

/* Disable event queue processing and NAPI */
void efx_stop_eventq(struct efx_channel *channel)
{
	if (!channel->enabled)
		return;

	napi_disable(&channel->napi_str);
	channel->enabled = false;
}

static void efx_fini_eventq(struct efx_channel *channel)
{
	if (!channel->eventq_init)
		return;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d fini event queue\n", channel->channel);

	efx_nic_fini_eventq(channel);
	channel->eventq_init = false;
}

static void efx_remove_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d remove event queue\n", channel->channel);

	efx_nic_remove_eventq(channel);
}

/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/

/* Allocate and initialise a channel structure. */
static struct efx_channel *
efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int j;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	channel->efx = efx;
	channel->channel = i;
	channel->type = &efx_default_channel_type;

	for (j = 0; j < EFX_TXQ_TYPES; j++) {
		tx_queue = &channel->tx_queue[j];
		tx_queue->efx = efx;
		tx_queue->queue = i * EFX_TXQ_TYPES + j;
		tx_queue->channel = channel;
	}

#ifdef CONFIG_RFS_ACCEL
	INIT_WORK(&channel->filter_work, efx_filter_rfs_expire);
#endif

	rx_queue = &channel->rx_queue;
	rx_queue->efx = efx;
	timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);

	return channel;
}

/* Allocate and initialise a channel structure, copying parameters
 * (but not resources) from an old channel structure.
 */
static struct efx_channel *
efx_copy_channel(const struct efx_channel *old_channel)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int j;

	channel = kmalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	*channel = *old_channel;

	channel->napi_dev = NULL;
	INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
	channel->napi_str.napi_id = 0;
	channel->napi_str.state = 0;
	memset(&channel->eventq, 0, sizeof(channel->eventq));

	for (j = 0; j < EFX_TXQ_TYPES; j++) {
		tx_queue = &channel->tx_queue[j];
		if (tx_queue->channel)
			tx_queue->channel = channel;
		tx_queue->buffer = NULL;
		memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
	}

	rx_queue = &channel->rx_queue;
	rx_queue->buffer = NULL;
	memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
	timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
#ifdef CONFIG_RFS_ACCEL
	INIT_WORK(&channel->filter_work, efx_filter_rfs_expire);
#endif

	return channel;
}

static int efx_probe_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	netif_dbg(channel->efx, probe, channel->efx->net_dev,
		  "creating channel %d\n", channel->channel);

	rc = channel->type->pre_probe(channel);
	if (rc)
		goto fail;

	rc = efx_probe_eventq(channel);
	if (rc)
		goto fail;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		rc = efx_probe_tx_queue(tx_queue);
		if (rc)
			goto fail;
	}

	efx_for_each_channel_rx_queue(rx_queue, channel) {
		rc = efx_probe_rx_queue(rx_queue);
		if (rc)
			goto fail;
	}

	return 0;

fail:
	efx_remove_channel(channel);
	return rc;
}

static void
efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
{
	struct efx_nic *efx = channel->efx;
	const char *type;
	int number;

	number = channel->channel;
	if (efx->tx_channel_offset == 0) {
		type = "";
	} else if (channel->channel < efx->tx_channel_offset) {
		type = "-rx";
	} else {
		type = "-tx";
		number -= efx->tx_channel_offset;
	}
	snprintf(buf, len, "%s%s-%d", efx->name, type, number);
}

static void efx_set_channel_names(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		channel->type->get_name(channel,
					efx->msi_context[channel->channel].name,
					sizeof(efx->msi_context[0].name));
}

static int efx_probe_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	/* Restart special buffer allocation */
	efx->next_buffer_table = 0;

	/* Probe channels in reverse, so that any 'extra' channels
	 * use the start of the buffer table. This allows the traffic
	 * channels to be resized without moving them or wasting the
	 * entries before them.
	 */
	efx_for_each_channel_rev(channel, efx) {
		rc = efx_probe_channel(channel);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to create channel %d\n",
				  channel->channel);
			goto fail;
		}
	}
	efx_set_channel_names(efx);

	return 0;

fail:
	efx_remove_channels(efx);
	return rc;
}

/* Channels are shut down and reinitialised whilst the NIC is running
 * to propagate configuration changes (MTU, checksum offload), or
 * to clear hardware error conditions.
 */
static void efx_start_datapath(struct efx_nic *efx)
{
	netdev_features_t old_features = efx->net_dev->features;
	bool old_rx_scatter = efx->rx_scatter;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;
	size_t rx_buf_len;

	/* Calculate the rx buffer allocation parameters required to
	 * support the current MTU, including padding for header
	 * alignment and overruns.
	 */
	efx->rx_dma_len = (efx->rx_prefix_size +
			   EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
			   efx->type->rx_buffer_padding);
	rx_buf_len = (sizeof(struct efx_rx_page_state) +
		      efx->rx_ip_align + efx->rx_dma_len);
	if (rx_buf_len <= PAGE_SIZE) {
		efx->rx_scatter = efx->type->always_rx_scatter;
		efx->rx_buffer_order = 0;
	} else if (efx->type->can_rx_scatter) {
		BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
		BUILD_BUG_ON(sizeof(struct efx_rx_page_state) +
			     2 * ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE,
				       EFX_RX_BUF_ALIGNMENT) >
			     PAGE_SIZE);
		efx->rx_scatter = true;
		efx->rx_dma_len = EFX_RX_USR_BUF_SIZE;
		efx->rx_buffer_order = 0;
	} else {
		efx->rx_scatter = false;
		efx->rx_buffer_order = get_order(rx_buf_len);
	}

	efx_rx_config_page_split(efx);
	if (efx->rx_buffer_order)
		netif_dbg(efx, drv, efx->net_dev,
			  "RX buf len=%u; page order=%u batch=%u\n",
			  efx->rx_dma_len, efx->rx_buffer_order,
			  efx->rx_pages_per_batch);
	else
		netif_dbg(efx, drv, efx->net_dev,
			  "RX buf len=%u step=%u bpp=%u; page batch=%u\n",
			  efx->rx_dma_len, efx->rx_page_buf_step,
			  efx->rx_bufs_per_page, efx->rx_pages_per_batch);

	/* Restore previously fixed features in hw_features and remove
	 * features which are fixed now
	 */
	efx->net_dev->hw_features |= efx->net_dev->features;
	efx->net_dev->hw_features &= ~efx->fixed_features;
	efx->net_dev->features |= efx->fixed_features;
	if (efx->net_dev->features != old_features)
		netdev_features_change(efx->net_dev);

	/* RX filters may also have scatter-enabled flags */
	if (efx->rx_scatter != old_rx_scatter)
		efx->type->filter_update_rx_scatter(efx);

	/* We must keep at least one descriptor in a TX ring empty.
	 * We could avoid this when the queue size does not exactly
	 * match the hardware ring size, but it's not that important.
	 * Therefore we stop the queue when one more skb might fill
	 * the ring completely. We wake it when half way back to
	 * empty.
	 */
	efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
	efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
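	/* Illustrative numbers only (efx_tx_max_skb_descs() depends on the
	 * NIC type): with 1024-entry TX rings and a hypothetical worst
	 * case of 24 descriptors per skb, the queue stops once 1000
	 * entries are in use and is woken again at 500.
	 */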

	/* Initialise the channels */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_init_tx_queue(tx_queue);
			atomic_inc(&efx->active_queues);
		}

		efx_for_each_channel_rx_queue(rx_queue, channel) {
			efx_init_rx_queue(rx_queue);
			atomic_inc(&efx->active_queues);
			efx_stop_eventq(channel);
			efx_fast_push_rx_descriptors(rx_queue, false);
			efx_start_eventq(channel);
		}

		WARN_ON(channel->rx_pkt_n_frags);
	}

	efx_ptp_start_datapath(efx);

	if (netif_device_present(efx->net_dev))
		netif_tx_wake_all_queues(efx->net_dev);
}

static void efx_stop_datapath(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->port_enabled);

	efx_ptp_stop_datapath(efx);

	/* Stop RX refill */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel)
			rx_queue->refill_enabled = false;
	}

	efx_for_each_channel(channel, efx) {
		/* RX packet processing is pipelined, so wait for the
		 * NAPI handler to complete. At least event queue 0
		 * might be kept active by non-data events, so don't
		 * use napi_synchronize() but actually disable NAPI
		 * temporarily.
		 */
		if (efx_channel_has_rx_queue(channel)) {
			efx_stop_eventq(channel);
			efx_start_eventq(channel);
		}
	}

	rc = efx->type->fini_dmaq(efx);
	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
	} else {
		netif_dbg(efx, drv, efx->net_dev,
			  "successfully flushed all queues\n");
	}

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_fini_rx_queue(rx_queue);
		efx_for_each_possible_channel_tx_queue(tx_queue, channel)
			efx_fini_tx_queue(tx_queue);
	}
}

static void efx_remove_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "destroy chan %d\n", channel->channel);

	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
	efx_for_each_possible_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
	efx_remove_eventq(channel);
	channel->type->post_remove(channel);
}

static void efx_remove_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);
}

int
efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
{
	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
	u32 old_rxq_entries, old_txq_entries;
	unsigned i, next_buffer_table = 0;
	int rc, rc2;

	rc = efx_check_disabled(efx);
	if (rc)
		return rc;

	/* Not all channels should be reallocated. We must avoid
	 * reallocating their buffer table entries.
	 */
	efx_for_each_channel(channel, efx) {
		struct efx_rx_queue *rx_queue;
		struct efx_tx_queue *tx_queue;

		if (channel->type->copy)
			continue;
		next_buffer_table = max(next_buffer_table,
					channel->eventq.index +
					channel->eventq.entries);
		efx_for_each_channel_rx_queue(rx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						rx_queue->rxd.index +
						rx_queue->rxd.entries);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						tx_queue->txd.index +
						tx_queue->txd.entries);
	}

	efx_device_detach_sync(efx);
	efx_stop_all(efx);
	efx_soft_disable_interrupts(efx);

	/* Clone channels (where possible) */
	memset(other_channel, 0, sizeof(other_channel));
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (channel->type->copy)
			channel = channel->type->copy(channel);
		if (!channel) {
			rc = -ENOMEM;
			goto out;
		}
		other_channel[i] = channel;
	}

	/* Swap entry counts and channel pointers */
	old_rxq_entries = efx->rxq_entries;
	old_txq_entries = efx->txq_entries;
	efx->rxq_entries = rxq_entries;
	efx->txq_entries = txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}

	/* Restart buffer table allocation */
	efx->next_buffer_table = next_buffer_table;

	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (!channel->type->copy)
			continue;
		rc = efx_probe_channel(channel);
		if (rc)
			goto rollback;
		efx_init_napi_channel(efx->channel[i]);
	}

out:
	/* Destroy unused channel structures */
	for (i = 0; i < efx->n_channels; i++) {
		channel = other_channel[i];
		if (channel && channel->type->copy) {
			efx_fini_napi_channel(channel);
			efx_remove_channel(channel);
			kfree(channel);
		}
	}

	rc2 = efx_soft_enable_interrupts(efx);
	if (rc2) {
		rc = rc ? rc : rc2;
		netif_err(efx, drv, efx->net_dev,
			  "unable to restart interrupts on channel reallocation\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	} else {
		efx_start_all(efx);
		efx_device_attach_if_not_resetting(efx);
	}
	return rc;

rollback:
	/* Swap back */
	efx->rxq_entries = old_rxq_entries;
	efx->txq_entries = old_txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}
	goto out;
}

void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
}

static bool efx_default_channel_want_txqs(struct efx_channel *channel)
{
	return channel->channel - channel->efx->tx_channel_offset <
	       channel->efx->n_tx_channels;
}

static const struct efx_channel_type efx_default_channel_type = {
	.pre_probe		= efx_channel_dummy_op_int,
	.post_remove		= efx_channel_dummy_op_void,
	.get_name		= efx_get_channel_name,
	.copy			= efx_copy_channel,
	.want_txqs		= efx_default_channel_want_txqs,
	.keep_eventq		= false,
	.want_pio		= true,
};

int efx_channel_dummy_op_int(struct efx_channel *channel)
{
	return 0;
}

void efx_channel_dummy_op_void(struct efx_channel *channel)
{
}

/**************************************************************************
 *
 * Port handling
 *
 **************************************************************************/

/* This ensures that the kernel is kept informed (via
 * netif_carrier_on/off) of the link status; the carrier state in turn
 * determines whether the networking core will run the port's TX queues.
 */
void efx_link_status_changed(struct efx_nic *efx)
{
	struct efx_link_state *link_state = &efx->link_state;

	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
	 * that no events are triggered between unregister_netdev() and the
	 * driver unloading. A more general condition is that NETDEV_CHANGE
	 * can only be generated between NETDEV_UP and NETDEV_DOWN */
	if (!netif_running(efx->net_dev))
		return;

	if (link_state->up != netif_carrier_ok(efx->net_dev)) {
		efx->n_link_state_changes++;

		if (link_state->up)
			netif_carrier_on(efx->net_dev);
		else
			netif_carrier_off(efx->net_dev);
	}

	/* Status message for kernel log */
	if (link_state->up)
		netif_info(efx, link, efx->net_dev,
			   "link up at %uMbps %s-duplex (MTU %d)\n",
			   link_state->speed, link_state->fd ? "full" : "half",
			   efx->net_dev->mtu);
	else
		netif_info(efx, link, efx->net_dev, "link down\n");
}

void efx_link_set_advertising(struct efx_nic *efx,
			      const unsigned long *advertising)
{
	memcpy(efx->link_advertising, advertising,
	       sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK()));

	efx->link_advertising[0] |= ADVERTISED_Autoneg;
	if (advertising[0] & ADVERTISED_Pause)
		efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
	else
		efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
	if (advertising[0] & ADVERTISED_Asym_Pause)
		efx->wanted_fc ^= EFX_FC_TX;
}

/* Equivalent to efx_link_set_advertising with all-zeroes, except does not
 * force the Autoneg bit on.
 */
void efx_link_clear_advertising(struct efx_nic *efx)
{
	bitmap_zero(efx->link_advertising, __ETHTOOL_LINK_MODE_MASK_NBITS);
	efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
}

void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
{
	efx->wanted_fc = wanted_fc;
	if (efx->link_advertising[0]) {
		if (wanted_fc & EFX_FC_RX)
			efx->link_advertising[0] |= (ADVERTISED_Pause |
						     ADVERTISED_Asym_Pause);
		else
			efx->link_advertising[0] &= ~(ADVERTISED_Pause |
						      ADVERTISED_Asym_Pause);
		if (wanted_fc & EFX_FC_TX)
			efx->link_advertising[0] ^= ADVERTISED_Asym_Pause;
	}
}
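/* The Pause/Asym_Pause manipulation above and in efx_link_set_advertising()
 * is easier to read as a table (our annotation, derived from the code; it
 * matches the usual IEEE 802.3 pause resolution convention):
 *
 *	wanted_fc		advertised bits
 *	none			(none)
 *	EFX_FC_TX		Asym_Pause
 *	EFX_FC_RX		Pause | Asym_Pause
 *	EFX_FC_TX | EFX_FC_RX	Pause
 *
 * efx_link_set_advertising() applies the same mapping in reverse to
 * recover wanted_fc from an advertising mask.
 */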

static void efx_fini_port(struct efx_nic *efx);

/* We assume that efx->type->reconfigure_mac will always try to sync RX
 * filters and therefore needs to read-lock the filter table against freeing
 */
void efx_mac_reconfigure(struct efx_nic *efx)
{
	down_read(&efx->filter_sem);
	efx->type->reconfigure_mac(efx);
	up_read(&efx->filter_sem);
}

/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
 * the MAC appropriately. All other PHY configuration changes are pushed
 * through phy_op->set_settings(), and pushed asynchronously to the MAC
 * through efx_monitor().
 *
 * Callers must hold the mac_lock
 */
int __efx_reconfigure_port(struct efx_nic *efx)
{
	enum efx_phy_mode phy_mode;
	int rc;

	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	/* Disable PHY transmit in mac level loopbacks */
	phy_mode = efx->phy_mode;
	if (LOOPBACK_INTERNAL(efx))
		efx->phy_mode |= PHY_MODE_TX_DISABLED;
	else
		efx->phy_mode &= ~PHY_MODE_TX_DISABLED;

	rc = efx->type->reconfigure_port(efx);

	if (rc)
		efx->phy_mode = phy_mode;

	return rc;
}

/* Reinitialise the MAC to pick up new PHY settings, even if the port is
 * disabled. */
int efx_reconfigure_port(struct efx_nic *efx)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	rc = __efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	return rc;
}

/* Asynchronous work item for changing MAC promiscuity and multicast
 * hash. Avoid a drain/rx_ingress enable by reconfiguring the current
 * MAC directly. */
static void efx_mac_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);

	mutex_lock(&efx->mac_lock);
	if (efx->port_enabled)
		efx_mac_reconfigure(efx);
	mutex_unlock(&efx->mac_lock);
}

static int efx_probe_port(struct efx_nic *efx)
{
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "create port\n");

	if (phy_flash_cfg)
		efx->phy_mode = PHY_MODE_SPECIAL;

	/* Connect up MAC/PHY operations table */
	rc = efx->type->probe_port(efx);
	if (rc)
		return rc;

	/* Initialise MAC address to permanent address */
	ether_addr_copy(efx->net_dev->dev_addr, efx->net_dev->perm_addr);

	return 0;
}

static int efx_init_port(struct efx_nic *efx)
{
	int rc;

	netif_dbg(efx, drv, efx->net_dev, "init port\n");

	mutex_lock(&efx->mac_lock);

	rc = efx->phy_op->init(efx);
	if (rc)
		goto fail1;

	efx->port_initialized = true;

	/* Reconfigure the MAC before creating dma queues (required for
	 * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
	efx_mac_reconfigure(efx);

	/* Ensure the PHY advertises the correct flow control settings */
	rc = efx->phy_op->reconfigure(efx);
	if (rc && rc != -EPERM)
		goto fail2;

	mutex_unlock(&efx->mac_lock);
	return 0;

fail2:
	efx->phy_op->fini(efx);
fail1:
	mutex_unlock(&efx->mac_lock);
	return rc;
}

static void efx_start_port(struct efx_nic *efx)
{
	netif_dbg(efx, ifup, efx->net_dev, "start port\n");
	BUG_ON(efx->port_enabled);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = true;

	/* Ensure MAC ingress/egress is enabled */
	efx_mac_reconfigure(efx);

	mutex_unlock(&efx->mac_lock);
}

/* Cancel work for MAC reconfiguration, periodic hardware monitoring
 * and the async self-test, wait for them to finish and prevent them
 * being scheduled again. This doesn't cover online resets, which
 * should only be cancelled when removing the device.
 */
static void efx_stop_port(struct efx_nic *efx)
{
	netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");

	EFX_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = false;
	mutex_unlock(&efx->mac_lock);

	/* Serialise against efx_set_multicast_list() */
	netif_addr_lock_bh(efx->net_dev);
	netif_addr_unlock_bh(efx->net_dev);

	cancel_delayed_work_sync(&efx->monitor_work);
	efx_selftest_async_cancel(efx);
	cancel_work_sync(&efx->mac_work);
}

static void efx_fini_port(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "shut down port\n");

	if (!efx->port_initialized)
		return;

	efx->phy_op->fini(efx);
	efx->port_initialized = false;

	efx->link_state.up = false;
	efx_link_status_changed(efx);
}

static void efx_remove_port(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "destroying port\n");

	efx->type->remove_port(efx);
}

/**************************************************************************
 *
 * NIC handling
 *
 **************************************************************************/

static LIST_HEAD(efx_primary_list);
static LIST_HEAD(efx_unassociated_list);

static bool efx_same_controller(struct efx_nic *left, struct efx_nic *right)
{
	return left->type == right->type &&
	       left->vpd_sn && right->vpd_sn &&
	       !strcmp(left->vpd_sn, right->vpd_sn);
}

static void efx_associate(struct efx_nic *efx)
{
	struct efx_nic *other, *next;

	if (efx->primary == efx) {
		/* Adding primary function; look for secondaries */

		netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n");
		list_add_tail(&efx->node, &efx_primary_list);

		list_for_each_entry_safe(other, next, &efx_unassociated_list,
					 node) {
			if (efx_same_controller(efx, other)) {
				list_del(&other->node);
				netif_dbg(other, probe, other->net_dev,
					  "moving to secondary list of %s %s\n",
					  pci_name(efx->pci_dev),
					  efx->net_dev->name);
				list_add_tail(&other->node,
					      &efx->secondary_list);
				other->primary = efx;
			}
		}
	} else {
		/* Adding secondary function; look for primary */

		list_for_each_entry(other, &efx_primary_list, node) {
			if (efx_same_controller(efx, other)) {
				netif_dbg(efx, probe, efx->net_dev,
					  "adding to secondary list of %s %s\n",
					  pci_name(other->pci_dev),
					  other->net_dev->name);
				list_add_tail(&efx->node,
					      &other->secondary_list);
				efx->primary = other;
				return;
			}
		}

		netif_dbg(efx, probe, efx->net_dev,
			  "adding to unassociated list\n");
		list_add_tail(&efx->node, &efx_unassociated_list);
	}
}

static void efx_dissociate(struct efx_nic *efx)
{
	struct efx_nic *other, *next;

	list_del(&efx->node);
	efx->primary = NULL;

	list_for_each_entry_safe(other, next, &efx->secondary_list, node) {
		list_del(&other->node);
		netif_dbg(other, probe, other->net_dev,
			  "moving to unassociated list\n");
		list_add_tail(&other->node, &efx_unassociated_list);
		other->primary = NULL;
	}
}

/* This configures the PCI device to enable I/O and DMA. */
static int efx_init_io(struct efx_nic *efx)
{
	struct pci_dev *pci_dev = efx->pci_dev;
	dma_addr_t dma_mask = efx->type->max_dma_mask;
	unsigned int mem_map_size = efx->type->mem_map_size(efx);
	int rc, bar;

	netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");

	bar = efx->type->mem_bar(efx);

	rc = pci_enable_device(pci_dev);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to enable PCI device\n");
		goto fail1;
	}

	pci_set_master(pci_dev);

	/* Set the PCI DMA mask. Try all possibilities from our
	 * genuine mask down to 32 bits, because some architectures
	 * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
	 * masks even though they reject 46 bit masks.
	 */
	while (dma_mask > 0x7fffffffUL) {
		rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
		if (rc == 0)
			break;
		dma_mask >>= 1;
	}
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "could not find a suitable DMA mask\n");
		goto fail2;
	}
	netif_dbg(efx, probe, efx->net_dev,
		  "using DMA mask %llx\n", (unsigned long long) dma_mask);

	efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
	rc = pci_request_region(pci_dev, bar, "sfc");
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "request for memory BAR failed\n");
		rc = -EIO;
		goto fail3;
	}
	efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size);
	if (!efx->membase) {
		netif_err(efx, probe, efx->net_dev,
			  "could not map memory BAR at %llx+%x\n",
			  (unsigned long long)efx->membase_phys, mem_map_size);
		rc = -ENOMEM;
		goto fail4;
	}
	netif_dbg(efx, probe, efx->net_dev,
		  "memory BAR at %llx+%x (virtual %p)\n",
		  (unsigned long long)efx->membase_phys, mem_map_size,
		  efx->membase);

	return 0;

 fail4:
	pci_release_region(efx->pci_dev, bar);
 fail3:
	efx->membase_phys = 0;
 fail2:
	pci_disable_device(efx->pci_dev);
 fail1:
	return rc;
}

static void efx_fini_io(struct efx_nic *efx)
{
	int bar;

	netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");

	if (efx->membase) {
		iounmap(efx->membase);
		efx->membase = NULL;
	}

	if (efx->membase_phys) {
		bar = efx->type->mem_bar(efx);
		pci_release_region(efx->pci_dev, bar);
		efx->membase_phys = 0;
	}

	/* Don't disable bus-mastering if VFs are assigned */
	if (!pci_vfs_assigned(efx->pci_dev))
		pci_disable_device(efx->pci_dev);
}

void efx_set_default_rx_indir_table(struct efx_nic *efx,
				    struct efx_rss_context *ctx)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
		ctx->rx_indir_table[i] =
			ethtool_rxfh_indir_default(i, efx->rss_spread);
}
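/* ethtool_rxfh_indir_default(i, n) is simply i % n, so with e.g.
 * rss_spread == 4 the table above reads 0, 1, 2, 3, 0, 1, 2, 3, ...,
 * spreading hash buckets evenly across the first four RX queues.
 */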

static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
{
	cpumask_var_t thread_mask;
	unsigned int count;
	int cpu;

	if (rss_cpus) {
		count = rss_cpus;
	} else {
		if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
			netif_warn(efx, probe, efx->net_dev,
				   "RSS disabled due to allocation failure\n");
			return 1;
		}

		count = 0;
		for_each_online_cpu(cpu) {
			if (!cpumask_test_cpu(cpu, thread_mask)) {
				++count;
				cpumask_or(thread_mask, thread_mask,
					   topology_sibling_cpumask(cpu));
			}
		}

		free_cpumask_var(thread_mask);
	}

	if (count > EFX_MAX_RX_QUEUES) {
		netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
			       "Reducing number of rx queues from %u to %u.\n",
			       count, EFX_MAX_RX_QUEUES);
		count = EFX_MAX_RX_QUEUES;
	}

	/* If RSS is requested for the PF *and* VFs then we can't write RSS
	 * table entries that are inaccessible to VFs
	 */
#ifdef CONFIG_SFC_SRIOV
	if (efx->type->sriov_wanted) {
		if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
		    count > efx_vf_size(efx)) {
			netif_warn(efx, probe, efx->net_dev,
				   "Reducing number of RSS channels from %u to %u for "
				   "VF support. Increase vf-msix-limit to use more "
				   "channels on the PF.\n",
				   count, efx_vf_size(efx));
			count = efx_vf_size(efx);
		}
	}
#endif

	return count;
}
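/* The sibling-mask walk above counts each physical core once: on a
 * hypothetical 4-core/8-thread machine, the first CPU of each core bumps
 * count and ORs its SMT siblings into thread_mask, so the siblings are
 * skipped and the default parallelism comes out as 4 rather than 8.
 */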
1429
1430/* Probe the number and type of interrupts we are able to obtain, and
1431 * the resulting numbers of channels and RX queues.
1432 */
1433static int efx_probe_interrupts(struct efx_nic *efx)
1434{
1435 unsigned int extra_channels = 0;
1436 unsigned int i, j;
1437 int rc;
1438
1439 for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
1440 if (efx->extra_channel_type[i])
1441 ++extra_channels;
1442
1443 if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
1444 struct msix_entry xentries[EFX_MAX_CHANNELS];
1445 unsigned int n_channels;
1446
1447 n_channels = efx_wanted_parallelism(efx);
1448 if (efx_separate_tx_channels)
1449 n_channels *= 2;
1450 n_channels += extra_channels;
1451 n_channels = min(n_channels, efx->max_channels);
1452
1453 for (i = 0; i < n_channels; i++)
1454 xentries[i].entry = i;
1455 rc = pci_enable_msix_range(efx->pci_dev,
1456 xentries, 1, n_channels);
1457 if (rc < 0) {
1458 /* Fall back to single channel MSI */
1459 netif_err(efx, drv, efx->net_dev,
1460 "could not enable MSI-X\n");
1461 if (efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI)
1462 efx->interrupt_mode = EFX_INT_MODE_MSI;
1463 else
1464 return rc;
1465 } else if (rc < n_channels) {
1466 netif_err(efx, drv, efx->net_dev,
1467 "WARNING: Insufficient MSI-X vectors"
1468 " available (%d < %u).\n", rc, n_channels);
1469 netif_err(efx, drv, efx->net_dev,
1470 "WARNING: Performance may be reduced.\n");
1471 n_channels = rc;
1472 }
1473
1474 if (rc > 0) {
1475 efx->n_channels = n_channels;
1476 if (n_channels > extra_channels)
1477 n_channels -= extra_channels;
1478 if (efx_separate_tx_channels) {
1479 efx->n_tx_channels = min(max(n_channels / 2,
1480 1U),
1481 efx->max_tx_channels);
1482 efx->n_rx_channels = max(n_channels -
1483 efx->n_tx_channels,
1484 1U);
1485 } else {
1486 efx->n_tx_channels = min(n_channels,
1487 efx->max_tx_channels);
1488 efx->n_rx_channels = n_channels;
1489 }
1490 for (i = 0; i < efx->n_channels; i++)
1491 efx_get_channel(efx, i)->irq =
1492 xentries[i].vector;
1493 }
1494 }
1495
1496 /* Try single interrupt MSI */
1497 if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
1498 efx->n_channels = 1;
1499 efx->n_rx_channels = 1;
1500 efx->n_tx_channels = 1;
1501 rc = pci_enable_msi(efx->pci_dev);
1502 if (rc == 0) {
1503 efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
1504 } else {
1505 netif_err(efx, drv, efx->net_dev,
1506 "could not enable MSI\n");
1507 if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY)
1508 efx->interrupt_mode = EFX_INT_MODE_LEGACY;
1509 else
1510 return rc;
1511 }
1512 }
1513
1514 /* Assume legacy interrupts */
1515 if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
1516 efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
1517 efx->n_rx_channels = 1;
1518 efx->n_tx_channels = 1;
1519 efx->legacy_irq = efx->pci_dev->irq;
1520 }
1521
1522 /* Assign extra channels if possible */
1523 efx->n_extra_tx_channels = 0;
1524 j = efx->n_channels;
1525 for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
1526 if (!efx->extra_channel_type[i])
1527 continue;
1528 if (efx->interrupt_mode != EFX_INT_MODE_MSIX ||
1529 efx->n_channels <= extra_channels) {
1530 efx->extra_channel_type[i]->handle_no_channel(efx);
1531 } else {
1532 --j;
1533 efx_get_channel(efx, j)->type =
1534 efx->extra_channel_type[i];
1535 if (efx_channel_has_tx_queues(efx_get_channel(efx, j)))
1536 efx->n_extra_tx_channels++;
1537 }
1538 }
1539
1540 /* RSS might be usable on VFs even if it is disabled on the PF */
1541#ifdef CONFIG_SFC_SRIOV
1542 if (efx->type->sriov_wanted) {
1543 efx->rss_spread = ((efx->n_rx_channels > 1 ||
1544 !efx->type->sriov_wanted(efx)) ?
1545 efx->n_rx_channels : efx_vf_size(efx));
1546 return 0;
1547 }
1548#endif
1549 efx->rss_spread = efx->n_rx_channels;
1550
1551 return 0;
1552}
1553
1554static int efx_soft_enable_interrupts(struct efx_nic *efx)
1555{
1556 struct efx_channel *channel, *end_channel;
1557 int rc;
1558
1559 BUG_ON(efx->state == STATE_DISABLED);
1560
1561 efx->irq_soft_enabled = true;
1562 smp_wmb();
1563
1564 efx_for_each_channel(channel, efx) {
1565 if (!channel->type->keep_eventq) {
1566 rc = efx_init_eventq(channel);
1567 if (rc)
1568 goto fail;
1569 }
1570 efx_start_eventq(channel);
1571 }
1572
1573 efx_mcdi_mode_event(efx);
1574
1575 return 0;
1576fail:
1577 end_channel = channel;
1578 efx_for_each_channel(channel, efx) {
1579 if (channel == end_channel)
1580 break;
1581 efx_stop_eventq(channel);
1582 if (!channel->type->keep_eventq)
1583 efx_fini_eventq(channel);
1584 }
1585
1586 return rc;
1587}
1588
1589static void efx_soft_disable_interrupts(struct efx_nic *efx)
1590{
1591 struct efx_channel *channel;
1592
1593 if (efx->state == STATE_DISABLED)
1594 return;
1595
1596 efx_mcdi_mode_poll(efx);
1597
1598 efx->irq_soft_enabled = false;
1599 smp_wmb();
1600
1601 if (efx->legacy_irq)
1602 synchronize_irq(efx->legacy_irq);
1603
1604 efx_for_each_channel(channel, efx) {
1605 if (channel->irq)
1606 synchronize_irq(channel->irq);
1607
1608 efx_stop_eventq(channel);
1609 if (!channel->type->keep_eventq)
1610 efx_fini_eventq(channel);
1611 }
1612
1613 /* Flush the asynchronous MCDI request queue */
1614 efx_mcdi_flush_async(efx);
1615}
1616
1617static int efx_enable_interrupts(struct efx_nic *efx)
1618{
1619 struct efx_channel *channel, *end_channel;
1620 int rc;
1621
1622 BUG_ON(efx->state == STATE_DISABLED);
1623
1624 if (efx->eeh_disabled_legacy_irq) {
1625 enable_irq(efx->legacy_irq);
1626 efx->eeh_disabled_legacy_irq = false;
1627 }
1628
1629 efx->type->irq_enable_master(efx);
1630
1631 efx_for_each_channel(channel, efx) {
1632 if (channel->type->keep_eventq) {
1633 rc = efx_init_eventq(channel);
1634 if (rc)
1635 goto fail;
1636 }
1637 }
1638
1639 rc = efx_soft_enable_interrupts(efx);
1640 if (rc)
1641 goto fail;
1642
1643 return 0;
1644
1645fail:
1646 end_channel = channel;
1647 efx_for_each_channel(channel, efx) {
1648 if (channel == end_channel)
1649 break;
1650 if (channel->type->keep_eventq)
1651 efx_fini_eventq(channel);
1652 }
1653
1654 efx->type->irq_disable_non_ev(efx);
1655
1656 return rc;
1657}
1658
1659static void efx_disable_interrupts(struct efx_nic *efx)
1660{
1661 struct efx_channel *channel;
1662
1663 efx_soft_disable_interrupts(efx);
1664
1665 efx_for_each_channel(channel, efx) {
1666 if (channel->type->keep_eventq)
1667 efx_fini_eventq(channel);
1668 }
1669
1670 efx->type->irq_disable_non_ev(efx);
1671}
1672
1673static void efx_remove_interrupts(struct efx_nic *efx)
1674{
1675 struct efx_channel *channel;
1676
1677 /* Remove MSI/MSI-X interrupts */
1678 efx_for_each_channel(channel, efx)
1679 channel->irq = 0;
1680 pci_disable_msi(efx->pci_dev);
1681 pci_disable_msix(efx->pci_dev);
1682
1683 /* Remove legacy interrupt */
1684 efx->legacy_irq = 0;
1685}
1686
1687static void efx_set_channels(struct efx_nic *efx)
1688{
1689 struct efx_channel *channel;
1690 struct efx_tx_queue *tx_queue;
1691
1692 efx->tx_channel_offset =
1693 efx_separate_tx_channels ?
1694 efx->n_channels - efx->n_tx_channels : 0;
1695
1696 /* We need to mark which channels really have RX and TX
1697 * queues, and adjust the TX queue numbers if we have separate
1698 * RX-only and TX-only channels.
1699 */
1700 efx_for_each_channel(channel, efx) {
1701 if (channel->channel < efx->n_rx_channels)
1702 channel->rx_queue.core_index = channel->channel;
1703 else
1704 channel->rx_queue.core_index = -1;
1705
1706 efx_for_each_channel_tx_queue(tx_queue, channel)
1707 tx_queue->queue -= (efx->tx_channel_offset *
1708 EFX_TXQ_TYPES);
1709 }
1710}
1711
1712static int efx_probe_nic(struct efx_nic *efx)
1713{
1714 int rc;
1715
1716 netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");
1717
1718 /* Carry out hardware-type specific initialisation */
1719 rc = efx->type->probe(efx);
1720 if (rc)
1721 return rc;
1722
1723 do {
1724 if (!efx->max_channels || !efx->max_tx_channels) {
1725 netif_err(efx, drv, efx->net_dev,
1726 "Insufficient resources to allocate"
1727 " any channels\n");
1728 rc = -ENOSPC;
1729 goto fail1;
1730 }
1731
1732 /* Determine the number of channels and queues by trying
1733 * to hook in MSI-X interrupts.
1734 */
1735 rc = efx_probe_interrupts(efx);
1736 if (rc)
1737 goto fail1;
1738
1739 efx_set_channels(efx);
1740
1741 /* dimension_resources can fail with EAGAIN */
1742 rc = efx->type->dimension_resources(efx);
1743 if (rc != 0 && rc != -EAGAIN)
1744 goto fail2;
1745
1746 if (rc == -EAGAIN)
1747 /* try again with new max_channels */
1748 efx_remove_interrupts(efx);
1749
1750 } while (rc == -EAGAIN);
1751
1752 if (efx->n_channels > 1)
1753 netdev_rss_key_fill(efx->rss_context.rx_hash_key,
1754 sizeof(efx->rss_context.rx_hash_key));
1755 efx_set_default_rx_indir_table(efx, &efx->rss_context);
1756
1757 netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
1758 netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
1759
1760 /* Initialise the interrupt moderation settings */
1761 efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000);
1762 efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
1763 true);
1764
1765 return 0;
1766
1767fail2:
1768 efx_remove_interrupts(efx);
1769fail1:
1770 efx->type->remove(efx);
1771 return rc;
1772}
1773
1774static void efx_remove_nic(struct efx_nic *efx)
1775{
1776 netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");
1777
1778 efx_remove_interrupts(efx);
1779 efx->type->remove(efx);
1780}
1781
1782static int efx_probe_filters(struct efx_nic *efx)
1783{
1784 int rc;
1785
1786 init_rwsem(&efx->filter_sem);
1787 mutex_lock(&efx->mac_lock);
1788 down_write(&efx->filter_sem);
1789 rc = efx->type->filter_table_probe(efx);
1790 if (rc)
1791 goto out_unlock;
1792
1793#ifdef CONFIG_RFS_ACCEL
1794 if (efx->type->offload_features & NETIF_F_NTUPLE) {
1795 struct efx_channel *channel;
1796 int i, success = 1;
1797
1798 efx_for_each_channel(channel, efx) {
1799 channel->rps_flow_id =
1800 kcalloc(efx->type->max_rx_ip_filters,
1801 sizeof(*channel->rps_flow_id),
1802 GFP_KERNEL);
1803 if (!channel->rps_flow_id)
1804 success = 0;
1805 else
1806 for (i = 0;
1807 i < efx->type->max_rx_ip_filters;
1808 ++i)
1809 channel->rps_flow_id[i] =
1810 RPS_FLOW_ID_INVALID;
1811 }
1812
1813 if (!success) {
1814 efx_for_each_channel(channel, efx)
1815 kfree(channel->rps_flow_id);
1816 efx->type->filter_table_remove(efx);
1817 rc = -ENOMEM;
1818 goto out_unlock;
1819 }
1820
1821 efx->rps_expire_index = efx->rps_expire_channel = 0;
1822 }
1823#endif
1824out_unlock:
1825 up_write(&efx->filter_sem);
1826 mutex_unlock(&efx->mac_lock);
1827 return rc;
1828}
1829
1830static void efx_remove_filters(struct efx_nic *efx)
1831{
1832#ifdef CONFIG_RFS_ACCEL
1833 struct efx_channel *channel;
1834
1835 efx_for_each_channel(channel, efx)
1836 kfree(channel->rps_flow_id);
1837#endif
1838 down_write(&efx->filter_sem);
1839 efx->type->filter_table_remove(efx);
1840 up_write(&efx->filter_sem);
1841}
1842
1843static void efx_restore_filters(struct efx_nic *efx)
1844{
1845 down_read(&efx->filter_sem);
1846 efx->type->filter_table_restore(efx);
1847 up_read(&efx->filter_sem);
1848}
1849
1850/**************************************************************************
1851 *
1852 * NIC startup/shutdown
1853 *
1854 *************************************************************************/
1855
1856static int efx_probe_all(struct efx_nic *efx)
1857{
1858 int rc;
1859
1860 rc = efx_probe_nic(efx);
1861 if (rc) {
1862 netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
1863 goto fail1;
1864 }
1865
1866 rc = efx_probe_port(efx);
1867 if (rc) {
1868 netif_err(efx, probe, efx->net_dev, "failed to create port\n");
1869 goto fail2;
1870 }
1871
1872 BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT);
1873 if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) {
1874 rc = -EINVAL;
1875 goto fail3;
1876 }
1877 efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
1878
1879#ifdef CONFIG_SFC_SRIOV
1880 rc = efx->type->vswitching_probe(efx);
1881 if (rc) /* not fatal; the PF will still work fine */
1882 netif_warn(efx, probe, efx->net_dev,
1883 "failed to setup vswitching rc=%d;"
1884 " VFs may not function\n", rc);
1885#endif
1886
1887 rc = efx_probe_filters(efx);
1888 if (rc) {
1889 netif_err(efx, probe, efx->net_dev,
1890 "failed to create filter tables\n");
1891 goto fail4;
1892 }
1893
1894 rc = efx_probe_channels(efx);
1895 if (rc)
1896 goto fail5;
1897
1898 return 0;
1899
1900 fail5:
1901 efx_remove_filters(efx);
1902 fail4:
1903#ifdef CONFIG_SFC_SRIOV
1904 efx->type->vswitching_remove(efx);
1905#endif
1906 fail3:
1907 efx_remove_port(efx);
1908 fail2:
1909 efx_remove_nic(efx);
1910 fail1:
1911 return rc;
1912}
1913
1914/* If the interface is supposed to be running but is not, start
1915 * the hardware and software data path, regular activity for the port
1916 * (MAC statistics, link polling, etc.) and schedule the port to be
1917 * reconfigured. Interrupts must already be enabled. This function
1918 * is safe to call multiple times, so long as the NIC is not disabled.
1919 * Requires the RTNL lock.
1920 */
1921static void efx_start_all(struct efx_nic *efx)
1922{
1923 EFX_ASSERT_RESET_SERIALISED(efx);
1924 BUG_ON(efx->state == STATE_DISABLED);
1925
1926 /* Check that it is appropriate to restart the interface. All
1927 * of these flags are safe to read under just the rtnl lock */
1928 if (efx->port_enabled || !netif_running(efx->net_dev) ||
1929 efx->reset_pending)
1930 return;
1931
1932 efx_start_port(efx);
1933 efx_start_datapath(efx);
1934
1935 /* Start the hardware monitor if there is one */
1936 if (efx->type->monitor != NULL)
1937 queue_delayed_work(efx->workqueue, &efx->monitor_work,
1938 efx_monitor_interval);
1939
1940 /* Link state detection is normally event-driven; we have
1941 * to poll now because we could have missed a change
1942 */
1943 mutex_lock(&efx->mac_lock);
1944 if (efx->phy_op->poll(efx))
1945 efx_link_status_changed(efx);
1946 mutex_unlock(&efx->mac_lock);
1947
1948 efx->type->start_stats(efx);
1949 efx->type->pull_stats(efx);
1950 spin_lock_bh(&efx->stats_lock);
1951 efx->type->update_stats(efx, NULL, NULL);
1952 spin_unlock_bh(&efx->stats_lock);
1953}
1954
1955/* Quiesce the hardware and software data path, and regular activity
1956 * for the port without bringing the link down. Safe to call multiple
1957 * times with the NIC in almost any state, but interrupts should be
1958 * enabled. Requires the RTNL lock.
1959 */
1960static void efx_stop_all(struct efx_nic *efx)
1961{
1962 EFX_ASSERT_RESET_SERIALISED(efx);
1963
1964 /* port_enabled can be read safely under the rtnl lock */
1965 if (!efx->port_enabled)
1966 return;
1967
1968 /* update stats before we go down so we can accurately count
1969 * rx_nodesc_drops
1970 */
1971 efx->type->pull_stats(efx);
1972 spin_lock_bh(&efx->stats_lock);
1973 efx->type->update_stats(efx, NULL, NULL);
1974 spin_unlock_bh(&efx->stats_lock);
1975 efx->type->stop_stats(efx);
1976 efx_stop_port(efx);
1977
1978 /* Stop the kernel transmit interface. This is only valid if
1979 * the device is stopped or detached; otherwise the watchdog
1980 * may fire immediately.
1981 */
1982 WARN_ON(netif_running(efx->net_dev) &&
1983 netif_device_present(efx->net_dev));
1984 netif_tx_disable(efx->net_dev);
1985
1986 efx_stop_datapath(efx);
1987}
1988
1989static void efx_remove_all(struct efx_nic *efx)
1990{
1991 efx_remove_channels(efx);
1992 efx_remove_filters(efx);
1993#ifdef CONFIG_SFC_SRIOV
1994 efx->type->vswitching_remove(efx);
1995#endif
1996 efx_remove_port(efx);
1997 efx_remove_nic(efx);
1998}
1999
2000/**************************************************************************
2001 *
2002 * Interrupt moderation
2003 *
2004 **************************************************************************/
2005unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs)
2006{
2007 if (usecs == 0)
2008 return 0;
2009 if (usecs * 1000 < efx->timer_quantum_ns)
2010 return 1; /* never round down to 0 */
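	/* worked example (with a hypothetical timer_quantum_ns of 6144):
	 * 100us maps to 100000 / 6144 = 16 ticks, rounding down
	 */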
2011 return usecs * 1000 / efx->timer_quantum_ns;
2012}
2013
2014unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks)
2015{
2016 /* We must round up when converting ticks to microseconds
2017 * because we round down when converting the other way.
2018 */
2019 return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000);
2020}
2021
2022/* Set interrupt moderation parameters */
2023int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
2024 unsigned int rx_usecs, bool rx_adaptive,
2025 bool rx_may_override_tx)
2026{
2027 struct efx_channel *channel;
2028 unsigned int timer_max_us;
2029
2030 EFX_ASSERT_RESET_SERIALISED(efx);
2031
2032 timer_max_us = efx->timer_max_ns / 1000;
2033
2034 if (tx_usecs > timer_max_us || rx_usecs > timer_max_us)
2035 return -EINVAL;
2036
2037 if (tx_usecs != rx_usecs && efx->tx_channel_offset == 0 &&
2038 !rx_may_override_tx) {
2039 netif_err(efx, drv, efx->net_dev, "Channels are shared. "
2040 "RX and TX IRQ moderation must be equal\n");
2041 return -EINVAL;
2042 }
2043
2044 efx->irq_rx_adaptive = rx_adaptive;
2045 efx->irq_rx_moderation_us = rx_usecs;
2046 efx_for_each_channel(channel, efx) {
2047 if (efx_channel_has_rx_queue(channel))
2048 channel->irq_moderation_us = rx_usecs;
2049 else if (efx_channel_has_tx_queues(channel))
2050 channel->irq_moderation_us = tx_usecs;
2051 }
2052
2053 return 0;
2054}
2055
2056void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
2057 unsigned int *rx_usecs, bool *rx_adaptive)
2058{
2059 *rx_adaptive = efx->irq_rx_adaptive;
2060 *rx_usecs = efx->irq_rx_moderation_us;
2061
2062 /* If channels are shared between RX and TX, so is IRQ
2063 * moderation. Otherwise, IRQ moderation is the same for all
2064 * TX channels and is not adaptive.
2065 */
2066 if (efx->tx_channel_offset == 0) {
2067 *tx_usecs = *rx_usecs;
2068 } else {
2069 struct efx_channel *tx_channel;
2070
2071 tx_channel = efx->channel[efx->tx_channel_offset];
2072 *tx_usecs = tx_channel->irq_moderation_us;
2073 }
2074}
2075
2076/**************************************************************************
2077 *
2078 * Hardware monitor
2079 *
2080 **************************************************************************/
2081
2082/* Run periodically off the general workqueue */
2083static void efx_monitor(struct work_struct *data)
2084{
2085 struct efx_nic *efx = container_of(data, struct efx_nic,
2086 monitor_work.work);
2087
2088 netif_vdbg(efx, timer, efx->net_dev,
2089 "hardware monitor executing on CPU %d\n",
2090 raw_smp_processor_id());
2091 BUG_ON(efx->type->monitor == NULL);
2092
	/* If the mac_lock is already held then a port reconfiguration is
	 * probably already in progress and will do most of the work of
	 * monitor() anyway. */
2096 if (mutex_trylock(&efx->mac_lock)) {
2097 if (efx->port_enabled)
2098 efx->type->monitor(efx);
2099 mutex_unlock(&efx->mac_lock);
2100 }
2101
2102 queue_delayed_work(efx->workqueue, &efx->monitor_work,
2103 efx_monitor_interval);
2104}
2105
2106/**************************************************************************
2107 *
2108 * ioctls
2109 *
2110 *************************************************************************/
2111
2112/* Net device ioctl
2113 * Context: process, rtnl_lock() held.
2114 */
2115static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
2116{
2117 struct efx_nic *efx = netdev_priv(net_dev);
2118 struct mii_ioctl_data *data = if_mii(ifr);
2119
2120 if (cmd == SIOCSHWTSTAMP)
2121 return efx_ptp_set_ts_config(efx, ifr);
2122 if (cmd == SIOCGHWTSTAMP)
2123 return efx_ptp_get_ts_config(efx, ifr);
2124
2125 /* Convert phy_id from older PRTAD/DEVAD format */
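	/* the old format used bit 10 as its clause-45 flag; the XOR below
	 * clears it and sets MDIO_PHY_ID_C45 instead, leaving the
	 * PRTAD/DEVAD bits intact
	 */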
2126 if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
2127 (data->phy_id & 0xfc00) == 0x0400)
2128 data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;
2129
2130 return mdio_mii_ioctl(&efx->mdio, data, cmd);
2131}
2132
2133/**************************************************************************
2134 *
2135 * NAPI interface
2136 *
2137 **************************************************************************/
2138
2139static void efx_init_napi_channel(struct efx_channel *channel)
2140{
2141 struct efx_nic *efx = channel->efx;
2142
2143 channel->napi_dev = efx->net_dev;
2144 netif_napi_add(channel->napi_dev, &channel->napi_str,
2145 efx_poll, napi_weight);
2146}
2147
2148static void efx_init_napi(struct efx_nic *efx)
2149{
2150 struct efx_channel *channel;
2151
2152 efx_for_each_channel(channel, efx)
2153 efx_init_napi_channel(channel);
2154}
2155
2156static void efx_fini_napi_channel(struct efx_channel *channel)
2157{
2158 if (channel->napi_dev)
2159 netif_napi_del(&channel->napi_str);
2160
2161 channel->napi_dev = NULL;
2162}
2163
2164static void efx_fini_napi(struct efx_nic *efx)
2165{
2166 struct efx_channel *channel;
2167
2168 efx_for_each_channel(channel, efx)
2169 efx_fini_napi_channel(channel);
2170}
2171
2172/**************************************************************************
2173 *
2174 * Kernel netpoll interface
2175 *
2176 *************************************************************************/
2177
2178#ifdef CONFIG_NET_POLL_CONTROLLER
2179
2180/* Although in the common case interrupts will be disabled, this is not
2181 * guaranteed. However, all our work happens inside the NAPI callback,
2182 * so no locking is required.
2183 */
2184static void efx_netpoll(struct net_device *net_dev)
2185{
2186 struct efx_nic *efx = netdev_priv(net_dev);
2187 struct efx_channel *channel;
2188
2189 efx_for_each_channel(channel, efx)
2190 efx_schedule_channel(channel);
2191}
2192
2193#endif
2194
2195/**************************************************************************
2196 *
2197 * Kernel net device interface
2198 *
2199 *************************************************************************/
2200
2201/* Context: process, rtnl_lock() held. */
2202int efx_net_open(struct net_device *net_dev)
2203{
2204 struct efx_nic *efx = netdev_priv(net_dev);
2205 int rc;
2206
2207 netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
2208 raw_smp_processor_id());
2209
2210 rc = efx_check_disabled(efx);
2211 if (rc)
2212 return rc;
2213 if (efx->phy_mode & PHY_MODE_SPECIAL)
2214 return -EBUSY;
2215 if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
2216 return -EIO;
2217
2218 /* Notify the kernel of the link state polled during driver load,
2219 * before the monitor starts running */
2220 efx_link_status_changed(efx);
2221
2222 efx_start_all(efx);
2223 if (efx->state == STATE_DISABLED || efx->reset_pending)
2224 netif_device_detach(efx->net_dev);
2225 efx_selftest_async_start(efx);
2226 return 0;
2227}
2228
2229/* Context: process, rtnl_lock() held.
2230 * Note that the kernel will ignore our return code; this method
2231 * should really be a void.
2232 */
2233int efx_net_stop(struct net_device *net_dev)
2234{
2235 struct efx_nic *efx = netdev_priv(net_dev);
2236
2237 netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
2238 raw_smp_processor_id());
2239
2240 /* Stop the device and flush all the channels */
2241 efx_stop_all(efx);
2242
2243 return 0;
2244}
2245
2246/* Context: process, dev_base_lock or RTNL held, non-blocking. */
2247static void efx_net_stats(struct net_device *net_dev,
2248 struct rtnl_link_stats64 *stats)
2249{
2250 struct efx_nic *efx = netdev_priv(net_dev);
2251
2252 spin_lock_bh(&efx->stats_lock);
2253 efx->type->update_stats(efx, NULL, stats);
2254 spin_unlock_bh(&efx->stats_lock);
2255}
2256
2257/* Context: netif_tx_lock held, BHs disabled. */
2258static void efx_watchdog(struct net_device *net_dev)
2259{
2260 struct efx_nic *efx = netdev_priv(net_dev);
2261
2262 netif_err(efx, tx_err, efx->net_dev,
2263 "TX stuck with port_enabled=%d: resetting channels\n",
2264 efx->port_enabled);
2265
2266 efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
2267}
2268
2269
2270/* Context: process, rtnl_lock() held. */
2271static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
2272{
2273 struct efx_nic *efx = netdev_priv(net_dev);
2274 int rc;
2275
2276 rc = efx_check_disabled(efx);
2277 if (rc)
2278 return rc;
2279
2280 netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
2281
2282 efx_device_detach_sync(efx);
2283 efx_stop_all(efx);
2284
2285 mutex_lock(&efx->mac_lock);
2286 net_dev->mtu = new_mtu;
2287 efx_mac_reconfigure(efx);
2288 mutex_unlock(&efx->mac_lock);
2289
2290 efx_start_all(efx);
2291 efx_device_attach_if_not_resetting(efx);
2292 return 0;
2293}
2294
2295static int efx_set_mac_address(struct net_device *net_dev, void *data)
2296{
2297 struct efx_nic *efx = netdev_priv(net_dev);
2298 struct sockaddr *addr = data;
2299 u8 *new_addr = addr->sa_data;
	u8 old_addr[ETH_ALEN];
2301 int rc;
2302
2303 if (!is_valid_ether_addr(new_addr)) {
2304 netif_err(efx, drv, efx->net_dev,
2305 "invalid ethernet MAC address requested: %pM\n",
2306 new_addr);
2307 return -EADDRNOTAVAIL;
2308 }
2309
2310 /* save old address */
2311 ether_addr_copy(old_addr, net_dev->dev_addr);
2312 ether_addr_copy(net_dev->dev_addr, new_addr);
2313 if (efx->type->set_mac_address) {
2314 rc = efx->type->set_mac_address(efx);
2315 if (rc) {
2316 ether_addr_copy(net_dev->dev_addr, old_addr);
2317 return rc;
2318 }
2319 }
2320
2321 /* Reconfigure the MAC */
2322 mutex_lock(&efx->mac_lock);
2323 efx_mac_reconfigure(efx);
2324 mutex_unlock(&efx->mac_lock);
2325
2326 return 0;
2327}
2328
2329/* Context: netif_addr_lock held, BHs disabled. */
2330static void efx_set_rx_mode(struct net_device *net_dev)
2331{
2332 struct efx_nic *efx = netdev_priv(net_dev);
2333
2334 if (efx->port_enabled)
2335 queue_work(efx->workqueue, &efx->mac_work);
2336 /* Otherwise efx_start_port() will do this */
2337}
2338
2339static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
2340{
2341 struct efx_nic *efx = netdev_priv(net_dev);
2342 int rc;
2343
2344 /* If disabling RX n-tuple filtering, clear existing filters */
2345 if (net_dev->features & ~data & NETIF_F_NTUPLE) {
2346 rc = efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
2347 if (rc)
2348 return rc;
2349 }
2350
	/* If the RX VLAN filter is changed, update filters via
	 * mac_reconfigure. If rx-fcs is changed, mac_reconfigure updates
	 * that too.
	 */
2354 if ((net_dev->features ^ data) & (NETIF_F_HW_VLAN_CTAG_FILTER |
2355 NETIF_F_RXFCS)) {
		/* efx_set_rx_mode() will schedule MAC work to update filters
		 * when the new features are finally set in net_dev.
		 */
2359 efx_set_rx_mode(net_dev);
2360 }
2361
2362 return 0;
2363}
2364
2365static int efx_get_phys_port_id(struct net_device *net_dev,
2366 struct netdev_phys_item_id *ppid)
2367{
2368 struct efx_nic *efx = netdev_priv(net_dev);
2369
2370 if (efx->type->get_phys_port_id)
2371 return efx->type->get_phys_port_id(efx, ppid);
2372 else
2373 return -EOPNOTSUPP;
2374}
2375
2376static int efx_get_phys_port_name(struct net_device *net_dev,
2377 char *name, size_t len)
2378{
2379 struct efx_nic *efx = netdev_priv(net_dev);
2380
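	/* snprintf() returns the untruncated length, so a result >= len
	 * means the name did not fit in the buffer
	 */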
2381 if (snprintf(name, len, "p%u", efx->port_num) >= len)
2382 return -EINVAL;
2383 return 0;
2384}
2385
2386static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid)
2387{
2388 struct efx_nic *efx = netdev_priv(net_dev);
2389
2390 if (efx->type->vlan_rx_add_vid)
2391 return efx->type->vlan_rx_add_vid(efx, proto, vid);
2392 else
2393 return -EOPNOTSUPP;
2394}
2395
2396static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vid)
2397{
2398 struct efx_nic *efx = netdev_priv(net_dev);
2399
2400 if (efx->type->vlan_rx_kill_vid)
2401 return efx->type->vlan_rx_kill_vid(efx, proto, vid);
2402 else
2403 return -EOPNOTSUPP;
2404}
2405
2406static int efx_udp_tunnel_type_map(enum udp_parsable_tunnel_type in)
2407{
2408 switch (in) {
2409 case UDP_TUNNEL_TYPE_VXLAN:
2410 return TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN;
2411 case UDP_TUNNEL_TYPE_GENEVE:
2412 return TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE;
2413 default:
2414 return -1;
2415 }
2416}
2417
2418static void efx_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti)
2419{
2420 struct efx_nic *efx = netdev_priv(dev);
2421 struct efx_udp_tunnel tnl;
2422 int efx_tunnel_type;
2423
2424 efx_tunnel_type = efx_udp_tunnel_type_map(ti->type);
2425 if (efx_tunnel_type < 0)
2426 return;
2427
2428 tnl.type = (u16)efx_tunnel_type;
2429 tnl.port = ti->port;
2430
2431 if (efx->type->udp_tnl_add_port)
2432 (void)efx->type->udp_tnl_add_port(efx, tnl);
2433}
2434
2435static void efx_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti)
2436{
2437 struct efx_nic *efx = netdev_priv(dev);
2438 struct efx_udp_tunnel tnl;
2439 int efx_tunnel_type;
2440
2441 efx_tunnel_type = efx_udp_tunnel_type_map(ti->type);
2442 if (efx_tunnel_type < 0)
2443 return;
2444
2445 tnl.type = (u16)efx_tunnel_type;
2446 tnl.port = ti->port;
2447
2448 if (efx->type->udp_tnl_del_port)
2449 (void)efx->type->udp_tnl_del_port(efx, tnl);
2450}
2451
2452static const struct net_device_ops efx_netdev_ops = {
2453 .ndo_open = efx_net_open,
2454 .ndo_stop = efx_net_stop,
2455 .ndo_get_stats64 = efx_net_stats,
2456 .ndo_tx_timeout = efx_watchdog,
2457 .ndo_start_xmit = efx_hard_start_xmit,
2458 .ndo_validate_addr = eth_validate_addr,
2459 .ndo_do_ioctl = efx_ioctl,
2460 .ndo_change_mtu = efx_change_mtu,
2461 .ndo_set_mac_address = efx_set_mac_address,
2462 .ndo_set_rx_mode = efx_set_rx_mode,
2463 .ndo_set_features = efx_set_features,
2464 .ndo_vlan_rx_add_vid = efx_vlan_rx_add_vid,
2465 .ndo_vlan_rx_kill_vid = efx_vlan_rx_kill_vid,
2466#ifdef CONFIG_SFC_SRIOV
2467 .ndo_set_vf_mac = efx_sriov_set_vf_mac,
2468 .ndo_set_vf_vlan = efx_sriov_set_vf_vlan,
2469 .ndo_set_vf_spoofchk = efx_sriov_set_vf_spoofchk,
2470 .ndo_get_vf_config = efx_sriov_get_vf_config,
2471 .ndo_set_vf_link_state = efx_sriov_set_vf_link_state,
2472#endif
2473 .ndo_get_phys_port_id = efx_get_phys_port_id,
2474 .ndo_get_phys_port_name = efx_get_phys_port_name,
2475#ifdef CONFIG_NET_POLL_CONTROLLER
2476 .ndo_poll_controller = efx_netpoll,
2477#endif
2478 .ndo_setup_tc = efx_setup_tc,
2479#ifdef CONFIG_RFS_ACCEL
2480 .ndo_rx_flow_steer = efx_filter_rfs,
2481#endif
2482 .ndo_udp_tunnel_add = efx_udp_tunnel_add,
2483 .ndo_udp_tunnel_del = efx_udp_tunnel_del,
2484};
2485
2486static void efx_update_name(struct efx_nic *efx)
2487{
	strlcpy(efx->name, efx->net_dev->name, sizeof(efx->name));
2489 efx_mtd_rename(efx);
2490 efx_set_channel_names(efx);
2491}
2492
2493static int efx_netdev_event(struct notifier_block *this,
2494 unsigned long event, void *ptr)
2495{
2496 struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
2497
2498 if ((net_dev->netdev_ops == &efx_netdev_ops) &&
2499 event == NETDEV_CHANGENAME)
2500 efx_update_name(netdev_priv(net_dev));
2501
2502 return NOTIFY_DONE;
2503}
2504
2505static struct notifier_block efx_netdev_notifier = {
2506 .notifier_call = efx_netdev_event,
2507};
2508
2509static ssize_t
2510show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
2511{
2512 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
2513 return sprintf(buf, "%d\n", efx->phy_type);
2514}
2515static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
2516
2517#ifdef CONFIG_SFC_MCDI_LOGGING
2518static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr,
2519 char *buf)
2520{
2521 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
2522 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
2523
2524 return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled);
2525}
2526static ssize_t set_mcdi_log(struct device *dev, struct device_attribute *attr,
2527 const char *buf, size_t count)
2528{
2529 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
2530 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
2531 bool enable = count > 0 && *buf != '0';
2532
2533 mcdi->logging_enabled = enable;
2534 return count;
2535}
2536static DEVICE_ATTR(mcdi_logging, 0644, show_mcdi_log, set_mcdi_log);
2537#endif
2538
2539static int efx_register_netdev(struct efx_nic *efx)
2540{
2541 struct net_device *net_dev = efx->net_dev;
2542 struct efx_channel *channel;
2543 int rc;
2544
2545 net_dev->watchdog_timeo = 5 * HZ;
2546 net_dev->irq = efx->pci_dev->irq;
2547 net_dev->netdev_ops = &efx_netdev_ops;
2548 if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
2549 net_dev->priv_flags |= IFF_UNICAST_FLT;
2550 net_dev->ethtool_ops = &efx_ethtool_ops;
2551 net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
2552 net_dev->min_mtu = EFX_MIN_MTU;
2553 net_dev->max_mtu = EFX_MAX_MTU;
2554
2555 rtnl_lock();
2556
2557 /* Enable resets to be scheduled and check whether any were
2558 * already requested. If so, the NIC is probably hosed so we
2559 * abort.
2560 */
2561 efx->state = STATE_READY;
2562 smp_mb(); /* ensure we change state before checking reset_pending */
2563 if (efx->reset_pending) {
2564 netif_err(efx, probe, efx->net_dev,
2565 "aborting probe due to scheduled reset\n");
2566 rc = -EIO;
2567 goto fail_locked;
2568 }
2569
2570 rc = dev_alloc_name(net_dev, net_dev->name);
2571 if (rc < 0)
2572 goto fail_locked;
2573 efx_update_name(efx);
2574
2575 /* Always start with carrier off; PHY events will detect the link */
2576 netif_carrier_off(net_dev);
2577
2578 rc = register_netdevice(net_dev);
2579 if (rc)
2580 goto fail_locked;
2581
2582 efx_for_each_channel(channel, efx) {
2583 struct efx_tx_queue *tx_queue;
2584 efx_for_each_channel_tx_queue(tx_queue, channel)
2585 efx_init_tx_queue_core_txq(tx_queue);
2586 }
2587
2588 efx_associate(efx);
2589
2590 rtnl_unlock();
2591
2592 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
2593 if (rc) {
2594 netif_err(efx, drv, efx->net_dev,
2595 "failed to init net dev attributes\n");
2596 goto fail_registered;
2597 }
2598#ifdef CONFIG_SFC_MCDI_LOGGING
2599 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
2600 if (rc) {
2601 netif_err(efx, drv, efx->net_dev,
2602 "failed to init net dev attributes\n");
2603 goto fail_attr_mcdi_logging;
2604 }
2605#endif
2606
2607 return 0;
2608
2609#ifdef CONFIG_SFC_MCDI_LOGGING
2610fail_attr_mcdi_logging:
2611 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
2612#endif
2613fail_registered:
2614 rtnl_lock();
2615 efx_dissociate(efx);
2616 unregister_netdevice(net_dev);
2617fail_locked:
2618 efx->state = STATE_UNINIT;
2619 rtnl_unlock();
2620 netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
2621 return rc;
2622}
2623
2624static void efx_unregister_netdev(struct efx_nic *efx)
2625{
2626 if (!efx->net_dev)
2627 return;
2628
2629 BUG_ON(netdev_priv(efx->net_dev) != efx);
2630
2631 if (efx_dev_registered(efx)) {
2632 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
2633#ifdef CONFIG_SFC_MCDI_LOGGING
2634 device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
2635#endif
2636 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
2637 unregister_netdev(efx->net_dev);
2638 }
2639}
2640
2641/**************************************************************************
2642 *
2643 * Device reset and suspend
2644 *
2645 **************************************************************************/
2646
2647/* Tears down the entire software state and most of the hardware state
2648 * before reset. */
2649void efx_reset_down(struct efx_nic *efx, enum reset_type method)
2650{
2651 EFX_ASSERT_RESET_SERIALISED(efx);
2652
2653 if (method == RESET_TYPE_MCDI_TIMEOUT)
2654 efx->type->prepare_flr(efx);
2655
2656 efx_stop_all(efx);
2657 efx_disable_interrupts(efx);
2658
2659 mutex_lock(&efx->mac_lock);
2660 mutex_lock(&efx->rss_lock);
2661 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
2662 method != RESET_TYPE_DATAPATH)
2663 efx->phy_op->fini(efx);
2664 efx->type->fini(efx);
2665}
2666
2667/* This function will always ensure that the locks acquired in
2668 * efx_reset_down() are released. A failure return code indicates
2669 * that we were unable to reinitialise the hardware, and the
2670 * driver should be disabled. If ok is false, then the rx and tx
 * engines are not restarted, pending a RESET_TYPE_DISABLE. */
2672int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
2673{
2674 int rc;
2675
2676 EFX_ASSERT_RESET_SERIALISED(efx);
2677
2678 if (method == RESET_TYPE_MCDI_TIMEOUT)
2679 efx->type->finish_flr(efx);
2680
2681 /* Ensure that SRAM is initialised even if we're disabling the device */
2682 rc = efx->type->init(efx);
2683 if (rc) {
2684 netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
2685 goto fail;
2686 }
2687
2688 if (!ok)
2689 goto fail;
2690
2691 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
2692 method != RESET_TYPE_DATAPATH) {
2693 rc = efx->phy_op->init(efx);
2694 if (rc)
2695 goto fail;
2696 rc = efx->phy_op->reconfigure(efx);
2697 if (rc && rc != -EPERM)
2698 netif_err(efx, drv, efx->net_dev,
2699 "could not restore PHY settings\n");
2700 }
2701
2702 rc = efx_enable_interrupts(efx);
2703 if (rc)
2704 goto fail;
2705
2706#ifdef CONFIG_SFC_SRIOV
2707 rc = efx->type->vswitching_restore(efx);
2708 if (rc) /* not fatal; the PF will still work fine */
2709 netif_warn(efx, probe, efx->net_dev,
2710 "failed to restore vswitching rc=%d;"
2711 " VFs may not function\n", rc);
2712#endif
2713
2714 if (efx->type->rx_restore_rss_contexts)
2715 efx->type->rx_restore_rss_contexts(efx);
2716 mutex_unlock(&efx->rss_lock);
	/* efx_restore_filters() takes the filter_sem read lock itself, so
	 * do not take it here as well
	 */
	efx_restore_filters(efx);
2720 if (efx->type->sriov_reset)
2721 efx->type->sriov_reset(efx);
2722
2723 mutex_unlock(&efx->mac_lock);
2724
2725 efx_start_all(efx);
2726
2727 if (efx->type->udp_tnl_push_ports)
2728 efx->type->udp_tnl_push_ports(efx);
2729
2730 return 0;
2731
2732fail:
2733 efx->port_initialized = false;
2734
2735 mutex_unlock(&efx->rss_lock);
2736 mutex_unlock(&efx->mac_lock);
2737
2738 return rc;
2739}
2740
2741/* Reset the NIC using the specified method. Note that the reset may
2742 * fail, in which case the card will be left in an unusable state.
2743 *
2744 * Caller must hold the rtnl_lock.
2745 */
2746int efx_reset(struct efx_nic *efx, enum reset_type method)
2747{
2748 int rc, rc2;
2749 bool disabled;
2750
2751 netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
2752 RESET_TYPE(method));
2753
2754 efx_device_detach_sync(efx);
2755 efx_reset_down(efx, method);
2756
2757 rc = efx->type->reset(efx, method);
2758 if (rc) {
2759 netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
2760 goto out;
2761 }
2762
2763 /* Clear flags for the scopes we covered. We assume the NIC and
2764 * driver are now quiescent so that there is no race here.
2765 */
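	/* -(1 << (method + 1)) has every bit above 'method' set, so this
	 * clears the flag for 'method' and all smaller scopes in one step
	 * (e.g. method == 2 gives mask ...11111000, clearing bits 0-2)
	 */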
2766 if (method < RESET_TYPE_MAX_METHOD)
2767 efx->reset_pending &= -(1 << (method + 1));
2768 else /* it doesn't fit into the well-ordered scope hierarchy */
2769 __clear_bit(method, &efx->reset_pending);
2770
2771 /* Reinitialise bus-mastering, which may have been turned off before
2772 * the reset was scheduled. This is still appropriate, even in the
2773 * RESET_TYPE_DISABLE since this driver generally assumes the hardware
2774 * can respond to requests. */
2775 pci_set_master(efx->pci_dev);
2776
2777out:
2778 /* Leave device stopped if necessary */
2779 disabled = rc ||
2780 method == RESET_TYPE_DISABLE ||
2781 method == RESET_TYPE_RECOVER_OR_DISABLE;
2782 rc2 = efx_reset_up(efx, method, !disabled);
2783 if (rc2) {
2784 disabled = true;
2785 if (!rc)
2786 rc = rc2;
2787 }
2788
2789 if (disabled) {
2790 dev_close(efx->net_dev);
2791 netif_err(efx, drv, efx->net_dev, "has been disabled\n");
2792 efx->state = STATE_DISABLED;
2793 } else {
2794 netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
2795 efx_device_attach_if_not_resetting(efx);
2796 }
2797 return rc;
2798}
2799
2800/* Try recovery mechanisms.
2801 * For now only EEH is supported.
2802 * Returns 0 if the recovery mechanisms are unsuccessful.
2803 * Returns a non-zero value otherwise.
2804 */
2805int efx_try_recovery(struct efx_nic *efx)
2806{
2807#ifdef CONFIG_EEH
2808 /* A PCI error can occur and not be seen by EEH because nothing
2809 * happens on the PCI bus. In this case the driver may fail and
2810 * schedule a 'recover or reset', leading to this recovery handler.
2811 * Manually call the eeh failure check function.
2812 */
2813 struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev);
2814 if (eeh_dev_check_failure(eehdev)) {
2815 /* The EEH mechanisms will handle the error and reset the
2816 * device if necessary.
2817 */
2818 return 1;
2819 }
2820#endif
2821 return 0;
2822}
2823
2824static void efx_wait_for_bist_end(struct efx_nic *efx)
2825{
2826 int i;
2827
2828 for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) {
2829 if (efx_mcdi_poll_reboot(efx))
2830 goto out;
2831 msleep(BIST_WAIT_DELAY_MS);
2832 }
2833
2834 netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n");
2835out:
2836 /* Either way unset the BIST flag. If we found no reboot we probably
2837 * won't recover, but we should try.
2838 */
2839 efx->mc_bist_for_other_fn = false;
2840}
2841
2842/* The worker thread exists so that code that cannot sleep can
2843 * schedule a reset for later.
2844 */
2845static void efx_reset_work(struct work_struct *data)
2846{
2847 struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
2848 unsigned long pending;
2849 enum reset_type method;
2850
2851 pending = READ_ONCE(efx->reset_pending);
2852 method = fls(pending) - 1;
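	/* fls() finds the highest set bit, so the highest-numbered (most
	 * drastic) pending reset method is handled first
	 */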
2853
2854 if (method == RESET_TYPE_MC_BIST)
2855 efx_wait_for_bist_end(efx);
2856
2857 if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
2858 method == RESET_TYPE_RECOVER_OR_ALL) &&
2859 efx_try_recovery(efx))
2860 return;
2861
2862 if (!pending)
2863 return;
2864
2865 rtnl_lock();
2866
2867 /* We checked the state in efx_schedule_reset() but it may
2868 * have changed by now. Now that we have the RTNL lock,
2869 * it cannot change again.
2870 */
2871 if (efx->state == STATE_READY)
2872 (void)efx_reset(efx, method);
2873
2874 rtnl_unlock();
2875}
2876
2877void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
2878{
2879 enum reset_type method;
2880
2881 if (efx->state == STATE_RECOVERY) {
2882 netif_dbg(efx, drv, efx->net_dev,
2883 "recovering: skip scheduling %s reset\n",
2884 RESET_TYPE(type));
2885 return;
2886 }
2887
2888 switch (type) {
2889 case RESET_TYPE_INVISIBLE:
2890 case RESET_TYPE_ALL:
2891 case RESET_TYPE_RECOVER_OR_ALL:
2892 case RESET_TYPE_WORLD:
2893 case RESET_TYPE_DISABLE:
2894 case RESET_TYPE_RECOVER_OR_DISABLE:
2895 case RESET_TYPE_DATAPATH:
2896 case RESET_TYPE_MC_BIST:
2897 case RESET_TYPE_MCDI_TIMEOUT:
2898 method = type;
2899 netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
2900 RESET_TYPE(method));
2901 break;
2902 default:
2903 method = efx->type->map_reset_reason(type);
2904 netif_dbg(efx, drv, efx->net_dev,
2905 "scheduling %s reset for %s\n",
2906 RESET_TYPE(method), RESET_TYPE(type));
2907 break;
2908 }
2909
2910 set_bit(method, &efx->reset_pending);
2911 smp_mb(); /* ensure we change reset_pending before checking state */
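	/* (this is presumably the counterpart of the smp_mb() in
	 * efx_register_netdev(), which orders the state write against its
	 * reset_pending check)
	 */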
2912
2913 /* If we're not READY then just leave the flags set as the cue
2914 * to abort probing or reschedule the reset later.
2915 */
2916 if (READ_ONCE(efx->state) != STATE_READY)
2917 return;
2918
2919 /* efx_process_channel() will no longer read events once a
2920 * reset is scheduled. So switch back to poll'd MCDI completions. */
2921 efx_mcdi_mode_poll(efx);
2922
2923 queue_work(reset_workqueue, &efx->reset_work);
2924}
2925
2926/**************************************************************************
2927 *
2928 * List of NICs we support
2929 *
2930 **************************************************************************/
2931
2932/* PCI device ID table */
2933static const struct pci_device_id efx_pci_table[] = {
2934 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803), /* SFC9020 */
2935 .driver_data = (unsigned long) &siena_a0_nic_type},
2936 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813), /* SFL9021 */
2937 .driver_data = (unsigned long) &siena_a0_nic_type},
2938 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903), /* SFC9120 PF */
2939 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
2940 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1903), /* SFC9120 VF */
2941 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
2942 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0923), /* SFC9140 PF */
2943 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
2944 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1923), /* SFC9140 VF */
2945 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
2946 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0a03), /* SFC9220 PF */
2947 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
2948 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1a03), /* SFC9220 VF */
2949 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
2950 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0b03), /* SFC9250 PF */
2951 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
2952 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1b03), /* SFC9250 VF */
2953 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
2954 {0} /* end of list */
2955};
2956
2957/**************************************************************************
2958 *
2959 * Dummy PHY/MAC operations
2960 *
2961 * Can be used for some unimplemented operations
2962 * Needed so all function pointers are valid and do not have to be tested
2963 * before use
2964 *
2965 **************************************************************************/
2966int efx_port_dummy_op_int(struct efx_nic *efx)
2967{
2968 return 0;
2969}
2970void efx_port_dummy_op_void(struct efx_nic *efx) {}
2971
2972static bool efx_port_dummy_op_poll(struct efx_nic *efx)
2973{
2974 return false;
2975}
2976
2977static const struct efx_phy_operations efx_dummy_phy_operations = {
2978 .init = efx_port_dummy_op_int,
2979 .reconfigure = efx_port_dummy_op_int,
2980 .poll = efx_port_dummy_op_poll,
2981 .fini = efx_port_dummy_op_void,
2982};
2983
2984/**************************************************************************
2985 *
2986 * Data housekeeping
2987 *
2988 **************************************************************************/
2989
2990/* This zeroes out and then fills in the invariants in a struct
2991 * efx_nic (including all sub-structures).
2992 */
2993static int efx_init_struct(struct efx_nic *efx,
2994 struct pci_dev *pci_dev, struct net_device *net_dev)
2995{
2996 int rc = -ENOMEM, i;
2997
2998 /* Initialise common structures */
2999 INIT_LIST_HEAD(&efx->node);
3000 INIT_LIST_HEAD(&efx->secondary_list);
3001 spin_lock_init(&efx->biu_lock);
3002#ifdef CONFIG_SFC_MTD
3003 INIT_LIST_HEAD(&efx->mtd_list);
3004#endif
3005 INIT_WORK(&efx->reset_work, efx_reset_work);
3006 INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
3007 INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
3008 efx->pci_dev = pci_dev;
3009 efx->msg_enable = debug;
3010 efx->state = STATE_UNINIT;
3011 strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
3012
3013 efx->net_dev = net_dev;
3014 efx->rx_prefix_size = efx->type->rx_prefix_size;
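	/* offset RX buffers so that, after the hardware prefix and the
	 * 14-byte Ethernet header, the IP header lands 4-byte aligned
	 */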
3015 efx->rx_ip_align =
3016 NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
3017 efx->rx_packet_hash_offset =
3018 efx->type->rx_hash_offset - efx->type->rx_prefix_size;
3019 efx->rx_packet_ts_offset =
3020 efx->type->rx_ts_offset - efx->type->rx_prefix_size;
3021 INIT_LIST_HEAD(&efx->rss_context.list);
3022 mutex_init(&efx->rss_lock);
3023 spin_lock_init(&efx->stats_lock);
3024 efx->vi_stride = EFX_DEFAULT_VI_STRIDE;
3025 efx->num_mac_stats = MC_CMD_MAC_NSTATS;
3026 BUILD_BUG_ON(MC_CMD_MAC_NSTATS - 1 != MC_CMD_MAC_GENERATION_END);
3027 mutex_init(&efx->mac_lock);
3028#ifdef CONFIG_RFS_ACCEL
3029 mutex_init(&efx->rps_mutex);
3030 spin_lock_init(&efx->rps_hash_lock);
3031 /* Failure to allocate is not fatal, but may degrade ARFS performance */
3032 efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE,
3033 sizeof(*efx->rps_hash_table), GFP_KERNEL);
3034#endif
3035 efx->phy_op = &efx_dummy_phy_operations;
3036 efx->mdio.dev = net_dev;
3037 INIT_WORK(&efx->mac_work, efx_mac_work);
3038 init_waitqueue_head(&efx->flush_wq);
3039
3040 for (i = 0; i < EFX_MAX_CHANNELS; i++) {
3041 efx->channel[i] = efx_alloc_channel(efx, i, NULL);
3042 if (!efx->channel[i])
3043 goto fail;
3044 efx->msi_context[i].efx = efx;
3045 efx->msi_context[i].index = i;
3046 }
3047
3048 /* Higher numbered interrupt modes are less capable! */
3049 if (WARN_ON_ONCE(efx->type->max_interrupt_mode >
3050 efx->type->min_interrupt_mode)) {
3051 rc = -EIO;
3052 goto fail;
3053 }
	efx->interrupt_mode = max(efx->type->max_interrupt_mode,
				  interrupt_mode);
	efx->interrupt_mode = min(efx->type->min_interrupt_mode,
				  efx->interrupt_mode);
3058
3059 /* Would be good to use the net_dev name, but we're too early */
3060 snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
3061 pci_name(pci_dev));
3062 efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
3063 if (!efx->workqueue)
3064 goto fail;
3065
3066 return 0;
3067
3068fail:
3069 efx_fini_struct(efx);
3070 return rc;
3071}
3072
3073static void efx_fini_struct(struct efx_nic *efx)
3074{
3075 int i;
3076
3077#ifdef CONFIG_RFS_ACCEL
3078 kfree(efx->rps_hash_table);
3079#endif
3080
3081 for (i = 0; i < EFX_MAX_CHANNELS; i++)
3082 kfree(efx->channel[i]);
3083
3084 kfree(efx->vpd_sn);
3085
3086 if (efx->workqueue) {
3087 destroy_workqueue(efx->workqueue);
3088 efx->workqueue = NULL;
3089 }
3090}
3091
3092void efx_update_sw_stats(struct efx_nic *efx, u64 *stats)
3093{
3094 u64 n_rx_nodesc_trunc = 0;
3095 struct efx_channel *channel;
3096
3097 efx_for_each_channel(channel, efx)
3098 n_rx_nodesc_trunc += channel->n_rx_nodesc_trunc;
3099 stats[GENERIC_STAT_rx_nodesc_trunc] = n_rx_nodesc_trunc;
3100 stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops);
3101}
3102
3103bool efx_filter_spec_equal(const struct efx_filter_spec *left,
3104 const struct efx_filter_spec *right)
3105{
3106 if ((left->match_flags ^ right->match_flags) |
3107 ((left->flags ^ right->flags) &
3108 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
3109 return false;
3110
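	/* the remaining match fields are laid out contiguously from
	 * outer_vid to the end of the struct, so one memcmp covers them all
	 */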
3111 return memcmp(&left->outer_vid, &right->outer_vid,
3112 sizeof(struct efx_filter_spec) -
3113 offsetof(struct efx_filter_spec, outer_vid)) == 0;
3114}
3115
3116u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
3117{
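	/* jhash2() hashes an array of u32 words, so the fields from
	 * outer_vid onwards must start on a 4-byte boundary
	 */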
3118 BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
3119 return jhash2((const u32 *)&spec->outer_vid,
3120 (sizeof(struct efx_filter_spec) -
3121 offsetof(struct efx_filter_spec, outer_vid)) / 4,
3122 0);
3123}
3124
3125#ifdef CONFIG_RFS_ACCEL
3126bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
3127 bool *force)
3128{
3129 if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) {
3130 /* ARFS is currently updating this entry, leave it */
3131 return false;
3132 }
3133 if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) {
3134 /* ARFS tried and failed to update this, so it's probably out
3135 * of date. Remove the filter and the ARFS rule entry.
3136 */
3137 rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
3138 *force = true;
3139 return true;
3140 } else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */
3141 /* ARFS has moved on, so old filter is not needed. Since we did
3142 * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will
3143 * not be removed by efx_rps_hash_del() subsequently.
3144 */
3145 *force = true;
3146 return true;
3147 }
3148 /* Remove it iff ARFS wants to. */
3149 return true;
3150}
3151
3152struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
3153 const struct efx_filter_spec *spec)
3154{
3155 u32 hash = efx_filter_spec_hash(spec);
3156
3157 WARN_ON(!spin_is_locked(&efx->rps_hash_lock));
3158 if (!efx->rps_hash_table)
3159 return NULL;
3160 return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
3161}
3162
3163struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
3164 const struct efx_filter_spec *spec)
3165{
3166 struct efx_arfs_rule *rule;
3167 struct hlist_head *head;
3168 struct hlist_node *node;
3169
3170 head = efx_rps_hash_bucket(efx, spec);
3171 if (!head)
3172 return NULL;
3173 hlist_for_each(node, head) {
3174 rule = container_of(node, struct efx_arfs_rule, node);
3175 if (efx_filter_spec_equal(spec, &rule->spec))
3176 return rule;
3177 }
3178 return NULL;
3179}
3180
3181struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
3182 const struct efx_filter_spec *spec,
3183 bool *new)
3184{
3185 struct efx_arfs_rule *rule;
3186 struct hlist_head *head;
3187 struct hlist_node *node;
3188
3189 head = efx_rps_hash_bucket(efx, spec);
3190 if (!head)
3191 return NULL;
3192 hlist_for_each(node, head) {
3193 rule = container_of(node, struct efx_arfs_rule, node);
3194 if (efx_filter_spec_equal(spec, &rule->spec)) {
3195 *new = false;
3196 return rule;
3197 }
3198 }
3199 rule = kmalloc(sizeof(*rule), GFP_ATOMIC);
3200 *new = true;
3201 if (rule) {
3202 memcpy(&rule->spec, spec, sizeof(rule->spec));
3203 hlist_add_head(&rule->node, head);
3204 }
3205 return rule;
3206}
3207
3208void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
3209{
3210 struct efx_arfs_rule *rule;
3211 struct hlist_head *head;
3212 struct hlist_node *node;
3213
3214 head = efx_rps_hash_bucket(efx, spec);
3215 if (WARN_ON(!head))
3216 return;
3217 hlist_for_each(node, head) {
3218 rule = container_of(node, struct efx_arfs_rule, node);
3219 if (efx_filter_spec_equal(spec, &rule->spec)) {
3220 /* Someone already reused the entry. We know that if
3221 * this check doesn't fire (i.e. filter_id == REMOVING)
3222 * then the REMOVING mark was put there by our caller,
3223 * because caller is holding a lock on filter table and
3224 * only holders of that lock set REMOVING.
3225 */
3226 if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING)
3227 return;
3228 hlist_del(node);
3229 kfree(rule);
3230 return;
3231 }
3232 }
3233 /* We didn't find it. */
3234 WARN_ON(1);
3235}
3236#endif
3237
3238/* RSS contexts. We're using linked lists and crappy O(n) algorithms, because
3239 * (a) this is an infrequent control-plane operation and (b) n is small (max 64)
3240 */
3241struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx)
3242{
3243 struct list_head *head = &efx->rss_context.list;
3244 struct efx_rss_context *ctx, *new;
3245 u32 id = 1; /* Don't use zero, that refers to the master RSS context */
3246
3247 WARN_ON(!mutex_is_locked(&efx->rss_lock));
3248
3249 /* Search for first gap in the numbering */
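	/* e.g. with user_ids {1, 2, 4} already in the list, the loop
	 * breaks at the entry with user_id 4 and the new context gets
	 * user_id 3
	 */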
3250 list_for_each_entry(ctx, head, list) {
3251 if (ctx->user_id != id)
3252 break;
3253 id++;
3254 /* Check for wrap. If this happens, we have nearly 2^32
3255 * allocated RSS contexts, which seems unlikely.
3256 */
3257 if (WARN_ON_ONCE(!id))
3258 return NULL;
3259 }
3260
3261 /* Create the new entry */
3262 new = kmalloc(sizeof(struct efx_rss_context), GFP_KERNEL);
3263 if (!new)
3264 return NULL;
3265 new->context_id = EFX_EF10_RSS_CONTEXT_INVALID;
3266 new->rx_hash_udp_4tuple = false;
3267
3268 /* Insert the new entry into the gap */
3269 new->user_id = id;
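	/* if no gap was found, &ctx->list is the list head itself here,
	 * so list_add_tail() appends the new entry at the end
	 */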
3270 list_add_tail(&new->list, &ctx->list);
3271 return new;
3272}
3273
3274struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id)
3275{
3276 struct list_head *head = &efx->rss_context.list;
3277 struct efx_rss_context *ctx;
3278
3279 WARN_ON(!mutex_is_locked(&efx->rss_lock));
3280
3281 list_for_each_entry(ctx, head, list)
3282 if (ctx->user_id == id)
3283 return ctx;
3284 return NULL;
3285}
3286
3287void efx_free_rss_context_entry(struct efx_rss_context *ctx)
3288{
3289 list_del(&ctx->list);
3290 kfree(ctx);
3291}
3292
3293/**************************************************************************
3294 *
3295 * PCI interface
3296 *
3297 **************************************************************************/
3298
3299/* Main body of final NIC shutdown code
3300 * This is called only at module unload (or hotplug removal).
3301 */
3302static void efx_pci_remove_main(struct efx_nic *efx)
3303{
3304 /* Flush reset_work. It can no longer be scheduled since we
3305 * are not READY.
3306 */
3307 BUG_ON(efx->state == STATE_READY);
3308 cancel_work_sync(&efx->reset_work);
3309
3310 efx_disable_interrupts(efx);
3311 efx_nic_fini_interrupt(efx);
3312 efx_fini_port(efx);
3313 efx->type->fini(efx);
3314 efx_fini_napi(efx);
3315 efx_remove_all(efx);
3316}
3317
3318/* Final NIC shutdown
3319 * This is called only at module unload (or hotplug removal). A PF can call
3320 * this on its VFs to ensure they are unbound first.
3321 */
3322static void efx_pci_remove(struct pci_dev *pci_dev)
3323{
3324 struct efx_nic *efx;
3325
3326 efx = pci_get_drvdata(pci_dev);
3327 if (!efx)
3328 return;
3329
3330 /* Mark the NIC as fini, then stop the interface */
3331 rtnl_lock();
3332 efx_dissociate(efx);
3333 dev_close(efx->net_dev);
3334 efx_disable_interrupts(efx);
3335 efx->state = STATE_UNINIT;
3336 rtnl_unlock();
3337
3338 if (efx->type->sriov_fini)
3339 efx->type->sriov_fini(efx);
3340
3341 efx_unregister_netdev(efx);
3342
3343 efx_mtd_remove(efx);
3344
3345 efx_pci_remove_main(efx);
3346
3347 efx_fini_io(efx);
3348 netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
3349
3350 efx_fini_struct(efx);
3351 free_netdev(efx->net_dev);
3352
3353 pci_disable_pcie_error_reporting(pci_dev);
3354};
3355
3356/* NIC VPD information
3357 * Called during probe to display the part number of the
3358 * installed NIC. VPD is potentially very large but this should
3359 * always appear within the first 512 bytes.
3360 */
3361#define SFC_VPD_LEN 512
3362static void efx_probe_vpd_strings(struct efx_nic *efx)
3363{
3364 struct pci_dev *dev = efx->pci_dev;
3365 char vpd_data[SFC_VPD_LEN];
3366 ssize_t vpd_size;
3367 int ro_start, ro_size, i, j;
3368
3369 /* Get the vpd data from the device */
3370 vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
3371 if (vpd_size <= 0) {
3372 netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n");
3373 return;
3374 }
3375
3376 /* Get the Read only section */
3377 ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
3378 if (ro_start < 0) {
3379 netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
3380 return;
3381 }
3382
3383 ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
3384 j = ro_size;
3385 i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
3386 if (i + j > vpd_size)
3387 j = vpd_size - i;
3388
3389 /* Get the Part number */
3390 i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN");
3391 if (i < 0) {
3392 netif_err(efx, drv, efx->net_dev, "Part number not found\n");
3393 return;
3394 }
3395
3396 j = pci_vpd_info_field_size(&vpd_data[i]);
3397 i += PCI_VPD_INFO_FLD_HDR_SIZE;
3398 if (i + j > vpd_size) {
3399 netif_err(efx, drv, efx->net_dev, "Incomplete part number\n");
3400 return;
3401 }
3402
3403 netif_info(efx, drv, efx->net_dev,
3404 "Part Number : %.*s\n", j, &vpd_data[i]);
3405
3406 i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
3407 j = ro_size;
3408 i = pci_vpd_find_info_keyword(vpd_data, i, j, "SN");
3409 if (i < 0) {
3410 netif_err(efx, drv, efx->net_dev, "Serial number not found\n");
3411 return;
3412 }
3413
3414 j = pci_vpd_info_field_size(&vpd_data[i]);
3415 i += PCI_VPD_INFO_FLD_HDR_SIZE;
3416 if (i + j > vpd_size) {
3417 netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n");
3418 return;
3419 }
3420
3421 efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL);
3422 if (!efx->vpd_sn)
3423 return;
3424
3425 snprintf(efx->vpd_sn, j + 1, "%s", &vpd_data[i]);
3426}
3427
3428
3429/* Main body of NIC initialisation
3430 * This is called at module load (or hotplug insertion, theoretically).
3431 */
3432static int efx_pci_probe_main(struct efx_nic *efx)
3433{
3434 int rc;
3435
3436 /* Do start-of-day initialisation */
3437 rc = efx_probe_all(efx);
3438 if (rc)
3439 goto fail1;
3440
3441 efx_init_napi(efx);
3442
3443 rc = efx->type->init(efx);
3444 if (rc) {
3445 netif_err(efx, probe, efx->net_dev,
3446 "failed to initialise NIC\n");
3447 goto fail3;
3448 }
3449
3450 rc = efx_init_port(efx);
3451 if (rc) {
3452 netif_err(efx, probe, efx->net_dev,
3453 "failed to initialise port\n");
3454 goto fail4;
3455 }
3456
3457 rc = efx_nic_init_interrupt(efx);
3458 if (rc)
3459 goto fail5;
3460 rc = efx_enable_interrupts(efx);
3461 if (rc)
3462 goto fail6;
3463
3464 return 0;
3465
3466 fail6:
3467 efx_nic_fini_interrupt(efx);
3468 fail5:
3469 efx_fini_port(efx);
3470 fail4:
3471 efx->type->fini(efx);
3472 fail3:
3473 efx_fini_napi(efx);
3474 efx_remove_all(efx);
3475 fail1:
3476 return rc;
3477}
3478
3479static int efx_pci_probe_post_io(struct efx_nic *efx)
3480{
3481 struct net_device *net_dev = efx->net_dev;
3482 int rc = efx_pci_probe_main(efx);
3483
3484 if (rc)
3485 return rc;
3486
3487 if (efx->type->sriov_init) {
3488 rc = efx->type->sriov_init(efx);
3489 if (rc)
3490 netif_err(efx, probe, efx->net_dev,
3491 "SR-IOV can't be enabled rc %d\n", rc);
3492 }
3493
3494 /* Determine netdevice features */
3495 net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
3496 NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_RXALL);
3497 if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
3498 net_dev->features |= NETIF_F_TSO6;
3499 /* Check whether device supports TSO */
3500 if (!efx->type->tso_versions || !efx->type->tso_versions(efx))
3501 net_dev->features &= ~NETIF_F_ALL_TSO;
3502 /* Mask for features that also apply to VLAN devices */
3503 net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
3504 NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
3505 NETIF_F_RXCSUM);
3506
3507 net_dev->hw_features |= net_dev->features & ~efx->fixed_features;
3508
3509 /* Disable receiving frames with bad FCS, by default. */
3510 net_dev->features &= ~NETIF_F_RXALL;
3511
3512 /* Disable VLAN filtering by default. It may be enforced if
3513 * the feature is fixed (i.e. VLAN filters are required to
3514 * receive VLAN tagged packets due to vPort restrictions).
3515 */
3516 net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
3517 net_dev->features |= efx->fixed_features;
3518
3519 rc = efx_register_netdev(efx);
3520 if (!rc)
3521 return 0;
3522
3523 efx_pci_remove_main(efx);
3524 return rc;
3525}
3526
3527/* NIC initialisation
3528 *
3529 * This is called at module load (or hotplug insertion,
3530 * theoretically). It sets up PCI mappings, resets the NIC,
3531 * sets up and registers the network devices with the kernel and hooks
3532 * the interrupt service routine. It does not prepare the device for
3533 * transmission; this is left to the first time one of the network
3534 * interfaces is brought up (i.e. efx_net_open).
3535 */
3536static int efx_pci_probe(struct pci_dev *pci_dev,
3537 const struct pci_device_id *entry)
3538{
3539 struct net_device *net_dev;
3540 struct efx_nic *efx;
3541 int rc;
3542
3543 /* Allocate and initialise a struct net_device and struct efx_nic */
3544 net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
3545 EFX_MAX_RX_QUEUES);
3546 if (!net_dev)
3547 return -ENOMEM;
3548 efx = netdev_priv(net_dev);
3549 efx->type = (const struct efx_nic_type *) entry->driver_data;
3550 efx->fixed_features |= NETIF_F_HIGHDMA;
3551
3552 pci_set_drvdata(pci_dev, efx);
3553 SET_NETDEV_DEV(net_dev, &pci_dev->dev);
3554 rc = efx_init_struct(efx, pci_dev, net_dev);
3555 if (rc)
3556 goto fail1;
3557
3558 netif_info(efx, probe, efx->net_dev,
3559 "Solarflare NIC detected\n");
3560
3561 if (!efx->type->is_vf)
3562 efx_probe_vpd_strings(efx);
3563
3564 /* Set up basic I/O (BAR mappings etc) */
3565 rc = efx_init_io(efx);
3566 if (rc)
3567 goto fail2;
3568
3569 rc = efx_pci_probe_post_io(efx);
3570 if (rc) {
3571 /* On failure, retry once immediately.
3572 * If we aborted probe due to a scheduled reset, dismiss it.
3573 */
3574 efx->reset_pending = 0;
3575 rc = efx_pci_probe_post_io(efx);
3576 if (rc) {
3577 /* On another failure, retry once more
3578 * after a 50-305ms delay.
3579 */
3580 unsigned char r;
3581
3582 get_random_bytes(&r, 1);
3583 msleep((unsigned int)r + 50);
3584 efx->reset_pending = 0;
3585 rc = efx_pci_probe_post_io(efx);
3586 }
3587 }
3588 if (rc)
3589 goto fail3;
3590
3591 netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
3592
3593 /* Try to create MTDs, but allow this to fail */
3594 rtnl_lock();
3595 rc = efx_mtd_probe(efx);
3596 rtnl_unlock();
3597 if (rc && rc != -EPERM)
3598 netif_warn(efx, probe, efx->net_dev,
3599 "failed to create MTDs (%d)\n", rc);
3600
3601 rc = pci_enable_pcie_error_reporting(pci_dev);
3602 if (rc && rc != -EINVAL)
3603 netif_notice(efx, probe, efx->net_dev,
3604 "PCIE error reporting unavailable (%d).\n",
3605 rc);
3606
3607 if (efx->type->udp_tnl_push_ports)
3608 efx->type->udp_tnl_push_ports(efx);
3609
3610 return 0;
3611
3612 fail3:
3613 efx_fini_io(efx);
3614 fail2:
3615 efx_fini_struct(efx);
3616 fail1:
3617 WARN_ON(rc > 0);
3618 netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
3619 free_netdev(net_dev);
3620 return rc;
3621}
3622
3623/* efx_pci_sriov_configure returns the actual number of Virtual Functions
3624 * enabled on success
3625 */
3626#ifdef CONFIG_SFC_SRIOV
3627static int efx_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
3628{
3629 int rc;
3630 struct efx_nic *efx = pci_get_drvdata(dev);
3631
3632 if (efx->type->sriov_configure) {
3633 rc = efx->type->sriov_configure(efx, num_vfs);
3634 if (rc)
3635 return rc;
3636 else
3637 return num_vfs;
3638 } else
3639 return -EOPNOTSUPP;
3640}
3641#endif
3642
3643static int efx_pm_freeze(struct device *dev)
3644{
3645 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
3646
3647 rtnl_lock();
3648
3649 if (efx->state != STATE_DISABLED) {
3650 efx->state = STATE_UNINIT;
3651
3652 efx_device_detach_sync(efx);
3653
3654 efx_stop_all(efx);
3655 efx_disable_interrupts(efx);
3656 }
3657
3658 rtnl_unlock();
3659
3660 return 0;
3661}
3662
3663static int efx_pm_thaw(struct device *dev)
3664{
3665 int rc;
3666 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
3667
3668 rtnl_lock();
3669
3670 if (efx->state != STATE_DISABLED) {
3671 rc = efx_enable_interrupts(efx);
3672 if (rc)
3673 goto fail;
3674
3675 mutex_lock(&efx->mac_lock);
3676 efx->phy_op->reconfigure(efx);
3677 mutex_unlock(&efx->mac_lock);
3678
3679 efx_start_all(efx);
3680
3681 efx_device_attach_if_not_resetting(efx);
3682
3683 efx->state = STATE_READY;
3684
3685 efx->type->resume_wol(efx);
3686 }
3687
3688 rtnl_unlock();
3689
3690 /* Reschedule any quenched resets scheduled during efx_pm_freeze() */
3691 queue_work(reset_workqueue, &efx->reset_work);
3692
3693 return 0;
3694
3695fail:
3696 rtnl_unlock();
3697
3698 return rc;
3699}
3700
3701static int efx_pm_poweroff(struct device *dev)
3702{
3703 struct pci_dev *pci_dev = to_pci_dev(dev);
3704 struct efx_nic *efx = pci_get_drvdata(pci_dev);
3705
3706 efx->type->fini(efx);
3707
3708 efx->reset_pending = 0;
3709
3710 pci_save_state(pci_dev);
3711 return pci_set_power_state(pci_dev, PCI_D3hot);
3712}
3713
3714/* Used for both resume and restore */
3715static int efx_pm_resume(struct device *dev)
3716{
3717 struct pci_dev *pci_dev = to_pci_dev(dev);
3718 struct efx_nic *efx = pci_get_drvdata(pci_dev);
3719 int rc;
3720
3721 rc = pci_set_power_state(pci_dev, PCI_D0);
3722 if (rc)
3723 return rc;
3724 pci_restore_state(pci_dev);
3725 rc = pci_enable_device(pci_dev);
3726 if (rc)
3727 return rc;
3728 pci_set_master(efx->pci_dev);
3729 rc = efx->type->reset(efx, RESET_TYPE_ALL);
3730 if (rc)
3731 return rc;
3732 rc = efx->type->init(efx);
3733 if (rc)
3734 return rc;
3735 rc = efx_pm_thaw(dev);
3736 return rc;
3737}
3738
3739static int efx_pm_suspend(struct device *dev)
3740{
3741 int rc;
3742
3743 efx_pm_freeze(dev);
3744 rc = efx_pm_poweroff(dev);
3745 if (rc)
3746 efx_pm_resume(dev);
3747 return rc;
3748}
3749
3750static const struct dev_pm_ops efx_pm_ops = {
3751 .suspend = efx_pm_suspend,
3752 .resume = efx_pm_resume,
3753 .freeze = efx_pm_freeze,
3754 .thaw = efx_pm_thaw,
3755 .poweroff = efx_pm_poweroff,
3756 .restore = efx_pm_resume,
3757};
3758
3759/* A PCI error affecting this device was detected.
3760 * At this point MMIO and DMA may be disabled.
3761 * Stop the software path and request a slot reset.
3762 */
3763static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
3764 enum pci_channel_state state)
3765{
3766 pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
3767 struct efx_nic *efx = pci_get_drvdata(pdev);
3768
3769 if (state == pci_channel_io_perm_failure)
3770 return PCI_ERS_RESULT_DISCONNECT;
3771
3772 rtnl_lock();
3773
3774 if (efx->state != STATE_DISABLED) {
3775 efx->state = STATE_RECOVERY;
3776 efx->reset_pending = 0;
3777
3778 efx_device_detach_sync(efx);
3779
3780 efx_stop_all(efx);
3781 efx_disable_interrupts(efx);
3782
3783 status = PCI_ERS_RESULT_NEED_RESET;
3784 } else {
3785 /* If the interface is disabled we don't want to do anything
3786 * with it.
3787 */
3788 status = PCI_ERS_RESULT_RECOVERED;
3789 }
3790
3791 rtnl_unlock();
3792
3793 pci_disable_device(pdev);
3794
3795 return status;
3796}
3797
3798/* Fake a successful reset, which will be performed later in efx_io_resume. */
3799static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev)
3800{
3801 struct efx_nic *efx = pci_get_drvdata(pdev);
3802 pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
3803 int rc;
3804
3805 if (pci_enable_device(pdev)) {
3806 netif_err(efx, hw, efx->net_dev,
3807 "Cannot re-enable PCI device after reset.\n");
3808 status = PCI_ERS_RESULT_DISCONNECT;
3809 }
3810
3811 rc = pci_cleanup_aer_uncorrect_error_status(pdev);
3812 if (rc) {
3813 netif_err(efx, hw, efx->net_dev,
3814 "pci_cleanup_aer_uncorrect_error_status failed (%d)\n", rc);
3815 /* Non-fatal error. Continue. */
3816 }
3817
3818 return status;
3819}
3820
3821/* Perform the actual reset and resume I/O operations. */
3822static void efx_io_resume(struct pci_dev *pdev)
3823{
3824 struct efx_nic *efx = pci_get_drvdata(pdev);
3825 int rc;
3826
3827 rtnl_lock();
3828
3829 if (efx->state == STATE_DISABLED)
3830 goto out;
3831
3832 rc = efx_reset(efx, RESET_TYPE_ALL);
3833 if (rc) {
3834 netif_err(efx, hw, efx->net_dev,
3835 "efx_reset failed after PCI error (%d)\n", rc);
3836 } else {
3837 efx->state = STATE_READY;
3838 netif_dbg(efx, hw, efx->net_dev,
3839 "Done resetting and resuming IO after PCI error.\n");
3840 }
3841
3842out:
3843 rtnl_unlock();
3844}
3845
3846/* For simplicity and reliability, we always require a slot reset and try to
3847 * reset the hardware when a pci error affecting the device is detected.
3848 * We leave both the link_reset and mmio_enabled callback unimplemented:
3849 * with our request for slot reset the mmio_enabled callback will never be
3850 * called, and the link_reset callback is not used by AER or EEH mechanisms.
3851 */
3852static const struct pci_error_handlers efx_err_handlers = {
3853 .error_detected = efx_io_error_detected,
3854 .slot_reset = efx_io_slot_reset,
3855 .resume = efx_io_resume,
3856};

static struct pci_driver efx_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = efx_pci_table,
	.probe = efx_pci_probe,
	.remove = efx_pci_remove,
	.driver.pm = &efx_pm_ops,
	.err_handler = &efx_err_handlers,
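	/* When SR-IOV support is compiled in, sriov_configure allows VFs
	 * to be created at runtime via the sriov_numvfs sysfs attribute.
	 */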
#ifdef CONFIG_SFC_SRIOV
	.sriov_configure = efx_pci_sriov_configure,
#endif
};

/**************************************************************************
 *
 * Kernel module interface
 *
 *************************************************************************/

module_param(interrupt_mode, uint, 0444);
MODULE_PARM_DESC(interrupt_mode,
		 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");

static int __init efx_init_module(void)
{
	int rc;

	printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");

	rc = register_netdevice_notifier(&efx_netdev_notifier);
	if (rc)
		goto err_notifier;

#ifdef CONFIG_SFC_SRIOV
	rc = efx_init_sriov();
	if (rc)
		goto err_sriov;
#endif

	reset_workqueue = create_singlethread_workqueue("sfc_reset");
	if (!reset_workqueue) {
		rc = -ENOMEM;
		goto err_reset;
	}

	rc = pci_register_driver(&efx_pci_driver);
	if (rc < 0)
		goto err_pci;

	return 0;

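	/* Unwind in reverse order of initialisation */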
 err_pci:
	destroy_workqueue(reset_workqueue);
 err_reset:
#ifdef CONFIG_SFC_SRIOV
	efx_fini_sriov();
 err_sriov:
#endif
	unregister_netdevice_notifier(&efx_netdev_notifier);
 err_notifier:
	return rc;
}

static void __exit efx_exit_module(void)
{
	printk(KERN_INFO "Solarflare NET driver unloading\n");

	pci_unregister_driver(&efx_pci_driver);
	destroy_workqueue(reset_workqueue);
#ifdef CONFIG_SFC_SRIOV
	efx_fini_sriov();
#endif
	unregister_netdevice_notifier(&efx_netdev_notifier);
}

module_init(efx_init_module);
module_exit(efx_exit_module);

MODULE_AUTHOR("Solarflare Communications and "
	      "Michael Brown <mbrown@fensystems.co.uk>");
MODULE_DESCRIPTION("Solarflare network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efx_pci_table);
MODULE_VERSION(EFX_DRIVER_VERSION);
92/* Reset workqueue. If any NIC has a hardware failure then a reset will be
93 * queued onto this work queue. This is not a per-nic work queue, because
94 * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
95 */
96static struct workqueue_struct *reset_workqueue;
97
98/* How often and how many times to poll for a reset while waiting for a
99 * BIST that another function started to complete.
100 */
101#define BIST_WAIT_DELAY_MS 100
102#define BIST_WAIT_DELAY_COUNT 100
103
104/**************************************************************************
105 *
106 * Configurable values
107 *
108 *************************************************************************/
109
110/*
111 * Use separate channels for TX and RX events
112 *
113 * Set this to 1 to use separate channels for TX and RX. It allows us
114 * to control interrupt affinity separately for TX and RX.
115 *
116 * This is only used in MSI-X interrupt mode
117 */
118bool efx_separate_tx_channels;
119module_param(efx_separate_tx_channels, bool, 0444);
120MODULE_PARM_DESC(efx_separate_tx_channels,
121 "Use separate channels for TX and RX");
122
123/* This is the weight assigned to each of the (per-channel) virtual
124 * NAPI devices.
125 */
126static int napi_weight = 64;
127
128/* This is the time (in jiffies) between invocations of the hardware
129 * monitor.
130 * On Falcon-based NICs, this will:
131 * - Check the on-board hardware monitor;
132 * - Poll the link state and reconfigure the hardware as necessary.
133 * On Siena-based NICs for power systems with EEH support, this will give EEH a
134 * chance to start.
135 */
136static unsigned int efx_monitor_interval = 1 * HZ;
137
138/* Initial interrupt moderation settings. They can be modified after
139 * module load with ethtool.
140 *
141 * The default for RX should strike a balance between increasing the
142 * round-trip latency and reducing overhead.
143 */
144static unsigned int rx_irq_mod_usec = 60;
145
146/* Initial interrupt moderation settings. They can be modified after
147 * module load with ethtool.
148 *
149 * This default is chosen to ensure that a 10G link does not go idle
150 * while a TX queue is stopped after it has become full. A queue is
151 * restarted when it drops below half full. The time this takes (assuming
152 * worst case 3 descriptors per packet and 1024 descriptors) is
153 * 512 / 3 * 1.2 = 205 usec.
154 */
155static unsigned int tx_irq_mod_usec = 150;
156
157/* This is the first interrupt mode to try out of:
158 * 0 => MSI-X
159 * 1 => MSI
160 * 2 => legacy
161 */
162static unsigned int interrupt_mode;
163
164/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
165 * i.e. the number of CPUs among which we may distribute simultaneous
166 * interrupt handling.
167 *
168 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
169 * The default (0) means to assign an interrupt to each core.
170 */
171static unsigned int rss_cpus;
172module_param(rss_cpus, uint, 0444);
173MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
174
175static bool phy_flash_cfg;
176module_param(phy_flash_cfg, bool, 0644);
177MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");
178
179static unsigned irq_adapt_low_thresh = 8000;
180module_param(irq_adapt_low_thresh, uint, 0644);
181MODULE_PARM_DESC(irq_adapt_low_thresh,
182 "Threshold score for reducing IRQ moderation");
183
184static unsigned irq_adapt_high_thresh = 16000;
185module_param(irq_adapt_high_thresh, uint, 0644);
186MODULE_PARM_DESC(irq_adapt_high_thresh,
187 "Threshold score for increasing IRQ moderation");
188
189static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
190 NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
191 NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
192 NETIF_MSG_TX_ERR | NETIF_MSG_HW);
193module_param(debug, uint, 0);
194MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
195
196/**************************************************************************
197 *
198 * Utility functions and prototypes
199 *
200 *************************************************************************/
201
202static int efx_soft_enable_interrupts(struct efx_nic *efx);
203static void efx_soft_disable_interrupts(struct efx_nic *efx);
204static void efx_remove_channel(struct efx_channel *channel);
205static void efx_remove_channels(struct efx_nic *efx);
206static const struct efx_channel_type efx_default_channel_type;
207static void efx_remove_port(struct efx_nic *efx);
208static void efx_init_napi_channel(struct efx_channel *channel);
209static void efx_fini_napi(struct efx_nic *efx);
210static void efx_fini_napi_channel(struct efx_channel *channel);
211static void efx_fini_struct(struct efx_nic *efx);
212static void efx_start_all(struct efx_nic *efx);
213static void efx_stop_all(struct efx_nic *efx);
214
215#define EFX_ASSERT_RESET_SERIALISED(efx) \
216 do { \
217 if ((efx->state == STATE_READY) || \
218 (efx->state == STATE_RECOVERY) || \
219 (efx->state == STATE_DISABLED)) \
220 ASSERT_RTNL(); \
221 } while (0)
222
223static int efx_check_disabled(struct efx_nic *efx)
224{
225 if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
226 netif_err(efx, drv, efx->net_dev,
227 "device is disabled due to earlier errors\n");
228 return -EIO;
229 }
230 return 0;
231}
232
233/**************************************************************************
234 *
235 * Event queue processing
236 *
237 *************************************************************************/
238
239/* Process channel's event queue
240 *
241 * This function is responsible for processing the event queue of a
242 * single channel. The caller must guarantee that this function will
243 * never be concurrently called more than once on the same channel,
244 * though different channels may be being processed concurrently.
245 */
246static int efx_process_channel(struct efx_channel *channel, int budget)
247{
248 struct efx_tx_queue *tx_queue;
249 int spent;
250
251 if (unlikely(!channel->enabled))
252 return 0;
253
254 efx_for_each_channel_tx_queue(tx_queue, channel) {
255 tx_queue->pkts_compl = 0;
256 tx_queue->bytes_compl = 0;
257 }
258
259 spent = efx_nic_process_eventq(channel, budget);
260 if (spent && efx_channel_has_rx_queue(channel)) {
261 struct efx_rx_queue *rx_queue =
262 efx_channel_get_rx_queue(channel);
263
264 efx_rx_flush_packet(channel);
265 efx_fast_push_rx_descriptors(rx_queue, true);
266 }
267
268 /* Update BQL */
269 efx_for_each_channel_tx_queue(tx_queue, channel) {
270 if (tx_queue->bytes_compl) {
271 netdev_tx_completed_queue(tx_queue->core_txq,
272 tx_queue->pkts_compl, tx_queue->bytes_compl);
273 }
274 }
275
276 return spent;
277}
278
279/* NAPI poll handler
280 *
281 * NAPI guarantees serialisation of polls of the same device, which
282 * provides the guarantee required by efx_process_channel().
283 */
284static int efx_poll(struct napi_struct *napi, int budget)
285{
286 struct efx_channel *channel =
287 container_of(napi, struct efx_channel, napi_str);
288 struct efx_nic *efx = channel->efx;
289 int spent;
290
291 if (!efx_channel_lock_napi(channel))
292 return budget;
293
294 netif_vdbg(efx, intr, efx->net_dev,
295 "channel %d NAPI poll executing on CPU %d\n",
296 channel->channel, raw_smp_processor_id());
297
298 spent = efx_process_channel(channel, budget);
299
300 if (spent < budget) {
301 if (efx_channel_has_rx_queue(channel) &&
302 efx->irq_rx_adaptive &&
303 unlikely(++channel->irq_count == 1000)) {
304 if (unlikely(channel->irq_mod_score <
305 irq_adapt_low_thresh)) {
306 if (channel->irq_moderation > 1) {
307 channel->irq_moderation -= 1;
308 efx->type->push_irq_moderation(channel);
309 }
310 } else if (unlikely(channel->irq_mod_score >
311 irq_adapt_high_thresh)) {
312 if (channel->irq_moderation <
313 efx->irq_rx_moderation) {
314 channel->irq_moderation += 1;
315 efx->type->push_irq_moderation(channel);
316 }
317 }
318 channel->irq_count = 0;
319 channel->irq_mod_score = 0;
320 }
321
322 efx_filter_rfs_expire(channel);
323
324 /* There is no race here; although napi_disable() will
325 * only wait for napi_complete(), this isn't a problem
326 * since efx_nic_eventq_read_ack() will have no effect if
327 * interrupts have already been disabled.
328 */
329 napi_complete(napi);
330 efx_nic_eventq_read_ack(channel);
331 }
332
333 efx_channel_unlock_napi(channel);
334 return spent;
335}
336
337/* Create event queue
338 * Event queue memory allocations are done only once. If the channel
339 * is reset, the memory buffer will be reused; this guards against
340 * errors during channel reset and also simplifies interrupt handling.
341 */
342static int efx_probe_eventq(struct efx_channel *channel)
343{
344 struct efx_nic *efx = channel->efx;
345 unsigned long entries;
346
347 netif_dbg(efx, probe, efx->net_dev,
348 "chan %d create event queue\n", channel->channel);
349
350 /* Build an event queue with room for one event per tx and rx buffer,
351 * plus some extra for link state events and MCDI completions. */
352 entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
353 EFX_BUG_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
354 channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;
355
356 return efx_nic_probe_eventq(channel);
357}
358
359/* Prepare channel's event queue */
360static int efx_init_eventq(struct efx_channel *channel)
361{
362 struct efx_nic *efx = channel->efx;
363 int rc;
364
365 EFX_WARN_ON_PARANOID(channel->eventq_init);
366
367 netif_dbg(efx, drv, efx->net_dev,
368 "chan %d init event queue\n", channel->channel);
369
370 rc = efx_nic_init_eventq(channel);
371 if (rc == 0) {
372 efx->type->push_irq_moderation(channel);
373 channel->eventq_read_ptr = 0;
374 channel->eventq_init = true;
375 }
376 return rc;
377}
378
379/* Enable event queue processing and NAPI */
380void efx_start_eventq(struct efx_channel *channel)
381{
382 netif_dbg(channel->efx, ifup, channel->efx->net_dev,
383 "chan %d start event queue\n", channel->channel);
384
385 /* Make sure the NAPI handler sees the enabled flag set */
386 channel->enabled = true;
387 smp_wmb();
388
389 efx_channel_enable(channel);
390 napi_enable(&channel->napi_str);
391 efx_nic_eventq_read_ack(channel);
392}
393
394/* Disable event queue processing and NAPI */
395void efx_stop_eventq(struct efx_channel *channel)
396{
397 if (!channel->enabled)
398 return;
399
400 napi_disable(&channel->napi_str);
401 while (!efx_channel_disable(channel))
402 usleep_range(1000, 20000);
403 channel->enabled = false;
404}
405
406static void efx_fini_eventq(struct efx_channel *channel)
407{
408 if (!channel->eventq_init)
409 return;
410
411 netif_dbg(channel->efx, drv, channel->efx->net_dev,
412 "chan %d fini event queue\n", channel->channel);
413
414 efx_nic_fini_eventq(channel);
415 channel->eventq_init = false;
416}
417
418static void efx_remove_eventq(struct efx_channel *channel)
419{
420 netif_dbg(channel->efx, drv, channel->efx->net_dev,
421 "chan %d remove event queue\n", channel->channel);
422
423 efx_nic_remove_eventq(channel);
424}
425
426/**************************************************************************
427 *
428 * Channel handling
429 *
430 *************************************************************************/
431
432/* Allocate and initialise a channel structure. */
433static struct efx_channel *
434efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
435{
436 struct efx_channel *channel;
437 struct efx_rx_queue *rx_queue;
438 struct efx_tx_queue *tx_queue;
439 int j;
440
441 channel = kzalloc(sizeof(*channel), GFP_KERNEL);
442 if (!channel)
443 return NULL;
444
445 channel->efx = efx;
446 channel->channel = i;
447 channel->type = &efx_default_channel_type;
448
449 for (j = 0; j < EFX_TXQ_TYPES; j++) {
450 tx_queue = &channel->tx_queue[j];
451 tx_queue->efx = efx;
452 tx_queue->queue = i * EFX_TXQ_TYPES + j;
453 tx_queue->channel = channel;
454 }
455
456 rx_queue = &channel->rx_queue;
457 rx_queue->efx = efx;
458 setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
459 (unsigned long)rx_queue);
460
461 return channel;
462}
463
464/* Allocate and initialise a channel structure, copying parameters
465 * (but not resources) from an old channel structure.
466 */
467static struct efx_channel *
468efx_copy_channel(const struct efx_channel *old_channel)
469{
470 struct efx_channel *channel;
471 struct efx_rx_queue *rx_queue;
472 struct efx_tx_queue *tx_queue;
473 int j;
474
475 channel = kmalloc(sizeof(*channel), GFP_KERNEL);
476 if (!channel)
477 return NULL;
478
479 *channel = *old_channel;
480
481 channel->napi_dev = NULL;
482 memset(&channel->eventq, 0, sizeof(channel->eventq));
483
484 for (j = 0; j < EFX_TXQ_TYPES; j++) {
485 tx_queue = &channel->tx_queue[j];
486 if (tx_queue->channel)
487 tx_queue->channel = channel;
488 tx_queue->buffer = NULL;
489 memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
490 }
491
492 rx_queue = &channel->rx_queue;
493 rx_queue->buffer = NULL;
494 memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
495 setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
496 (unsigned long)rx_queue);
497
498 return channel;
499}
500
501static int efx_probe_channel(struct efx_channel *channel)
502{
503 struct efx_tx_queue *tx_queue;
504 struct efx_rx_queue *rx_queue;
505 int rc;
506
507 netif_dbg(channel->efx, probe, channel->efx->net_dev,
508 "creating channel %d\n", channel->channel);
509
510 rc = channel->type->pre_probe(channel);
511 if (rc)
512 goto fail;
513
514 rc = efx_probe_eventq(channel);
515 if (rc)
516 goto fail;
517
518 efx_for_each_channel_tx_queue(tx_queue, channel) {
519 rc = efx_probe_tx_queue(tx_queue);
520 if (rc)
521 goto fail;
522 }
523
524 efx_for_each_channel_rx_queue(rx_queue, channel) {
525 rc = efx_probe_rx_queue(rx_queue);
526 if (rc)
527 goto fail;
528 }
529
530 return 0;
531
532fail:
533 efx_remove_channel(channel);
534 return rc;
535}
536
537static void
538efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
539{
540 struct efx_nic *efx = channel->efx;
541 const char *type;
542 int number;
543
544 number = channel->channel;
545 if (efx->tx_channel_offset == 0) {
546 type = "";
547 } else if (channel->channel < efx->tx_channel_offset) {
548 type = "-rx";
549 } else {
550 type = "-tx";
551 number -= efx->tx_channel_offset;
552 }
553 snprintf(buf, len, "%s%s-%d", efx->name, type, number);
554}
555
556static void efx_set_channel_names(struct efx_nic *efx)
557{
558 struct efx_channel *channel;
559
560 efx_for_each_channel(channel, efx)
561 channel->type->get_name(channel,
562 efx->msi_context[channel->channel].name,
563 sizeof(efx->msi_context[0].name));
564}
565
566static int efx_probe_channels(struct efx_nic *efx)
567{
568 struct efx_channel *channel;
569 int rc;
570
571 /* Restart special buffer allocation */
572 efx->next_buffer_table = 0;
573
574 /* Probe channels in reverse, so that any 'extra' channels
575 * use the start of the buffer table. This allows the traffic
576 * channels to be resized without moving them or wasting the
577 * entries before them.
578 */
579 efx_for_each_channel_rev(channel, efx) {
580 rc = efx_probe_channel(channel);
581 if (rc) {
582 netif_err(efx, probe, efx->net_dev,
583 "failed to create channel %d\n",
584 channel->channel);
585 goto fail;
586 }
587 }
588 efx_set_channel_names(efx);
589
590 return 0;
591
592fail:
593 efx_remove_channels(efx);
594 return rc;
595}
596
597/* Channels are shutdown and reinitialised whilst the NIC is running
598 * to propagate configuration changes (mtu, checksum offload), or
599 * to clear hardware error conditions
600 */
601static void efx_start_datapath(struct efx_nic *efx)
602{
603 bool old_rx_scatter = efx->rx_scatter;
604 struct efx_tx_queue *tx_queue;
605 struct efx_rx_queue *rx_queue;
606 struct efx_channel *channel;
607 size_t rx_buf_len;
608
609 /* Calculate the rx buffer allocation parameters required to
610 * support the current MTU, including padding for header
611 * alignment and overruns.
612 */
613 efx->rx_dma_len = (efx->rx_prefix_size +
614 EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
615 efx->type->rx_buffer_padding);
616 rx_buf_len = (sizeof(struct efx_rx_page_state) +
617 efx->rx_ip_align + efx->rx_dma_len);
618 if (rx_buf_len <= PAGE_SIZE) {
619 efx->rx_scatter = efx->type->always_rx_scatter;
620 efx->rx_buffer_order = 0;
621 } else if (efx->type->can_rx_scatter) {
622 BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
623 BUILD_BUG_ON(sizeof(struct efx_rx_page_state) +
624 2 * ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE,
625 EFX_RX_BUF_ALIGNMENT) >
626 PAGE_SIZE);
627 efx->rx_scatter = true;
628 efx->rx_dma_len = EFX_RX_USR_BUF_SIZE;
629 efx->rx_buffer_order = 0;
630 } else {
631 efx->rx_scatter = false;
632 efx->rx_buffer_order = get_order(rx_buf_len);
633 }
634
635 efx_rx_config_page_split(efx);
636 if (efx->rx_buffer_order)
637 netif_dbg(efx, drv, efx->net_dev,
638 "RX buf len=%u; page order=%u batch=%u\n",
639 efx->rx_dma_len, efx->rx_buffer_order,
640 efx->rx_pages_per_batch);
641 else
642 netif_dbg(efx, drv, efx->net_dev,
643 "RX buf len=%u step=%u bpp=%u; page batch=%u\n",
644 efx->rx_dma_len, efx->rx_page_buf_step,
645 efx->rx_bufs_per_page, efx->rx_pages_per_batch);
646
647 /* RX filters may also have scatter-enabled flags */
648 if (efx->rx_scatter != old_rx_scatter)
649 efx->type->filter_update_rx_scatter(efx);
650
651 /* We must keep at least one descriptor in a TX ring empty.
652 * We could avoid this when the queue size does not exactly
653 * match the hardware ring size, but it's not that important.
654 * Therefore we stop the queue when one more skb might fill
655 * the ring completely. We wake it when half way back to
656 * empty.
657 */
658 efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
659 efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
660
661 /* Initialise the channels */
662 efx_for_each_channel(channel, efx) {
663 efx_for_each_channel_tx_queue(tx_queue, channel) {
664 efx_init_tx_queue(tx_queue);
665 atomic_inc(&efx->active_queues);
666 }
667
668 efx_for_each_channel_rx_queue(rx_queue, channel) {
669 efx_init_rx_queue(rx_queue);
670 atomic_inc(&efx->active_queues);
671 efx_stop_eventq(channel);
672 efx_fast_push_rx_descriptors(rx_queue, false);
673 efx_start_eventq(channel);
674 }
675
676 WARN_ON(channel->rx_pkt_n_frags);
677 }
678
679 efx_ptp_start_datapath(efx);
680
681 if (netif_device_present(efx->net_dev))
682 netif_tx_wake_all_queues(efx->net_dev);
683}
684
685static void efx_stop_datapath(struct efx_nic *efx)
686{
687 struct efx_channel *channel;
688 struct efx_tx_queue *tx_queue;
689 struct efx_rx_queue *rx_queue;
690 int rc;
691
692 EFX_ASSERT_RESET_SERIALISED(efx);
693 BUG_ON(efx->port_enabled);
694
695 efx_ptp_stop_datapath(efx);
696
697 /* Stop RX refill */
698 efx_for_each_channel(channel, efx) {
699 efx_for_each_channel_rx_queue(rx_queue, channel)
700 rx_queue->refill_enabled = false;
701 }
702
703 efx_for_each_channel(channel, efx) {
704 /* RX packet processing is pipelined, so wait for the
705 * NAPI handler to complete. At least event queue 0
706 * might be kept active by non-data events, so don't
707 * use napi_synchronize() but actually disable NAPI
708 * temporarily.
709 */
710 if (efx_channel_has_rx_queue(channel)) {
711 efx_stop_eventq(channel);
712 efx_start_eventq(channel);
713 }
714 }
715
716 rc = efx->type->fini_dmaq(efx);
717 if (rc && EFX_WORKAROUND_7803(efx)) {
718 /* Schedule a reset to recover from the flush failure. The
719 * descriptor caches reference memory we're about to free,
720 * but falcon_reconfigure_mac_wrapper() won't reconnect
721 * the MACs because of the pending reset.
722 */
723 netif_err(efx, drv, efx->net_dev,
724 "Resetting to recover from flush failure\n");
725 efx_schedule_reset(efx, RESET_TYPE_ALL);
726 } else if (rc) {
727 netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
728 } else {
729 netif_dbg(efx, drv, efx->net_dev,
730 "successfully flushed all queues\n");
731 }
732
733 efx_for_each_channel(channel, efx) {
734 efx_for_each_channel_rx_queue(rx_queue, channel)
735 efx_fini_rx_queue(rx_queue);
736 efx_for_each_possible_channel_tx_queue(tx_queue, channel)
737 efx_fini_tx_queue(tx_queue);
738 }
739}
740
741static void efx_remove_channel(struct efx_channel *channel)
742{
743 struct efx_tx_queue *tx_queue;
744 struct efx_rx_queue *rx_queue;
745
746 netif_dbg(channel->efx, drv, channel->efx->net_dev,
747 "destroy chan %d\n", channel->channel);
748
749 efx_for_each_channel_rx_queue(rx_queue, channel)
750 efx_remove_rx_queue(rx_queue);
751 efx_for_each_possible_channel_tx_queue(tx_queue, channel)
752 efx_remove_tx_queue(tx_queue);
753 efx_remove_eventq(channel);
754 channel->type->post_remove(channel);
755}
756
757static void efx_remove_channels(struct efx_nic *efx)
758{
759 struct efx_channel *channel;
760
761 efx_for_each_channel(channel, efx)
762 efx_remove_channel(channel);
763}
764
765int
766efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
767{
768 struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
769 u32 old_rxq_entries, old_txq_entries;
770 unsigned i, next_buffer_table = 0;
771 int rc, rc2;
772
773 rc = efx_check_disabled(efx);
774 if (rc)
775 return rc;
776
777 /* Not all channels should be reallocated. We must avoid
778 * reallocating their buffer table entries.
779 */
780 efx_for_each_channel(channel, efx) {
781 struct efx_rx_queue *rx_queue;
782 struct efx_tx_queue *tx_queue;
783
784 if (channel->type->copy)
785 continue;
786 next_buffer_table = max(next_buffer_table,
787 channel->eventq.index +
788 channel->eventq.entries);
789 efx_for_each_channel_rx_queue(rx_queue, channel)
790 next_buffer_table = max(next_buffer_table,
791 rx_queue->rxd.index +
792 rx_queue->rxd.entries);
793 efx_for_each_channel_tx_queue(tx_queue, channel)
794 next_buffer_table = max(next_buffer_table,
795 tx_queue->txd.index +
796 tx_queue->txd.entries);
797 }
798
799 efx_device_detach_sync(efx);
800 efx_stop_all(efx);
801 efx_soft_disable_interrupts(efx);
802
803 /* Clone channels (where possible) */
804 memset(other_channel, 0, sizeof(other_channel));
805 for (i = 0; i < efx->n_channels; i++) {
806 channel = efx->channel[i];
807 if (channel->type->copy)
808 channel = channel->type->copy(channel);
809 if (!channel) {
810 rc = -ENOMEM;
811 goto out;
812 }
813 other_channel[i] = channel;
814 }
815
816 /* Swap entry counts and channel pointers */
817 old_rxq_entries = efx->rxq_entries;
818 old_txq_entries = efx->txq_entries;
819 efx->rxq_entries = rxq_entries;
820 efx->txq_entries = txq_entries;
821 for (i = 0; i < efx->n_channels; i++) {
822 channel = efx->channel[i];
823 efx->channel[i] = other_channel[i];
824 other_channel[i] = channel;
825 }
826
827 /* Restart buffer table allocation */
828 efx->next_buffer_table = next_buffer_table;
829
830 for (i = 0; i < efx->n_channels; i++) {
831 channel = efx->channel[i];
832 if (!channel->type->copy)
833 continue;
834 rc = efx_probe_channel(channel);
835 if (rc)
836 goto rollback;
837 efx_init_napi_channel(efx->channel[i]);
838 }
839
840out:
841 /* Destroy unused channel structures */
842 for (i = 0; i < efx->n_channels; i++) {
843 channel = other_channel[i];
844 if (channel && channel->type->copy) {
845 efx_fini_napi_channel(channel);
846 efx_remove_channel(channel);
847 kfree(channel);
848 }
849 }
850
851 rc2 = efx_soft_enable_interrupts(efx);
852 if (rc2) {
853 rc = rc ? rc : rc2;
854 netif_err(efx, drv, efx->net_dev,
855 "unable to restart interrupts on channel reallocation\n");
856 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
857 } else {
858 efx_start_all(efx);
859 netif_device_attach(efx->net_dev);
860 }
861 return rc;
862
863rollback:
864 /* Swap back */
865 efx->rxq_entries = old_rxq_entries;
866 efx->txq_entries = old_txq_entries;
867 for (i = 0; i < efx->n_channels; i++) {
868 channel = efx->channel[i];
869 efx->channel[i] = other_channel[i];
870 other_channel[i] = channel;
871 }
872 goto out;
873}
874
875void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
876{
877 mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
878}
879
880static const struct efx_channel_type efx_default_channel_type = {
881 .pre_probe = efx_channel_dummy_op_int,
882 .post_remove = efx_channel_dummy_op_void,
883 .get_name = efx_get_channel_name,
884 .copy = efx_copy_channel,
885 .keep_eventq = false,
886};
887
888int efx_channel_dummy_op_int(struct efx_channel *channel)
889{
890 return 0;
891}
892
893void efx_channel_dummy_op_void(struct efx_channel *channel)
894{
895}
896
897/**************************************************************************
898 *
899 * Port handling
900 *
901 **************************************************************************/
902
903/* This ensures that the kernel is kept informed (via
904 * netif_carrier_on/off) of the link status, and also maintains the
905 * link status's stop on the port's TX queue.
906 */
907void efx_link_status_changed(struct efx_nic *efx)
908{
909 struct efx_link_state *link_state = &efx->link_state;
910
911 /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
912 * that no events are triggered between unregister_netdev() and the
913 * driver unloading. A more general condition is that NETDEV_CHANGE
914 * can only be generated between NETDEV_UP and NETDEV_DOWN */
915 if (!netif_running(efx->net_dev))
916 return;
917
918 if (link_state->up != netif_carrier_ok(efx->net_dev)) {
919 efx->n_link_state_changes++;
920
921 if (link_state->up)
922 netif_carrier_on(efx->net_dev);
923 else
924 netif_carrier_off(efx->net_dev);
925 }
926
927 /* Status message for kernel log */
928 if (link_state->up)
929 netif_info(efx, link, efx->net_dev,
930 "link up at %uMbps %s-duplex (MTU %d)\n",
931 link_state->speed, link_state->fd ? "full" : "half",
932 efx->net_dev->mtu);
933 else
934 netif_info(efx, link, efx->net_dev, "link down\n");
935}
936
937void efx_link_set_advertising(struct efx_nic *efx, u32 advertising)
938{
939 efx->link_advertising = advertising;
940 if (advertising) {
941 if (advertising & ADVERTISED_Pause)
942 efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
943 else
944 efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
945 if (advertising & ADVERTISED_Asym_Pause)
946 efx->wanted_fc ^= EFX_FC_TX;
947 }
948}
949
950void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
951{
952 efx->wanted_fc = wanted_fc;
953 if (efx->link_advertising) {
954 if (wanted_fc & EFX_FC_RX)
955 efx->link_advertising |= (ADVERTISED_Pause |
956 ADVERTISED_Asym_Pause);
957 else
958 efx->link_advertising &= ~(ADVERTISED_Pause |
959 ADVERTISED_Asym_Pause);
960 if (wanted_fc & EFX_FC_TX)
961 efx->link_advertising ^= ADVERTISED_Asym_Pause;
962 }
963}
964
965static void efx_fini_port(struct efx_nic *efx);
966
967/* We assume that efx->type->reconfigure_mac will always try to sync RX
968 * filters and therefore needs to read-lock the filter table against freeing
969 */
970void efx_mac_reconfigure(struct efx_nic *efx)
971{
972 down_read(&efx->filter_sem);
973 efx->type->reconfigure_mac(efx);
974 up_read(&efx->filter_sem);
975}
976
977/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
978 * the MAC appropriately. All other PHY configuration changes are pushed
979 * through phy_op->set_settings(), and pushed asynchronously to the MAC
980 * through efx_monitor().
981 *
982 * Callers must hold the mac_lock
983 */
984int __efx_reconfigure_port(struct efx_nic *efx)
985{
986 enum efx_phy_mode phy_mode;
987 int rc;
988
989 WARN_ON(!mutex_is_locked(&efx->mac_lock));
990
991 /* Disable PHY transmit in mac level loopbacks */
992 phy_mode = efx->phy_mode;
993 if (LOOPBACK_INTERNAL(efx))
994 efx->phy_mode |= PHY_MODE_TX_DISABLED;
995 else
996 efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
997
998 rc = efx->type->reconfigure_port(efx);
999
1000 if (rc)
1001 efx->phy_mode = phy_mode;
1002
1003 return rc;
1004}
1005
1006/* Reinitialise the MAC to pick up new PHY settings, even if the port is
1007 * disabled. */
1008int efx_reconfigure_port(struct efx_nic *efx)
1009{
1010 int rc;
1011
1012 EFX_ASSERT_RESET_SERIALISED(efx);
1013
1014 mutex_lock(&efx->mac_lock);
1015 rc = __efx_reconfigure_port(efx);
1016 mutex_unlock(&efx->mac_lock);
1017
1018 return rc;
1019}
1020
1021/* Asynchronous work item for changing MAC promiscuity and multicast
1022 * hash. Avoid a drain/rx_ingress enable by reconfiguring the current
1023 * MAC directly. */
1024static void efx_mac_work(struct work_struct *data)
1025{
1026 struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);
1027
1028 mutex_lock(&efx->mac_lock);
1029 if (efx->port_enabled)
1030 efx_mac_reconfigure(efx);
1031 mutex_unlock(&efx->mac_lock);
1032}
1033
1034static int efx_probe_port(struct efx_nic *efx)
1035{
1036 int rc;
1037
1038 netif_dbg(efx, probe, efx->net_dev, "create port\n");
1039
1040 if (phy_flash_cfg)
1041 efx->phy_mode = PHY_MODE_SPECIAL;
1042
1043 /* Connect up MAC/PHY operations table */
1044 rc = efx->type->probe_port(efx);
1045 if (rc)
1046 return rc;
1047
1048 /* Initialise MAC address to permanent address */
1049 ether_addr_copy(efx->net_dev->dev_addr, efx->net_dev->perm_addr);
1050
1051 return 0;
1052}
1053
1054static int efx_init_port(struct efx_nic *efx)
1055{
1056 int rc;
1057
1058 netif_dbg(efx, drv, efx->net_dev, "init port\n");
1059
1060 mutex_lock(&efx->mac_lock);
1061
1062 rc = efx->phy_op->init(efx);
1063 if (rc)
1064 goto fail1;
1065
1066 efx->port_initialized = true;
1067
1068 /* Reconfigure the MAC before creating dma queues (required for
1069 * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
1070 efx_mac_reconfigure(efx);
1071
1072 /* Ensure the PHY advertises the correct flow control settings */
1073 rc = efx->phy_op->reconfigure(efx);
1074 if (rc && rc != -EPERM)
1075 goto fail2;
1076
1077 mutex_unlock(&efx->mac_lock);
1078 return 0;
1079
1080fail2:
1081 efx->phy_op->fini(efx);
1082fail1:
1083 mutex_unlock(&efx->mac_lock);
1084 return rc;
1085}
1086
1087static void efx_start_port(struct efx_nic *efx)
1088{
1089 netif_dbg(efx, ifup, efx->net_dev, "start port\n");
1090 BUG_ON(efx->port_enabled);
1091
1092 mutex_lock(&efx->mac_lock);
1093 efx->port_enabled = true;
1094
1095 /* Ensure MAC ingress/egress is enabled */
1096 efx_mac_reconfigure(efx);
1097
1098 mutex_unlock(&efx->mac_lock);
1099}
1100
1101/* Cancel work for MAC reconfiguration, periodic hardware monitoring
1102 * and the async self-test, wait for them to finish and prevent them
1103 * being scheduled again. This doesn't cover online resets, which
1104 * should only be cancelled when removing the device.
1105 */
1106static void efx_stop_port(struct efx_nic *efx)
1107{
1108 netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");
1109
1110 EFX_ASSERT_RESET_SERIALISED(efx);
1111
1112 mutex_lock(&efx->mac_lock);
1113 efx->port_enabled = false;
1114 mutex_unlock(&efx->mac_lock);
1115
1116 /* Serialise against efx_set_multicast_list() */
1117 netif_addr_lock_bh(efx->net_dev);
1118 netif_addr_unlock_bh(efx->net_dev);
1119
1120 cancel_delayed_work_sync(&efx->monitor_work);
1121 efx_selftest_async_cancel(efx);
1122 cancel_work_sync(&efx->mac_work);
1123}
1124
1125static void efx_fini_port(struct efx_nic *efx)
1126{
1127 netif_dbg(efx, drv, efx->net_dev, "shut down port\n");
1128
1129 if (!efx->port_initialized)
1130 return;
1131
1132 efx->phy_op->fini(efx);
1133 efx->port_initialized = false;
1134
1135 efx->link_state.up = false;
1136 efx_link_status_changed(efx);
1137}
1138
1139static void efx_remove_port(struct efx_nic *efx)
1140{
1141 netif_dbg(efx, drv, efx->net_dev, "destroying port\n");
1142
1143 efx->type->remove_port(efx);
1144}
1145
1146/**************************************************************************
1147 *
1148 * NIC handling
1149 *
1150 **************************************************************************/
1151
1152static LIST_HEAD(efx_primary_list);
1153static LIST_HEAD(efx_unassociated_list);
1154
1155static bool efx_same_controller(struct efx_nic *left, struct efx_nic *right)
1156{
1157 return left->type == right->type &&
1158 left->vpd_sn && right->vpd_sn &&
1159 !strcmp(left->vpd_sn, right->vpd_sn);
1160}
1161
1162static void efx_associate(struct efx_nic *efx)
1163{
1164 struct efx_nic *other, *next;
1165
1166 if (efx->primary == efx) {
1167 /* Adding primary function; look for secondaries */
1168
1169 netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n");
1170 list_add_tail(&efx->node, &efx_primary_list);
1171
1172 list_for_each_entry_safe(other, next, &efx_unassociated_list,
1173 node) {
1174 if (efx_same_controller(efx, other)) {
1175 list_del(&other->node);
1176 netif_dbg(other, probe, other->net_dev,
1177 "moving to secondary list of %s %s\n",
1178 pci_name(efx->pci_dev),
1179 efx->net_dev->name);
1180 list_add_tail(&other->node,
1181 &efx->secondary_list);
1182 other->primary = efx;
1183 }
1184 }
1185 } else {
1186 /* Adding secondary function; look for primary */
1187
1188 list_for_each_entry(other, &efx_primary_list, node) {
1189 if (efx_same_controller(efx, other)) {
1190 netif_dbg(efx, probe, efx->net_dev,
1191 "adding to secondary list of %s %s\n",
1192 pci_name(other->pci_dev),
1193 other->net_dev->name);
1194 list_add_tail(&efx->node,
1195 &other->secondary_list);
1196 efx->primary = other;
1197 return;
1198 }
1199 }
1200
1201 netif_dbg(efx, probe, efx->net_dev,
1202 "adding to unassociated list\n");
1203 list_add_tail(&efx->node, &efx_unassociated_list);
1204 }
1205}
1206
1207static void efx_dissociate(struct efx_nic *efx)
1208{
1209 struct efx_nic *other, *next;
1210
1211 list_del(&efx->node);
1212 efx->primary = NULL;
1213
1214 list_for_each_entry_safe(other, next, &efx->secondary_list, node) {
1215 list_del(&other->node);
1216 netif_dbg(other, probe, other->net_dev,
1217 "moving to unassociated list\n");
1218 list_add_tail(&other->node, &efx_unassociated_list);
1219 other->primary = NULL;
1220 }
1221}
1222
1223/* This configures the PCI device to enable I/O and DMA. */
1224static int efx_init_io(struct efx_nic *efx)
1225{
1226 struct pci_dev *pci_dev = efx->pci_dev;
1227 dma_addr_t dma_mask = efx->type->max_dma_mask;
1228 unsigned int mem_map_size = efx->type->mem_map_size(efx);
1229 int rc, bar;
1230
1231 netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
1232
1233 bar = efx->type->mem_bar;
1234
1235 rc = pci_enable_device(pci_dev);
1236 if (rc) {
1237 netif_err(efx, probe, efx->net_dev,
1238 "failed to enable PCI device\n");
1239 goto fail1;
1240 }
1241
1242 pci_set_master(pci_dev);
1243
1244 /* Set the PCI DMA mask. Try all possibilities from our
1245 * genuine mask down to 32 bits, because some architectures
1246 * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
1247 * masks event though they reject 46 bit masks.
1248 */
1249 while (dma_mask > 0x7fffffffUL) {
1250 rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
1251 if (rc == 0)
1252 break;
1253 dma_mask >>= 1;
1254 }
1255 if (rc) {
1256 netif_err(efx, probe, efx->net_dev,
1257 "could not find a suitable DMA mask\n");
1258 goto fail2;
1259 }
1260 netif_dbg(efx, probe, efx->net_dev,
1261 "using DMA mask %llx\n", (unsigned long long) dma_mask);
1262
1263 efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
1264 rc = pci_request_region(pci_dev, bar, "sfc");
1265 if (rc) {
1266 netif_err(efx, probe, efx->net_dev,
1267 "request for memory BAR failed\n");
1268 rc = -EIO;
1269 goto fail3;
1270 }
1271 efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size);
1272 if (!efx->membase) {
1273 netif_err(efx, probe, efx->net_dev,
1274 "could not map memory BAR at %llx+%x\n",
1275 (unsigned long long)efx->membase_phys, mem_map_size);
1276 rc = -ENOMEM;
1277 goto fail4;
1278 }
1279 netif_dbg(efx, probe, efx->net_dev,
1280 "memory BAR at %llx+%x (virtual %p)\n",
1281 (unsigned long long)efx->membase_phys, mem_map_size,
1282 efx->membase);
1283
1284 return 0;
1285
1286 fail4:
1287 pci_release_region(efx->pci_dev, bar);
1288 fail3:
1289 efx->membase_phys = 0;
1290 fail2:
1291 pci_disable_device(efx->pci_dev);
1292 fail1:
1293 return rc;
1294}
1295
1296static void efx_fini_io(struct efx_nic *efx)
1297{
1298 int bar;
1299
1300 netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
1301
1302 if (efx->membase) {
1303 iounmap(efx->membase);
1304 efx->membase = NULL;
1305 }
1306
1307 if (efx->membase_phys) {
1308 bar = efx->type->mem_bar;
1309 pci_release_region(efx->pci_dev, bar);
1310 efx->membase_phys = 0;
1311 }
1312
1313 /* Don't disable bus-mastering if VFs are assigned */
1314 if (!pci_vfs_assigned(efx->pci_dev))
1315 pci_disable_device(efx->pci_dev);
1316}
1317
1318void efx_set_default_rx_indir_table(struct efx_nic *efx)
1319{
1320 size_t i;
1321
1322 for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
1323 efx->rx_indir_table[i] =
1324 ethtool_rxfh_indir_default(i, efx->rss_spread);
1325}
1326
1327static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
1328{
1329 cpumask_var_t thread_mask;
1330 unsigned int count;
1331 int cpu;
1332
1333 if (rss_cpus) {
1334 count = rss_cpus;
1335 } else {
1336 if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
1337 netif_warn(efx, probe, efx->net_dev,
1338 "RSS disabled due to allocation failure\n");
1339 return 1;
1340 }
1341
1342 count = 0;
1343 for_each_online_cpu(cpu) {
1344 if (!cpumask_test_cpu(cpu, thread_mask)) {
1345 ++count;
1346 cpumask_or(thread_mask, thread_mask,
1347 topology_sibling_cpumask(cpu));
1348 }
1349 }
1350
1351 free_cpumask_var(thread_mask);
1352 }
1353
1354 /* If RSS is requested for the PF *and* VFs then we can't write RSS
1355 * table entries that are inaccessible to VFs
1356 */
1357#ifdef CONFIG_SFC_SRIOV
1358 if (efx->type->sriov_wanted) {
1359 if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
1360 count > efx_vf_size(efx)) {
1361 netif_warn(efx, probe, efx->net_dev,
1362 "Reducing number of RSS channels from %u to %u for "
1363 "VF support. Increase vf-msix-limit to use more "
1364 "channels on the PF.\n",
1365 count, efx_vf_size(efx));
1366 count = efx_vf_size(efx);
1367 }
1368 }
1369#endif
1370
1371 return count;
1372}
1373
1374/* Probe the number and type of interrupts we are able to obtain, and
1375 * the resulting numbers of channels and RX queues.
1376 */
1377static int efx_probe_interrupts(struct efx_nic *efx)
1378{
1379 unsigned int extra_channels = 0;
1380 unsigned int i, j;
1381 int rc;
1382
1383 for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
1384 if (efx->extra_channel_type[i])
1385 ++extra_channels;
1386
1387 if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
1388 struct msix_entry xentries[EFX_MAX_CHANNELS];
1389 unsigned int n_channels;
1390
1391 n_channels = efx_wanted_parallelism(efx);
1392 if (efx_separate_tx_channels)
1393 n_channels *= 2;
1394 n_channels += extra_channels;
1395 n_channels = min(n_channels, efx->max_channels);
1396
1397 for (i = 0; i < n_channels; i++)
1398 xentries[i].entry = i;
1399 rc = pci_enable_msix_range(efx->pci_dev,
1400 xentries, 1, n_channels);
1401 if (rc < 0) {
1402 /* Fall back to single channel MSI */
1403 efx->interrupt_mode = EFX_INT_MODE_MSI;
1404 netif_err(efx, drv, efx->net_dev,
1405 "could not enable MSI-X\n");
1406 } else if (rc < n_channels) {
1407 netif_err(efx, drv, efx->net_dev,
1408 "WARNING: Insufficient MSI-X vectors"
1409 " available (%d < %u).\n", rc, n_channels);
1410 netif_err(efx, drv, efx->net_dev,
1411 "WARNING: Performance may be reduced.\n");
1412 n_channels = rc;
1413 }
1414
1415 if (rc > 0) {
1416 efx->n_channels = n_channels;
1417 if (n_channels > extra_channels)
1418 n_channels -= extra_channels;
1419 if (efx_separate_tx_channels) {
1420 efx->n_tx_channels = min(max(n_channels / 2,
1421 1U),
1422 efx->max_tx_channels);
1423 efx->n_rx_channels = max(n_channels -
1424 efx->n_tx_channels,
1425 1U);
1426 } else {
1427 efx->n_tx_channels = min(n_channels,
1428 efx->max_tx_channels);
1429 efx->n_rx_channels = n_channels;
1430 }
1431 for (i = 0; i < efx->n_channels; i++)
1432 efx_get_channel(efx, i)->irq =
1433 xentries[i].vector;
1434 }
1435 }
1436
1437 /* Try single interrupt MSI */
1438 if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
1439 efx->n_channels = 1;
1440 efx->n_rx_channels = 1;
1441 efx->n_tx_channels = 1;
1442 rc = pci_enable_msi(efx->pci_dev);
1443 if (rc == 0) {
1444 efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
1445 } else {
1446 netif_err(efx, drv, efx->net_dev,
1447 "could not enable MSI\n");
1448 efx->interrupt_mode = EFX_INT_MODE_LEGACY;
1449 }
1450 }
1451
1452 /* Assume legacy interrupts */
1453 if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
1454 efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
1455 efx->n_rx_channels = 1;
1456 efx->n_tx_channels = 1;
1457 efx->legacy_irq = efx->pci_dev->irq;
1458 }
1459
1460 /* Assign extra channels if possible */
1461 j = efx->n_channels;
1462 for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
1463 if (!efx->extra_channel_type[i])
1464 continue;
1465 if (efx->interrupt_mode != EFX_INT_MODE_MSIX ||
1466 efx->n_channels <= extra_channels) {
1467 efx->extra_channel_type[i]->handle_no_channel(efx);
1468 } else {
1469 --j;
1470 efx_get_channel(efx, j)->type =
1471 efx->extra_channel_type[i];
1472 }
1473 }
1474
1475 /* RSS might be usable on VFs even if it is disabled on the PF */
1476#ifdef CONFIG_SFC_SRIOV
1477 if (efx->type->sriov_wanted) {
1478 efx->rss_spread = ((efx->n_rx_channels > 1 ||
1479 !efx->type->sriov_wanted(efx)) ?
1480 efx->n_rx_channels : efx_vf_size(efx));
1481 return 0;
1482 }
1483#endif
1484 efx->rss_spread = efx->n_rx_channels;
1485
1486 return 0;
1487}
1488
1489static int efx_soft_enable_interrupts(struct efx_nic *efx)
1490{
1491 struct efx_channel *channel, *end_channel;
1492 int rc;
1493
1494 BUG_ON(efx->state == STATE_DISABLED);
1495
1496 efx->irq_soft_enabled = true;
1497 smp_wmb();
1498
1499 efx_for_each_channel(channel, efx) {
1500 if (!channel->type->keep_eventq) {
1501 rc = efx_init_eventq(channel);
1502 if (rc)
1503 goto fail;
1504 }
1505 efx_start_eventq(channel);
1506 }
1507
1508 efx_mcdi_mode_event(efx);
1509
1510 return 0;
1511fail:
1512 end_channel = channel;
1513 efx_for_each_channel(channel, efx) {
1514 if (channel == end_channel)
1515 break;
1516 efx_stop_eventq(channel);
1517 if (!channel->type->keep_eventq)
1518 efx_fini_eventq(channel);
1519 }
1520
1521 return rc;
1522}
1523
1524static void efx_soft_disable_interrupts(struct efx_nic *efx)
1525{
1526 struct efx_channel *channel;
1527
1528 if (efx->state == STATE_DISABLED)
1529 return;
1530
1531 efx_mcdi_mode_poll(efx);
1532
1533 efx->irq_soft_enabled = false;
1534 smp_wmb();
1535
1536 if (efx->legacy_irq)
1537 synchronize_irq(efx->legacy_irq);
1538
1539 efx_for_each_channel(channel, efx) {
1540 if (channel->irq)
1541 synchronize_irq(channel->irq);
1542
1543 efx_stop_eventq(channel);
1544 if (!channel->type->keep_eventq)
1545 efx_fini_eventq(channel);
1546 }
1547
1548 /* Flush the asynchronous MCDI request queue */
1549 efx_mcdi_flush_async(efx);
1550}
1551
1552static int efx_enable_interrupts(struct efx_nic *efx)
1553{
1554 struct efx_channel *channel, *end_channel;
1555 int rc;
1556
1557 BUG_ON(efx->state == STATE_DISABLED);
1558
1559 if (efx->eeh_disabled_legacy_irq) {
1560 enable_irq(efx->legacy_irq);
1561 efx->eeh_disabled_legacy_irq = false;
1562 }
1563
1564 efx->type->irq_enable_master(efx);
1565
1566 efx_for_each_channel(channel, efx) {
1567 if (channel->type->keep_eventq) {
1568 rc = efx_init_eventq(channel);
1569 if (rc)
1570 goto fail;
1571 }
1572 }
1573
1574 rc = efx_soft_enable_interrupts(efx);
1575 if (rc)
1576 goto fail;
1577
1578 return 0;
1579
1580fail:
1581 end_channel = channel;
1582 efx_for_each_channel(channel, efx) {
1583 if (channel == end_channel)
1584 break;
1585 if (channel->type->keep_eventq)
1586 efx_fini_eventq(channel);
1587 }
1588
1589 efx->type->irq_disable_non_ev(efx);
1590
1591 return rc;
1592}
1593
1594static void efx_disable_interrupts(struct efx_nic *efx)
1595{
1596 struct efx_channel *channel;
1597
1598 efx_soft_disable_interrupts(efx);
1599
1600 efx_for_each_channel(channel, efx) {
1601 if (channel->type->keep_eventq)
1602 efx_fini_eventq(channel);
1603 }
1604
1605 efx->type->irq_disable_non_ev(efx);
1606}
1607
1608static void efx_remove_interrupts(struct efx_nic *efx)
1609{
1610 struct efx_channel *channel;
1611
1612 /* Remove MSI/MSI-X interrupts */
1613 efx_for_each_channel(channel, efx)
1614 channel->irq = 0;
1615 pci_disable_msi(efx->pci_dev);
1616 pci_disable_msix(efx->pci_dev);
1617
1618 /* Remove legacy interrupt */
1619 efx->legacy_irq = 0;
1620}
1621
1622static void efx_set_channels(struct efx_nic *efx)
1623{
1624 struct efx_channel *channel;
1625 struct efx_tx_queue *tx_queue;
1626
1627 efx->tx_channel_offset =
1628 efx_separate_tx_channels ?
1629 efx->n_channels - efx->n_tx_channels : 0;
1630
1631 /* We need to mark which channels really have RX and TX
1632 * queues, and adjust the TX queue numbers if we have separate
1633 * RX-only and TX-only channels.
1634 */
1635 efx_for_each_channel(channel, efx) {
1636 if (channel->channel < efx->n_rx_channels)
1637 channel->rx_queue.core_index = channel->channel;
1638 else
1639 channel->rx_queue.core_index = -1;
1640
1641 efx_for_each_channel_tx_queue(tx_queue, channel)
1642 tx_queue->queue -= (efx->tx_channel_offset *
1643 EFX_TXQ_TYPES);
1644 }
1645}
1646
1647static int efx_probe_nic(struct efx_nic *efx)
1648{
1649 int rc;
1650
1651 netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");
1652
1653 /* Carry out hardware-type specific initialisation */
1654 rc = efx->type->probe(efx);
1655 if (rc)
1656 return rc;
1657
1658 do {
1659 if (!efx->max_channels || !efx->max_tx_channels) {
1660 netif_err(efx, drv, efx->net_dev,
1661 "Insufficient resources to allocate"
1662 " any channels\n");
1663 rc = -ENOSPC;
1664 goto fail1;
1665 }
1666
1667 /* Determine the number of channels and queues by trying
1668 * to hook in MSI-X interrupts.
1669 */
1670 rc = efx_probe_interrupts(efx);
1671 if (rc)
1672 goto fail1;
1673
1674 efx_set_channels(efx);
1675
1676 /* dimension_resources can fail with EAGAIN */
1677 rc = efx->type->dimension_resources(efx);
1678 if (rc != 0 && rc != -EAGAIN)
1679 goto fail2;
1680
1681 if (rc == -EAGAIN)
1682 /* try again with new max_channels */
1683 efx_remove_interrupts(efx);
1684
1685 } while (rc == -EAGAIN);
1686
1687 if (efx->n_channels > 1)
1688 netdev_rss_key_fill(&efx->rx_hash_key,
1689 sizeof(efx->rx_hash_key));
1690 efx_set_default_rx_indir_table(efx);
1691
1692 netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
1693 netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
1694
1695 /* Initialise the interrupt moderation settings */
1696 efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
1697 true);
1698
1699 return 0;
1700
1701fail2:
1702 efx_remove_interrupts(efx);
1703fail1:
1704 efx->type->remove(efx);
1705 return rc;
1706}
1707
1708static void efx_remove_nic(struct efx_nic *efx)
1709{
1710 netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");
1711
1712 efx_remove_interrupts(efx);
1713 efx->type->remove(efx);
1714}
1715
1716static int efx_probe_filters(struct efx_nic *efx)
1717{
1718 int rc;
1719
1720 spin_lock_init(&efx->filter_lock);
1721 init_rwsem(&efx->filter_sem);
1722 down_write(&efx->filter_sem);
1723 rc = efx->type->filter_table_probe(efx);
1724 if (rc)
1725 goto out_unlock;
1726
1727#ifdef CONFIG_RFS_ACCEL
1728 if (efx->type->offload_features & NETIF_F_NTUPLE) {
1729 efx->rps_flow_id = kcalloc(efx->type->max_rx_ip_filters,
1730 sizeof(*efx->rps_flow_id),
1731 GFP_KERNEL);
1732 if (!efx->rps_flow_id) {
1733 efx->type->filter_table_remove(efx);
1734 rc = -ENOMEM;
1735 goto out_unlock;
1736 }
1737 }
1738#endif
1739out_unlock:
1740 up_write(&efx->filter_sem);
1741 return rc;
1742}
1743
1744static void efx_remove_filters(struct efx_nic *efx)
1745{
1746#ifdef CONFIG_RFS_ACCEL
1747 kfree(efx->rps_flow_id);
1748#endif
1749 down_write(&efx->filter_sem);
1750 efx->type->filter_table_remove(efx);
1751 up_write(&efx->filter_sem);
1752}
1753
1754static void efx_restore_filters(struct efx_nic *efx)
1755{
1756 down_read(&efx->filter_sem);
1757 efx->type->filter_table_restore(efx);
1758 up_read(&efx->filter_sem);
1759}
1760
1761/**************************************************************************
1762 *
1763 * NIC startup/shutdown
1764 *
1765 *************************************************************************/
1766
1767static int efx_probe_all(struct efx_nic *efx)
1768{
1769 int rc;
1770
1771 rc = efx_probe_nic(efx);
1772 if (rc) {
1773 netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
1774 goto fail1;
1775 }
1776
1777 rc = efx_probe_port(efx);
1778 if (rc) {
1779 netif_err(efx, probe, efx->net_dev, "failed to create port\n");
1780 goto fail2;
1781 }
1782
1783 BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT);
1784 if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) {
1785 rc = -EINVAL;
1786 goto fail3;
1787 }
1788 efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
1789
1790#ifdef CONFIG_SFC_SRIOV
1791 rc = efx->type->vswitching_probe(efx);
1792 if (rc) /* not fatal; the PF will still work fine */
1793 netif_warn(efx, probe, efx->net_dev,
1794 "failed to setup vswitching rc=%d;"
1795 " VFs may not function\n", rc);
1796#endif
1797
1798 rc = efx_probe_filters(efx);
1799 if (rc) {
1800 netif_err(efx, probe, efx->net_dev,
1801 "failed to create filter tables\n");
1802 goto fail4;
1803 }
1804
1805 rc = efx_probe_channels(efx);
1806 if (rc)
1807 goto fail5;
1808
1809 return 0;
1810
1811 fail5:
1812 efx_remove_filters(efx);
1813 fail4:
1814#ifdef CONFIG_SFC_SRIOV
1815 efx->type->vswitching_remove(efx);
1816#endif
1817 fail3:
1818 efx_remove_port(efx);
1819 fail2:
1820 efx_remove_nic(efx);
1821 fail1:
1822 return rc;
1823}

/* If the interface is supposed to be running but is not, start
 * the hardware and software data path, regular activity for the port
 * (MAC statistics, link polling, etc.) and schedule the port to be
 * reconfigured. Interrupts must already be enabled. This function
 * is safe to call multiple times, so long as the NIC is not disabled.
 * Requires the RTNL lock.
 */
static void efx_start_all(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->state == STATE_DISABLED);

	/* Check that it is appropriate to restart the interface. All
	 * of these flags are safe to read under just the rtnl lock */
	if (efx->port_enabled || !netif_running(efx->net_dev) ||
	    efx->reset_pending)
		return;

	efx_start_port(efx);
	efx_start_datapath(efx);

	/* Start the hardware monitor if there is one */
	if (efx->type->monitor != NULL)
		queue_delayed_work(efx->workqueue, &efx->monitor_work,
				   efx_monitor_interval);

	/* If link state detection is normally event-driven, we have
	 * to poll now because we could have missed a change
	 */
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
		mutex_lock(&efx->mac_lock);
		if (efx->phy_op->poll(efx))
			efx_link_status_changed(efx);
		mutex_unlock(&efx->mac_lock);
	}

	efx->type->start_stats(efx);
	efx->type->pull_stats(efx);
	spin_lock_bh(&efx->stats_lock);
	efx->type->update_stats(efx, NULL, NULL);
	spin_unlock_bh(&efx->stats_lock);
}

/* Quiesce the hardware and software data path, and regular activity
 * for the port without bringing the link down. Safe to call multiple
 * times with the NIC in almost any state, but interrupts should be
 * enabled. Requires the RTNL lock.
 */
static void efx_stop_all(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	/* port_enabled can be read safely under the rtnl lock */
	if (!efx->port_enabled)
		return;

	/* update stats before we go down so we can accurately count
	 * rx_nodesc_drops
	 */
	efx->type->pull_stats(efx);
	spin_lock_bh(&efx->stats_lock);
	efx->type->update_stats(efx, NULL, NULL);
	spin_unlock_bh(&efx->stats_lock);
	efx->type->stop_stats(efx);
	efx_stop_port(efx);

	/* Stop the kernel transmit interface. This is only valid if
	 * the device is stopped or detached; otherwise the watchdog
	 * may fire immediately.
	 */
	WARN_ON(netif_running(efx->net_dev) &&
		netif_device_present(efx->net_dev));
	netif_tx_disable(efx->net_dev);

	efx_stop_datapath(efx);
}

static void efx_remove_all(struct efx_nic *efx)
{
	efx_remove_channels(efx);
	efx_remove_filters(efx);
#ifdef CONFIG_SFC_SRIOV
	efx->type->vswitching_remove(efx);
#endif
	efx_remove_port(efx);
	efx_remove_nic(efx);
}

/**************************************************************************
 *
 * Interrupt moderation
 *
 **************************************************************************/

static unsigned int irq_mod_ticks(unsigned int usecs, unsigned int quantum_ns)
{
	if (usecs == 0)
		return 0;
	if (usecs * 1000 < quantum_ns)
		return 1; /* never round down to 0 */
	return usecs * 1000 / quantum_ns;
}
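
/* Worked example (illustrative values only): with a hypothetical timer
 * quantum of 6144 ns, irq_mod_ticks(50, 6144) returns
 * 50 * 1000 / 6144 = 8 ticks (integer division rounds down), while
 * irq_mod_ticks(5, 6144) returns 1 rather than 5000 / 6144 = 0, so any
 * non-zero request always yields at least one tick of moderation.
 */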

/* Set interrupt moderation parameters */
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
			    unsigned int rx_usecs, bool rx_adaptive,
			    bool rx_may_override_tx)
{
	struct efx_channel *channel;
	unsigned int irq_mod_max = DIV_ROUND_UP(efx->type->timer_period_max *
						efx->timer_quantum_ns,
						1000);
	unsigned int tx_ticks;
	unsigned int rx_ticks;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (tx_usecs > irq_mod_max || rx_usecs > irq_mod_max)
		return -EINVAL;

	tx_ticks = irq_mod_ticks(tx_usecs, efx->timer_quantum_ns);
	rx_ticks = irq_mod_ticks(rx_usecs, efx->timer_quantum_ns);

	if (tx_ticks != rx_ticks && efx->tx_channel_offset == 0 &&
	    !rx_may_override_tx) {
		netif_err(efx, drv, efx->net_dev, "Channels are shared. "
			  "RX and TX IRQ moderation must be equal\n");
		return -EINVAL;
	}

	efx->irq_rx_adaptive = rx_adaptive;
	efx->irq_rx_moderation = rx_ticks;
	efx_for_each_channel(channel, efx) {
		if (efx_channel_has_rx_queue(channel))
			channel->irq_moderation = rx_ticks;
		else if (efx_channel_has_tx_queues(channel))
			channel->irq_moderation = tx_ticks;
	}

	return 0;
}

void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
			    unsigned int *rx_usecs, bool *rx_adaptive)
{
	/* We must round up when converting ticks to microseconds
	 * because we round down when converting the other way.
	 */

	*rx_adaptive = efx->irq_rx_adaptive;
	*rx_usecs = DIV_ROUND_UP(efx->irq_rx_moderation *
				 efx->timer_quantum_ns,
				 1000);

	/* If channels are shared between RX and TX, so is IRQ
	 * moderation. Otherwise, IRQ moderation is the same for all
	 * TX channels and is not adaptive.
	 */
	if (efx->tx_channel_offset == 0)
		*tx_usecs = *rx_usecs;
	else
		*tx_usecs = DIV_ROUND_UP(
			efx->channel[efx->tx_channel_offset]->irq_moderation *
			efx->timer_quantum_ns,
			1000);
}
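
/* Round-trip example (illustrative, same hypothetical 6144 ns quantum as
 * above): a request for 50 us is stored as 8 ticks, and reading it back
 * gives DIV_ROUND_UP(8 * 6144, 1000) = 50 us.  Rounding down on the way
 * in and up on the way out guarantees that a set-then-get never reports
 * less moderation than the hardware is actually applying.
 */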

/**************************************************************************
 *
 * Hardware monitor
 *
 **************************************************************************/

/* Run periodically off the general workqueue */
static void efx_monitor(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic,
					   monitor_work.work);

	netif_vdbg(efx, timer, efx->net_dev,
		   "hardware monitor executing on CPU %d\n",
		   raw_smp_processor_id());
	BUG_ON(efx->type->monitor == NULL);

	/* If the mac_lock is already held then a port reconfiguration
	 * is probably already in progress, and that will do most of
	 * the work of monitor() anyway. */
	if (mutex_trylock(&efx->mac_lock)) {
		if (efx->port_enabled)
			efx->type->monitor(efx);
		mutex_unlock(&efx->mac_lock);
	}

	queue_delayed_work(efx->workqueue, &efx->monitor_work,
			   efx_monitor_interval);
}

/**************************************************************************
 *
 * ioctls
 *
 *************************************************************************/

/* Net device ioctl
 * Context: process, rtnl_lock() held.
 */
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (cmd == SIOCSHWTSTAMP)
		return efx_ptp_set_ts_config(efx, ifr);
	if (cmd == SIOCGHWTSTAMP)
		return efx_ptp_get_ts_config(efx, ifr);

	/* Convert phy_id from older PRTAD/DEVAD format */
	if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
	    (data->phy_id & 0xfc00) == 0x0400)
		data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;

	return mdio_mii_ioctl(&efx->mdio, data, cmd);
}
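
/* Example of the phy_id conversion above (bit layout per <linux/mdio.h>:
 * MDIO_PHY_ID_C45 is bit 15, PRTAD occupies bits 9-5 and DEVAD bits 4-0;
 * the driver's older encoding kept its clause-45 flag in bit 10).  An
 * old-format ID such as 0x042a (PRTAD 1, DEVAD 10) matches
 * (phy_id & 0xfc00) == 0x0400; the XOR with
 * (MDIO_PHY_ID_C45 | 0x0400) = 0x8400 clears bit 10 and sets bit 15,
 * giving 0x802a with the PRTAD/DEVAD bits untouched.
 */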

/**************************************************************************
 *
 * NAPI interface
 *
 **************************************************************************/

static void efx_init_napi_channel(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	channel->napi_dev = efx->net_dev;
	netif_napi_add(channel->napi_dev, &channel->napi_str,
		       efx_poll, napi_weight);
	efx_channel_busy_poll_init(channel);
}

static void efx_init_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_init_napi_channel(channel);
}

static void efx_fini_napi_channel(struct efx_channel *channel)
{
	if (channel->napi_dev) {
		netif_napi_del(&channel->napi_str);
		napi_hash_del(&channel->napi_str);
	}
	channel->napi_dev = NULL;
}

static void efx_fini_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_fini_napi_channel(channel);
}

/**************************************************************************
 *
 * Kernel netpoll interface
 *
 *************************************************************************/

#ifdef CONFIG_NET_POLL_CONTROLLER

/* Although in the common case interrupts will be disabled, this is not
 * guaranteed. However, all our work happens inside the NAPI callback,
 * so no locking is required.
 */
static void efx_netpoll(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}

#endif

#ifdef CONFIG_NET_RX_BUSY_POLL
static int efx_busy_poll(struct napi_struct *napi)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	struct efx_nic *efx = channel->efx;
	int budget = 4;
	int old_rx_packets, rx_packets;

	if (!netif_running(efx->net_dev))
		return LL_FLUSH_FAILED;

	if (!efx_channel_try_lock_poll(channel))
		return LL_FLUSH_BUSY;

	old_rx_packets = channel->rx_queue.rx_packets;
	efx_process_channel(channel, budget);

	rx_packets = channel->rx_queue.rx_packets - old_rx_packets;

	/* There is no race condition with NAPI here.
	 * NAPI will automatically be rescheduled if it yielded during busy
	 * polling, because it was not able to take the lock and thus returned
	 * the full budget.
	 */
	efx_channel_unlock_poll(channel);

	return rx_packets;
}
#endif
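
/* Busy-poll contract (a summary of the core's behaviour at the time of
 * writing, see sk_busy_loop()): LL_FLUSH_FAILED aborts the busy loop,
 * LL_FLUSH_BUSY means the channel lock was contended and the core may
 * spin and retry, and a non-negative value reports packets processed on
 * this pass.  The small budget of 4 keeps each spin short so the lock
 * is never held long enough to starve ordinary NAPI polling.
 */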

/**************************************************************************
 *
 * Kernel net device interface
 *
 *************************************************************************/

/* Context: process, rtnl_lock() held. */
int efx_net_open(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int rc;

	netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
		  raw_smp_processor_id());

	rc = efx_check_disabled(efx);
	if (rc)
		return rc;
	if (efx->phy_mode & PHY_MODE_SPECIAL)
		return -EBUSY;
	if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
		return -EIO;

	/* Notify the kernel of the link state polled during driver load,
	 * before the monitor starts running */
	efx_link_status_changed(efx);

	efx_start_all(efx);
	efx_selftest_async_start(efx);
	return 0;
}

/* Context: process, rtnl_lock() held.
 * Note that the kernel ignores our return code; this method
 * should really return void.
 */
int efx_net_stop(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
		  raw_smp_processor_id());

	/* Stop the device and flush all the channels */
	efx_stop_all(efx);

	return 0;
}

/* Context: process, dev_base_lock or RTNL held, non-blocking. */
static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev,
					       struct rtnl_link_stats64 *stats)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	spin_lock_bh(&efx->stats_lock);
	efx->type->update_stats(efx, NULL, stats);
	spin_unlock_bh(&efx->stats_lock);

	return stats;
}

/* Context: netif_tx_lock held, BHs disabled. */
static void efx_watchdog(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	netif_err(efx, tx_err, efx->net_dev,
		  "TX stuck with port_enabled=%d: resetting channels\n",
		  efx->port_enabled);

	efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}


/* Context: process, rtnl_lock() held. */
static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int rc;

	rc = efx_check_disabled(efx);
	if (rc)
		return rc;
	if (new_mtu > EFX_MAX_MTU)
		return -EINVAL;

	netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);

	efx_device_detach_sync(efx);
	efx_stop_all(efx);

	mutex_lock(&efx->mac_lock);
	net_dev->mtu = new_mtu;
	efx_mac_reconfigure(efx);
	mutex_unlock(&efx->mac_lock);

	efx_start_all(efx);
	netif_device_attach(efx->net_dev);
	return 0;
}

static int efx_set_mac_address(struct net_device *net_dev, void *data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct sockaddr *addr = data;
	u8 *new_addr = addr->sa_data;
	u8 old_addr[6];
	int rc;

	if (!is_valid_ether_addr(new_addr)) {
		netif_err(efx, drv, efx->net_dev,
			  "invalid ethernet MAC address requested: %pM\n",
			  new_addr);
		return -EADDRNOTAVAIL;
	}

	/* save old address */
	ether_addr_copy(old_addr, net_dev->dev_addr);
	ether_addr_copy(net_dev->dev_addr, new_addr);
	if (efx->type->set_mac_address) {
		rc = efx->type->set_mac_address(efx);
		if (rc) {
			ether_addr_copy(net_dev->dev_addr, old_addr);
			return rc;
		}
	}

	/* Reconfigure the MAC */
	mutex_lock(&efx->mac_lock);
	efx_mac_reconfigure(efx);
	mutex_unlock(&efx->mac_lock);

	return 0;
}

/* Context: netif_addr_lock held, BHs disabled. */
static void efx_set_rx_mode(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	if (efx->port_enabled)
		queue_work(efx->workqueue, &efx->mac_work);
	/* Otherwise efx_start_port() will do this */
}

static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	/* If disabling RX n-tuple filtering, clear existing filters */
	if (net_dev->features & ~data & NETIF_F_NTUPLE)
		return efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);

	return 0;
}

static const struct net_device_ops efx_netdev_ops = {
	.ndo_open		= efx_net_open,
	.ndo_stop		= efx_net_stop,
	.ndo_get_stats64	= efx_net_stats,
	.ndo_tx_timeout		= efx_watchdog,
	.ndo_start_xmit		= efx_hard_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= efx_ioctl,
	.ndo_change_mtu		= efx_change_mtu,
	.ndo_set_mac_address	= efx_set_mac_address,
	.ndo_set_rx_mode	= efx_set_rx_mode,
	.ndo_set_features	= efx_set_features,
#ifdef CONFIG_SFC_SRIOV
	.ndo_set_vf_mac		= efx_sriov_set_vf_mac,
	.ndo_set_vf_vlan	= efx_sriov_set_vf_vlan,
	.ndo_set_vf_spoofchk	= efx_sriov_set_vf_spoofchk,
	.ndo_get_vf_config	= efx_sriov_get_vf_config,
	.ndo_set_vf_link_state	= efx_sriov_set_vf_link_state,
	.ndo_get_phys_port_id	= efx_sriov_get_phys_port_id,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= efx_netpoll,
#endif
	.ndo_setup_tc		= efx_setup_tc,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= efx_busy_poll,
#endif
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= efx_filter_rfs,
#endif
};

static void efx_update_name(struct efx_nic *efx)
{
	strcpy(efx->name, efx->net_dev->name);
	efx_mtd_rename(efx);
	efx_set_channel_names(efx);
}

static int efx_netdev_event(struct notifier_block *this,
			    unsigned long event, void *ptr)
{
	struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);

	if ((net_dev->netdev_ops == &efx_netdev_ops) &&
	    event == NETDEV_CHANGENAME)
		efx_update_name(netdev_priv(net_dev));

	return NOTIFY_DONE;
}

static struct notifier_block efx_netdev_notifier = {
	.notifier_call = efx_netdev_event,
};

static ssize_t
show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
	return sprintf(buf, "%d\n", efx->phy_type);
}
static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);

#ifdef CONFIG_SFC_MCDI_LOGGING
static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled);
}
static ssize_t set_mcdi_log(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	bool enable = count > 0 && *buf != '0';

	mcdi->logging_enabled = enable;
	return count;
}
static DEVICE_ATTR(mcdi_logging, 0644, show_mcdi_log, set_mcdi_log);
#endif
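
/* Usage note: these attributes appear under the PCI device's sysfs
 * directory, so with a hypothetical device at 0000:01:00.0, MCDI
 * logging can be toggled with
 *	echo 1 > /sys/bus/pci/devices/0000:01:00.0/mcdi_logging
 * and the PHY type read back from the phy_type file in the same place.
 */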

static int efx_register_netdev(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	struct efx_channel *channel;
	int rc;

	net_dev->watchdog_timeo = 5 * HZ;
	net_dev->irq = efx->pci_dev->irq;
	net_dev->netdev_ops = &efx_netdev_ops;
	if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
		net_dev->priv_flags |= IFF_UNICAST_FLT;
	net_dev->ethtool_ops = &efx_ethtool_ops;
	net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;

	rtnl_lock();

	/* Enable resets to be scheduled and check whether any were
	 * already requested. If so, the NIC is probably hosed so we
	 * abort.
	 */
	efx->state = STATE_READY;
	smp_mb(); /* ensure we change state before checking reset_pending */
	if (efx->reset_pending) {
		netif_err(efx, probe, efx->net_dev,
			  "aborting probe due to scheduled reset\n");
		rc = -EIO;
		goto fail_locked;
	}

	rc = dev_alloc_name(net_dev, net_dev->name);
	if (rc < 0)
		goto fail_locked;
	efx_update_name(efx);

	/* Always start with carrier off; PHY events will detect the link */
	netif_carrier_off(net_dev);

	rc = register_netdevice(net_dev);
	if (rc)
		goto fail_locked;

	efx_for_each_channel(channel, efx) {
		struct efx_tx_queue *tx_queue;
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_init_tx_queue_core_txq(tx_queue);
	}

	efx_associate(efx);

	rtnl_unlock();

	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
	if (rc) {
		netif_err(efx, drv, efx->net_dev,
			  "failed to init net dev attributes\n");
		goto fail_registered;
	}
#ifdef CONFIG_SFC_MCDI_LOGGING
	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
	if (rc) {
		netif_err(efx, drv, efx->net_dev,
			  "failed to init net dev attributes\n");
		goto fail_attr_mcdi_logging;
	}
#endif

	return 0;

#ifdef CONFIG_SFC_MCDI_LOGGING
fail_attr_mcdi_logging:
	device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
#endif
fail_registered:
	rtnl_lock();
	efx_dissociate(efx);
	unregister_netdevice(net_dev);
fail_locked:
	efx->state = STATE_UNINIT;
	rtnl_unlock();
	netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
	return rc;
}

static void efx_unregister_netdev(struct efx_nic *efx)
{
	if (!efx->net_dev)
		return;

	BUG_ON(netdev_priv(efx->net_dev) != efx);

	if (efx_dev_registered(efx)) {
		strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
#ifdef CONFIG_SFC_MCDI_LOGGING
		device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
#endif
		device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
		unregister_netdev(efx->net_dev);
	}
}

/**************************************************************************
 *
 * Device reset and suspend
 *
 **************************************************************************/

/* Tears down the entire software state and most of the hardware state
 * before reset. */
void efx_reset_down(struct efx_nic *efx, enum reset_type method)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	if (method == RESET_TYPE_MCDI_TIMEOUT)
		efx->type->prepare_flr(efx);

	efx_stop_all(efx);
	efx_disable_interrupts(efx);

	mutex_lock(&efx->mac_lock);
	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
	    method != RESET_TYPE_DATAPATH)
		efx->phy_op->fini(efx);
	efx->type->fini(efx);
}

/* This function will always ensure that the locks acquired in
 * efx_reset_down() are released. A failure return code indicates
 * that we were unable to reinitialise the hardware, and the
 * driver should be disabled. If ok is false, then the rx and tx
 * engines are not restarted, pending a RESET_DISABLE. */
int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	if (method == RESET_TYPE_MCDI_TIMEOUT)
		efx->type->finish_flr(efx);

	/* Ensure that SRAM is initialised even if we're disabling the device */
	rc = efx->type->init(efx);
	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
		goto fail;
	}

	if (!ok)
		goto fail;

	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
	    method != RESET_TYPE_DATAPATH) {
		rc = efx->phy_op->init(efx);
		if (rc)
			goto fail;
		rc = efx->phy_op->reconfigure(efx);
		if (rc && rc != -EPERM)
			netif_err(efx, drv, efx->net_dev,
				  "could not restore PHY settings\n");
	}

	rc = efx_enable_interrupts(efx);
	if (rc)
		goto fail;

#ifdef CONFIG_SFC_SRIOV
	rc = efx->type->vswitching_restore(efx);
	if (rc) /* not fatal; the PF will still work fine */
		netif_warn(efx, probe, efx->net_dev,
			   "failed to restore vswitching rc=%d;"
			   " VFs may not function\n", rc);
#endif

	down_read(&efx->filter_sem);
	efx_restore_filters(efx);
	up_read(&efx->filter_sem);
	if (efx->type->sriov_reset)
		efx->type->sriov_reset(efx);

	mutex_unlock(&efx->mac_lock);

	efx_start_all(efx);

	return 0;

fail:
	efx->port_initialized = false;

	mutex_unlock(&efx->mac_lock);

	return rc;
}

/* Reset the NIC using the specified method. Note that the reset may
 * fail, in which case the card will be left in an unusable state.
 *
 * Caller must hold the rtnl_lock.
 */
int efx_reset(struct efx_nic *efx, enum reset_type method)
{
	int rc, rc2;
	bool disabled;

	netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
		   RESET_TYPE(method));

	efx_device_detach_sync(efx);
	efx_reset_down(efx, method);

	rc = efx->type->reset(efx, method);
	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
		goto out;
	}

	/* Clear flags for the scopes we covered. We assume the NIC and
	 * driver are now quiescent so that there is no race here.
	 */
	if (method < RESET_TYPE_MAX_METHOD)
		efx->reset_pending &= -(1 << (method + 1));
	else /* it doesn't fit into the well-ordered scope hierarchy */
		__clear_bit(method, &efx->reset_pending);
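
	/* Worked example of the mask above: if method is 2, then
	 * -(1 << 3) is ...11111000 in two's complement, so the &=
	 * clears pending bits 0-2 (every scope this reset just covered)
	 * while leaving any higher-numbered, more thorough requests
	 * pending.
	 */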

	/* Reinitialise bus-mastering, which may have been turned off before
	 * the reset was scheduled. This is still appropriate, even in the
	 * RESET_TYPE_DISABLE since this driver generally assumes the hardware
	 * can respond to requests. */
	pci_set_master(efx->pci_dev);

out:
	/* Leave device stopped if necessary */
	disabled = rc ||
		method == RESET_TYPE_DISABLE ||
		method == RESET_TYPE_RECOVER_OR_DISABLE;
	rc2 = efx_reset_up(efx, method, !disabled);
	if (rc2) {
		disabled = true;
		if (!rc)
			rc = rc2;
	}

	if (disabled) {
		dev_close(efx->net_dev);
		netif_err(efx, drv, efx->net_dev, "has been disabled\n");
		efx->state = STATE_DISABLED;
	} else {
		netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
		netif_device_attach(efx->net_dev);
	}
	return rc;
}

/* Try recovery mechanisms.
 * For now only EEH is supported.
 * Returns 0 if the recovery mechanisms are unsuccessful.
 * Returns a non-zero value otherwise.
 */
int efx_try_recovery(struct efx_nic *efx)
{
#ifdef CONFIG_EEH
	/* A PCI error can occur and not be seen by EEH because nothing
	 * happens on the PCI bus. In this case the driver may fail and
	 * schedule a 'recover or reset', leading to this recovery handler.
	 * Manually call the eeh failure check function.
	 */
	struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev);
	if (eeh_dev_check_failure(eehdev)) {
		/* The EEH mechanisms will handle the error and reset the
		 * device if necessary.
		 */
		return 1;
	}
#endif
	return 0;
}

static void efx_wait_for_bist_end(struct efx_nic *efx)
{
	int i;

	for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) {
		if (efx_mcdi_poll_reboot(efx))
			goto out;
		msleep(BIST_WAIT_DELAY_MS);
	}

	netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n");
out:
	/* Either way unset the BIST flag. If we found no reboot we probably
	 * won't recover, but we should try.
	 */
	efx->mc_bist_for_other_fn = false;
}
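
/* Timing note: the loop above polls once per BIST_WAIT_DELAY_MS for up
 * to BIST_WAIT_DELAY_COUNT iterations; with the values defined earlier
 * in this file (100 ms x 100 at the time of writing) that bounds the
 * wait for the MC to reboot out of BIST mode at roughly ten seconds.
 */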

/* The worker thread exists so that code that cannot sleep can
 * schedule a reset for later.
 */
static void efx_reset_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
	unsigned long pending;
	enum reset_type method;

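	/* fls() returns the 1-based index of the highest set bit, so
	 * "fls(pending) - 1" below selects the highest-numbered (most
	 * thorough) pending reset type; servicing it also covers every
	 * lesser scope cleared by efx_reset() above.  For example, with
	 * both RESET_TYPE_INVISIBLE and RESET_TYPE_ALL pending, method
	 * becomes RESET_TYPE_ALL.
	 */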
	pending = ACCESS_ONCE(efx->reset_pending);
	method = fls(pending) - 1;

	if (method == RESET_TYPE_MC_BIST)
		efx_wait_for_bist_end(efx);

	if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
	     method == RESET_TYPE_RECOVER_OR_ALL) &&
	    efx_try_recovery(efx))
		return;

	if (!pending)
		return;

	rtnl_lock();

	/* We checked the state in efx_schedule_reset() but it may
	 * have changed by now. Now that we have the RTNL lock,
	 * it cannot change again.
	 */
	if (efx->state == STATE_READY)
		(void)efx_reset(efx, method);

	rtnl_unlock();
}

void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
{
	enum reset_type method;

	if (efx->state == STATE_RECOVERY) {
		netif_dbg(efx, drv, efx->net_dev,
			  "recovering: skip scheduling %s reset\n",
			  RESET_TYPE(type));
		return;
	}

	switch (type) {
	case RESET_TYPE_INVISIBLE:
	case RESET_TYPE_ALL:
	case RESET_TYPE_RECOVER_OR_ALL:
	case RESET_TYPE_WORLD:
	case RESET_TYPE_DISABLE:
	case RESET_TYPE_RECOVER_OR_DISABLE:
	case RESET_TYPE_DATAPATH:
	case RESET_TYPE_MC_BIST:
	case RESET_TYPE_MCDI_TIMEOUT:
		method = type;
		netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
			  RESET_TYPE(method));
		break;
	default:
		method = efx->type->map_reset_reason(type);
		netif_dbg(efx, drv, efx->net_dev,
			  "scheduling %s reset for %s\n",
			  RESET_TYPE(method), RESET_TYPE(type));
		break;
	}

	set_bit(method, &efx->reset_pending);
	smp_mb(); /* ensure we change reset_pending before checking state */

	/* If we're not READY then just leave the flags set as the cue
	 * to abort probing or reschedule the reset later.
	 */
	if (ACCESS_ONCE(efx->state) != STATE_READY)
		return;

	/* efx_process_channel() will no longer read events once a reset
	 * is scheduled, so switch back to polled MCDI completions. */
	efx_mcdi_mode_poll(efx);

	queue_work(reset_workqueue, &efx->reset_work);
}

/**************************************************************************
 *
 * List of NICs we support
 *
 **************************************************************************/

/* PCI device ID table */
static const struct pci_device_id efx_pci_table[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
		    PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0),
	 .driver_data = (unsigned long) &falcon_a1_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
		    PCI_DEVICE_ID_SOLARFLARE_SFC4000B),
	 .driver_data = (unsigned long) &falcon_b0_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803),	/* SFC9020 */
	 .driver_data = (unsigned long) &siena_a0_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813),	/* SFL9021 */
	 .driver_data = (unsigned long) &siena_a0_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903),	/* SFC9120 PF */
	 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1903),	/* SFC9120 VF */
	 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0923),	/* SFC9140 PF */
	 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1923),	/* SFC9140 VF */
	 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0a03),	/* SFC9220 PF */
	 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1a03),	/* SFC9220 VF */
	 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
	{0}			/* end of list */
};

/**************************************************************************
 *
 * Dummy PHY/MAC operations
 *
 * Can be used for some unimplemented operations
 * Needed so all function pointers are valid and do not have to be tested
 * before use
 *
 **************************************************************************/
int efx_port_dummy_op_int(struct efx_nic *efx)
{
	return 0;
}
void efx_port_dummy_op_void(struct efx_nic *efx) {}

static bool efx_port_dummy_op_poll(struct efx_nic *efx)
{
	return false;
}

static const struct efx_phy_operations efx_dummy_phy_operations = {
	.init		= efx_port_dummy_op_int,
	.reconfigure	= efx_port_dummy_op_int,
	.poll		= efx_port_dummy_op_poll,
	.fini		= efx_port_dummy_op_void,
};

/**************************************************************************
 *
 * Data housekeeping
 *
 **************************************************************************/

/* This zeroes out and then fills in the invariants in a struct
 * efx_nic (including all sub-structures).
 */
static int efx_init_struct(struct efx_nic *efx,
			   struct pci_dev *pci_dev, struct net_device *net_dev)
{
	int i;

	/* Initialise common structures */
	INIT_LIST_HEAD(&efx->node);
	INIT_LIST_HEAD(&efx->secondary_list);
	spin_lock_init(&efx->biu_lock);
#ifdef CONFIG_SFC_MTD
	INIT_LIST_HEAD(&efx->mtd_list);
#endif
	INIT_WORK(&efx->reset_work, efx_reset_work);
	INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
	INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
	efx->pci_dev = pci_dev;
	efx->msg_enable = debug;
	efx->state = STATE_UNINIT;
	strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));

	efx->net_dev = net_dev;
	efx->rx_prefix_size = efx->type->rx_prefix_size;
	efx->rx_ip_align =
		NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
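	/* For example, with the typical NET_IP_ALIGN of 2 and a
	 * hypothetical 14-byte RX prefix, rx_ip_align is
	 * (14 + 2) % 4 = 0: the prefix already leaves the IP header
	 * 4-byte aligned, so no extra padding is inserted.  A 16-byte
	 * prefix would instead give (16 + 2) % 4 = 2 bytes of padding.
	 */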
	efx->rx_packet_hash_offset =
		efx->type->rx_hash_offset - efx->type->rx_prefix_size;
	efx->rx_packet_ts_offset =
		efx->type->rx_ts_offset - efx->type->rx_prefix_size;
	spin_lock_init(&efx->stats_lock);
	mutex_init(&efx->mac_lock);
	efx->phy_op = &efx_dummy_phy_operations;
	efx->mdio.dev = net_dev;
	INIT_WORK(&efx->mac_work, efx_mac_work);
	init_waitqueue_head(&efx->flush_wq);

	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
		efx->channel[i] = efx_alloc_channel(efx, i, NULL);
		if (!efx->channel[i])
			goto fail;
		efx->msi_context[i].efx = efx;
		efx->msi_context[i].index = i;
	}

	/* Higher numbered interrupt modes are less capable! */
	efx->interrupt_mode = max(efx->type->max_interrupt_mode,
				  interrupt_mode);

	/* Would be good to use the net_dev name, but we're too early */
	snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
		 pci_name(pci_dev));
	efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
	if (!efx->workqueue)
		goto fail;

	return 0;

fail:
	efx_fini_struct(efx);
	return -ENOMEM;
}

static void efx_fini_struct(struct efx_nic *efx)
{
	int i;

	for (i = 0; i < EFX_MAX_CHANNELS; i++)
		kfree(efx->channel[i]);

	kfree(efx->vpd_sn);

	if (efx->workqueue) {
		destroy_workqueue(efx->workqueue);
		efx->workqueue = NULL;
	}
}

void efx_update_sw_stats(struct efx_nic *efx, u64 *stats)
{
	u64 n_rx_nodesc_trunc = 0;
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		n_rx_nodesc_trunc += channel->n_rx_nodesc_trunc;
	stats[GENERIC_STAT_rx_nodesc_trunc] = n_rx_nodesc_trunc;
	stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops);
}

/**************************************************************************
 *
 * PCI interface
 *
 **************************************************************************/

/* Main body of final NIC shutdown code
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove_main(struct efx_nic *efx)
{
	/* Flush reset_work. It can no longer be scheduled since we
	 * are not READY.
	 */
	BUG_ON(efx->state == STATE_READY);
	cancel_work_sync(&efx->reset_work);

	efx_disable_interrupts(efx);
	efx_nic_fini_interrupt(efx);
	efx_fini_port(efx);
	efx->type->fini(efx);
	efx_fini_napi(efx);
	efx_remove_all(efx);
}

/* Final NIC shutdown
 * This is called only at module unload (or hotplug removal). A PF can call
 * this on its VFs to ensure they are unbound first.
 */
static void efx_pci_remove(struct pci_dev *pci_dev)
{
	struct efx_nic *efx;

	efx = pci_get_drvdata(pci_dev);
	if (!efx)
		return;

	/* Mark the NIC as fini, then stop the interface */
	rtnl_lock();
	efx_dissociate(efx);
	dev_close(efx->net_dev);
	efx_disable_interrupts(efx);
	efx->state = STATE_UNINIT;
	rtnl_unlock();

	if (efx->type->sriov_fini)
		efx->type->sriov_fini(efx);

	efx_unregister_netdev(efx);

	efx_mtd_remove(efx);

	efx_pci_remove_main(efx);

	efx_fini_io(efx);
	netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");

	efx_fini_struct(efx);
	free_netdev(efx->net_dev);

	pci_disable_pcie_error_reporting(pci_dev);
}

/* NIC VPD information
 * Called during probe to display the part number of the
 * installed NIC. VPD is potentially very large but this should
 * always appear within the first 512 bytes.
 */
#define SFC_VPD_LEN 512
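
/* Sketch of the VPD layout being parsed below (standard PCI VPD, field
 * contents illustrative):
 *
 *	[LRDT tag: RO data][len lo][len hi]	<- found via pci_vpd_find_tag()
 *	"PN"[len]["SFN5122F-..."]		<- part number keyword
 *	"SN"[len]["1234567890"]			<- serial number keyword
 *
 * Each keyword is located with pci_vpd_find_info_keyword() and bounds-
 * checked against the amount of VPD actually read.
 */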
static void efx_probe_vpd_strings(struct efx_nic *efx)
{
	struct pci_dev *dev = efx->pci_dev;
	char vpd_data[SFC_VPD_LEN];
	ssize_t vpd_size;
	int ro_start, ro_size, i, j;

	/* Get the vpd data from the device */
	vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
	if (vpd_size <= 0) {
		netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n");
		return;
	}

	/* Get the Read only section */
	ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
	if (ro_start < 0) {
		netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
		return;
	}

	ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
	j = ro_size;
	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
	if (i + j > vpd_size)
		j = vpd_size - i;

	/* Get the Part number */
	i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN");
	if (i < 0) {
		netif_err(efx, drv, efx->net_dev, "Part number not found\n");
		return;
	}

	j = pci_vpd_info_field_size(&vpd_data[i]);
	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (i + j > vpd_size) {
		netif_err(efx, drv, efx->net_dev, "Incomplete part number\n");
		return;
	}

	netif_info(efx, drv, efx->net_dev,
		   "Part Number : %.*s\n", j, &vpd_data[i]);

	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
	j = ro_size;
	i = pci_vpd_find_info_keyword(vpd_data, i, j, "SN");
	if (i < 0) {
		netif_err(efx, drv, efx->net_dev, "Serial number not found\n");
		return;
	}

	j = pci_vpd_info_field_size(&vpd_data[i]);
	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (i + j > vpd_size) {
		netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n");
		return;
	}

	efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL);
	if (!efx->vpd_sn)
		return;

	snprintf(efx->vpd_sn, j + 1, "%s", &vpd_data[i]);
}


/* Main body of NIC initialisation
 * This is called at module load (or hotplug insertion, theoretically).
 */
static int efx_pci_probe_main(struct efx_nic *efx)
{
	int rc;

	/* Do start-of-day initialisation */
	rc = efx_probe_all(efx);
	if (rc)
		goto fail1;

	efx_init_napi(efx);

	rc = efx->type->init(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to initialise NIC\n");
		goto fail3;
	}

	rc = efx_init_port(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to initialise port\n");
		goto fail4;
	}

	rc = efx_nic_init_interrupt(efx);
	if (rc)
		goto fail5;
	rc = efx_enable_interrupts(efx);
	if (rc)
		goto fail6;

	return 0;

 fail6:
	efx_nic_fini_interrupt(efx);
 fail5:
	efx_fini_port(efx);
 fail4:
	efx->type->fini(efx);
 fail3:
	efx_fini_napi(efx);
	efx_remove_all(efx);
 fail1:
	return rc;
}

/* NIC initialisation
 *
 * This is called at module load (or hotplug insertion,
 * theoretically). It sets up PCI mappings, resets the NIC,
 * sets up and registers the network devices with the kernel and hooks
 * the interrupt service routine. It does not prepare the device for
 * transmission; this is left to the first time one of the network
 * interfaces is brought up (i.e. efx_net_open).
 */
static int efx_pci_probe(struct pci_dev *pci_dev,
			 const struct pci_device_id *entry)
{
	struct net_device *net_dev;
	struct efx_nic *efx;
	int rc;

	/* Allocate and initialise a struct net_device and struct efx_nic */
	net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
				     EFX_MAX_RX_QUEUES);
	if (!net_dev)
		return -ENOMEM;
	efx = netdev_priv(net_dev);
	efx->type = (const struct efx_nic_type *) entry->driver_data;
	net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
			      NETIF_F_HIGHDMA | NETIF_F_TSO |
			      NETIF_F_RXCSUM);
	if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
		net_dev->features |= NETIF_F_TSO6;
	/* Mask for features that also apply to VLAN devices */
	net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
				   NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
				   NETIF_F_RXCSUM);
	/* All offloads can be toggled */
	net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA;
	pci_set_drvdata(pci_dev, efx);
	SET_NETDEV_DEV(net_dev, &pci_dev->dev);
	rc = efx_init_struct(efx, pci_dev, net_dev);
	if (rc)
		goto fail1;

	netif_info(efx, probe, efx->net_dev,
		   "Solarflare NIC detected\n");

	if (!efx->type->is_vf)
		efx_probe_vpd_strings(efx);

	/* Set up basic I/O (BAR mappings etc) */
	rc = efx_init_io(efx);
	if (rc)
		goto fail2;

	rc = efx_pci_probe_main(efx);
	if (rc)
		goto fail3;

	rc = efx_register_netdev(efx);
	if (rc)
		goto fail4;

	if (efx->type->sriov_init) {
		rc = efx->type->sriov_init(efx);
		if (rc)
			netif_err(efx, probe, efx->net_dev,
				  "SR-IOV can't be enabled rc %d\n", rc);
	}

	netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");

	/* Try to create MTDs, but allow this to fail */
	rtnl_lock();
	rc = efx_mtd_probe(efx);
	rtnl_unlock();
	if (rc && rc != -EPERM)
		netif_warn(efx, probe, efx->net_dev,
			   "failed to create MTDs (%d)\n", rc);

	rc = pci_enable_pcie_error_reporting(pci_dev);
	if (rc && rc != -EINVAL)
		netif_notice(efx, probe, efx->net_dev,
			     "PCIE error reporting unavailable (%d).\n",
			     rc);

	return 0;

 fail4:
	efx_pci_remove_main(efx);
 fail3:
	efx_fini_io(efx);
 fail2:
	efx_fini_struct(efx);
 fail1:
	WARN_ON(rc > 0);
	netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
	free_netdev(net_dev);
	return rc;
}

/* efx_pci_sriov_configure returns the actual number of Virtual Functions
 * enabled on success
 */
#ifdef CONFIG_SFC_SRIOV
static int efx_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
{
	int rc;
	struct efx_nic *efx = pci_get_drvdata(dev);

	if (efx->type->sriov_configure) {
		rc = efx->type->sriov_configure(efx, num_vfs);
		if (rc)
			return rc;
		else
			return num_vfs;
	} else
		return -EOPNOTSUPP;
}
#endif
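
/* Usage note: the PCI core invokes this callback when userspace writes
 * to the standard sriov_numvfs attribute; e.g. for a hypothetical PF at
 * 0000:01:00.0,
 *	echo 4 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
 * requests four VFs (and writing 0 disables them again).
 */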

static int efx_pm_freeze(struct device *dev)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	rtnl_lock();

	if (efx->state != STATE_DISABLED) {
		efx->state = STATE_UNINIT;

		efx_device_detach_sync(efx);

		efx_stop_all(efx);
		efx_disable_interrupts(efx);
	}

	rtnl_unlock();

	return 0;
}

static int efx_pm_thaw(struct device *dev)
{
	int rc;
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	rtnl_lock();

	if (efx->state != STATE_DISABLED) {
		rc = efx_enable_interrupts(efx);
		if (rc)
			goto fail;

		mutex_lock(&efx->mac_lock);
		efx->phy_op->reconfigure(efx);
		mutex_unlock(&efx->mac_lock);

		efx_start_all(efx);

		netif_device_attach(efx->net_dev);

		efx->state = STATE_READY;

		efx->type->resume_wol(efx);
	}

	rtnl_unlock();

	/* Reschedule any quenched resets scheduled during efx_pm_freeze() */
	queue_work(reset_workqueue, &efx->reset_work);

	return 0;

fail:
	rtnl_unlock();

	return rc;
}

static int efx_pm_poweroff(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct efx_nic *efx = pci_get_drvdata(pci_dev);

	efx->type->fini(efx);

	efx->reset_pending = 0;

	pci_save_state(pci_dev);
	return pci_set_power_state(pci_dev, PCI_D3hot);
}

/* Used for both resume and restore */
static int efx_pm_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct efx_nic *efx = pci_get_drvdata(pci_dev);
	int rc;

	rc = pci_set_power_state(pci_dev, PCI_D0);
	if (rc)
		return rc;
	pci_restore_state(pci_dev);
	rc = pci_enable_device(pci_dev);
	if (rc)
		return rc;
	pci_set_master(efx->pci_dev);
	rc = efx->type->reset(efx, RESET_TYPE_ALL);
	if (rc)
		return rc;
	rc = efx->type->init(efx);
	if (rc)
		return rc;
	rc = efx_pm_thaw(dev);
	return rc;
}

static int efx_pm_suspend(struct device *dev)
{
	int rc;

	efx_pm_freeze(dev);
	rc = efx_pm_poweroff(dev);
	if (rc)
		efx_pm_resume(dev);
	return rc;
}

static const struct dev_pm_ops efx_pm_ops = {
	.suspend	= efx_pm_suspend,
	.resume		= efx_pm_resume,
	.freeze		= efx_pm_freeze,
	.thaw		= efx_pm_thaw,
	.poweroff	= efx_pm_poweroff,
	.restore	= efx_pm_resume,
};

/* A PCI error affecting this device was detected.
 * At this point MMIO and DMA may be disabled.
 * Stop the software path and request a slot reset.
 */
static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
					      enum pci_channel_state state)
{
	pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
	struct efx_nic *efx = pci_get_drvdata(pdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	rtnl_lock();

	if (efx->state != STATE_DISABLED) {
		efx->state = STATE_RECOVERY;
		efx->reset_pending = 0;

		efx_device_detach_sync(efx);

		efx_stop_all(efx);
		efx_disable_interrupts(efx);

		status = PCI_ERS_RESULT_NEED_RESET;
	} else {
		/* If the interface is disabled we don't want to do anything
		 * with it.
		 */
		status = PCI_ERS_RESULT_RECOVERED;
	}

	rtnl_unlock();

	pci_disable_device(pdev);

	return status;
}

/* Fake a successful reset, which will be performed later in efx_io_resume. */
static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev)
{
	struct efx_nic *efx = pci_get_drvdata(pdev);
	pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
	int rc;

	if (pci_enable_device(pdev)) {
		netif_err(efx, hw, efx->net_dev,
			  "Cannot re-enable PCI device after reset.\n");
		status = PCI_ERS_RESULT_DISCONNECT;
	}

	rc = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (rc) {
		netif_err(efx, hw, efx->net_dev,
			  "pci_cleanup_aer_uncorrect_error_status failed (%d)\n", rc);
		/* Non-fatal error. Continue. */
	}

	return status;
}

/* Perform the actual reset and resume I/O operations. */
static void efx_io_resume(struct pci_dev *pdev)
{
	struct efx_nic *efx = pci_get_drvdata(pdev);
	int rc;

	rtnl_lock();

	if (efx->state == STATE_DISABLED)
		goto out;

	rc = efx_reset(efx, RESET_TYPE_ALL);
	if (rc) {
		netif_err(efx, hw, efx->net_dev,
			  "efx_reset failed after PCI error (%d)\n", rc);
	} else {
		efx->state = STATE_READY;
		netif_dbg(efx, hw, efx->net_dev,
			  "Done resetting and resuming IO after PCI error.\n");
	}

out:
	rtnl_unlock();
}

/* For simplicity and reliability, we always require a slot reset and try to
 * reset the hardware when a pci error affecting the device is detected.
 * We leave both the link_reset and mmio_enabled callback unimplemented:
 * with our request for slot reset the mmio_enabled callback will never be
 * called, and the link_reset callback is not used by AER or EEH mechanisms.
 */
static const struct pci_error_handlers efx_err_handlers = {
	.error_detected	= efx_io_error_detected,
	.slot_reset	= efx_io_slot_reset,
	.resume		= efx_io_resume,
};

static struct pci_driver efx_pci_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= efx_pci_table,
	.probe		= efx_pci_probe,
	.remove		= efx_pci_remove,
	.driver.pm	= &efx_pm_ops,
	.err_handler	= &efx_err_handlers,
#ifdef CONFIG_SFC_SRIOV
	.sriov_configure = efx_pci_sriov_configure,
#endif
};

/**************************************************************************
 *
 * Kernel module interface
 *
 *************************************************************************/

module_param(interrupt_mode, uint, 0444);
MODULE_PARM_DESC(interrupt_mode,
		 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");

static int __init efx_init_module(void)
{
	int rc;

	printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");

	rc = register_netdevice_notifier(&efx_netdev_notifier);
	if (rc)
		goto err_notifier;

#ifdef CONFIG_SFC_SRIOV
	rc = efx_init_sriov();
	if (rc)
		goto err_sriov;
#endif

	reset_workqueue = create_singlethread_workqueue("sfc_reset");
	if (!reset_workqueue) {
		rc = -ENOMEM;
		goto err_reset;
	}

	rc = pci_register_driver(&efx_pci_driver);
	if (rc < 0)
		goto err_pci;

	return 0;

 err_pci:
	destroy_workqueue(reset_workqueue);
 err_reset:
#ifdef CONFIG_SFC_SRIOV
	efx_fini_sriov();
 err_sriov:
#endif
	unregister_netdevice_notifier(&efx_netdev_notifier);
 err_notifier:
	return rc;
}

static void __exit efx_exit_module(void)
{
	printk(KERN_INFO "Solarflare NET driver unloading\n");

	pci_unregister_driver(&efx_pci_driver);
	destroy_workqueue(reset_workqueue);
#ifdef CONFIG_SFC_SRIOV
	efx_fini_sriov();
#endif
	unregister_netdevice_notifier(&efx_netdev_notifier);
}

module_init(efx_init_module);
module_exit(efx_exit_module);

MODULE_AUTHOR("Solarflare Communications and "
	      "Michael Brown <mbrown@fensystems.co.uk>");
MODULE_DESCRIPTION("Solarflare network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efx_pci_table);