// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2018 Intel Corporation. */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/if_ether.h>
#include <linux/prefetch.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/pm_runtime.h>
#include <linux/etherdevice.h>
#include <linux/lockdep.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include <linux/i2c.h>
#include "igb.h"

enum queue_mode {
	QUEUE_MODE_STRICT_PRIORITY,
	QUEUE_MODE_STREAM_RESERVATION,
};

enum tx_queue_prio {
	TX_QUEUE_PRIO_HIGH,
	TX_QUEUE_PRIO_LOW,
};

char igb_driver_name[] = "igb";
static const char igb_driver_string[] =
		"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] =
		"Copyright (c) 2007-2014 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static const struct pci_device_id igb_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static void igb_init_queue_configuration(struct igb_adapter *adapter);
static int igb_sw_init(struct igb_adapter *);
int igb_open(struct net_device *);
int igb_close(struct net_device *);
static void igb_configure(struct igb_adapter *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(struct timer_list *);
static void igb_watchdog(struct timer_list *);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static void igb_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter, bool set);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *, int);
static int igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *, unsigned int txqueue);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(struct net_device *netdev,
			  netdev_features_t features);
static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_index(struct igb_adapter *, u32);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_flush_mac_table(struct igb_adapter *);
static int igb_available_rars(struct igb_adapter *, u8);
static void igb_set_default_mac_filter(struct igb_adapter *);
static int igb_uc_sync(struct net_device *, const unsigned char *);
static int igb_uc_unsync(struct net_device *, const unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos, __be16 vlan_proto);
static int igb_ndo_set_vf_bw(struct net_device *, int, int, int);
static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
				   bool setting);
static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf,
				bool setting);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
				 struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);
static void igb_nfc_filter_exit(struct igb_adapter *adapter);
static void igb_nfc_filter_restore(struct igb_adapter *adapter);

#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
static int igb_disable_sriov(struct pci_dev *dev, bool reinit);
#endif

#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0444);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function");
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
					      pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static const struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};

static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);

MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL v2");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
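
/* Usage example (illustrative, not part of the upstream file): "debug" is
 * fed to netif_msg_init(), so the default of -1 keeps DEFAULT_MSG_ENABLE
 * above, while an explicit bitmap selects message classes directly, e.g.
 *
 *	modprobe igb debug=0x0007
 *
 * enables NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK (0x1|0x2|0x4).
 */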

struct igb_reg_info {
	u32 ofs;
	char *name;
};

static const struct igb_reg_info igb_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* RX Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};

/* igb_regdump - register printout routine */
static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDLEN(n));
		break;
	case E1000_RDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDH(n));
		break;
	case E1000_RDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDT(n));
		break;
	case E1000_RXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RXDCTL(n));
		break;
	case E1000_RDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAL(n));
		break;
	case E1000_RDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAH(n));
		break;
	case E1000_TDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAL(n));
		break;
	case E1000_TDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAH(n));
		break;
	case E1000_TDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDLEN(n));
		break;
	case E1000_TDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDH(n));
		break;
	case E1000_TDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDT(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TXDCTL(n));
		break;
	default:
		pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
	pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
		regs[2], regs[3]);
}
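
/* Illustrative output (values invented): for the four-instance registers
 * above, igb_regdump() folds the per-queue reads into a single line, e.g.
 *
 *	RDT[0-3]        000000ff 000000ff 00000000 00000000
 *
 * while single-instance registers such as CTRL print one value per line.
 */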

/* igb_dump - Print registers, Tx-rings and Rx-rings */
static void igb_dump(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_reg_info *reginfo;
	struct igb_ring *tx_ring;
	union e1000_adv_tx_desc *tx_desc;
	struct my_u0 { __le64 a; __le64 b; } *u0;
	struct igb_ring *rx_ring;
	union e1000_adv_rx_desc *rx_desc;
	u32 staterr;
	u16 i, n;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            trans_start\n");
		pr_info("%-15s %016lX %016lX\n", netdev->name,
			netdev->state, dev_trans_start(netdev));
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
	     reginfo->name; reginfo++) {
		igb_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		struct igb_tx_buffer *buffer_info;
		tx_ring = adapter->tx_ring[n];
		buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)dma_unmap_addr(buffer_info, dma),
			dma_unmap_len(buffer_info, len),
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 | PAYLEN  | PORTS  |CC|IDX | STA | DCMD  |DTYP|MAC|RSV| DTALEN |
	 *   +--------------------------------------------------------------+
	 *   63      46 45    40 39 38 36 35 32 31  24             15       0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("T [desc]     [address 63:0  ] [PlPOCIStDDM Ln] [bi->dma       ] leng  ntw timestamp        bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			const char *next_desc;
			struct igb_tx_buffer *buffer_info;
			tx_desc = IGB_TX_DESC(tx_ring, i);
			buffer_info = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			if (i == tx_ring->next_to_use &&
			    i == tx_ring->next_to_clean)
				next_desc = " NTC/U";
			else if (i == tx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == tx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			pr_info("T [0x%03X]    %016llX %016llX %016llX %04X  %p %016llX %p%s\n",
				i, le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)dma_unmap_addr(buffer_info, dma),
				dma_unmap_len(buffer_info, len),
				buffer_info->next_to_watch,
				(u64)buffer_info->time_stamp,
				buffer_info->skb, next_desc);

			if (netif_msg_pktdata(adapter) && buffer_info->skb)
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS,
					16, 1, buffer_info->skb->data,
					dma_unmap_len(buffer_info, len),
					true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info(" %5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS  |
	 *   | Checksum   Ident  |   |           |    | Type | Type  |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */

	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("R  [desc]      [ PktBuf     A0] [  HeadBuf   DD] [bi->dma       ] [bi->skb] <-- Adv Rx Read format\n");
		pr_info("RWB[desc]      [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;
			struct igb_rx_buffer *buffer_info;
			buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IGB_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X]     %016llX %016llX ---------------- %s\n",
					"RWB", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					next_desc);
			} else {
				pr_info("%s[0x%03X]     %016llX %016llX %016llX %s\n",
					"R  ", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)buffer_info->dma,
					next_desc);

				if (netif_msg_pktdata(adapter) &&
				    buffer_info->dma && buffer_info->page) {
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						page_address(buffer_info->page) +
							buffer_info->page_offset,
						igb_rx_bufsz(rx_ring), true);
				}
			}
		}
	}

exit:
	return;
}

/**
 * igb_get_i2c_data - Reads the I2C SDA data bit
 * @data: opaque pointer to adapter struct
 *
 * Returns the I2C data bit value
 **/
static int igb_get_i2c_data(void *data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	return !!(i2cctl & E1000_I2C_DATA_IN);
}

/**
 * igb_set_i2c_data - Sets the I2C data bit
 * @data: pointer to hardware structure
 * @state: I2C data value (0 or 1) to set
 *
 * Sets the I2C data bit
 **/
static void igb_set_i2c_data(void *data, int state)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	if (state) {
		i2cctl |= E1000_I2C_DATA_OUT | E1000_I2C_DATA_OE_N;
	} else {
		i2cctl &= ~E1000_I2C_DATA_OE_N;
		i2cctl &= ~E1000_I2C_DATA_OUT;
	}

	wr32(E1000_I2CPARAMS, i2cctl);
	wrfl();
}

/**
 * igb_set_i2c_clk - Sets the I2C SCL clock
 * @data: pointer to hardware structure
 * @state: state to set clock
 *
 * Sets the I2C clock line to state
 **/
static void igb_set_i2c_clk(void *data, int state)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	if (state) {
		i2cctl |= E1000_I2C_CLK_OUT | E1000_I2C_CLK_OE_N;
	} else {
		i2cctl &= ~E1000_I2C_CLK_OUT;
		i2cctl &= ~E1000_I2C_CLK_OE_N;
	}
	wr32(E1000_I2CPARAMS, i2cctl);
	wrfl();
}

/**
 * igb_get_i2c_clk - Gets the I2C SCL clock state
 * @data: pointer to hardware structure
 *
 * Gets the I2C clock state
 **/
static int igb_get_i2c_clk(void *data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	return !!(i2cctl & E1000_I2C_CLK_IN);
}

static const struct i2c_algo_bit_data igb_i2c_algo = {
	.setsda		= igb_set_i2c_data,
	.setscl		= igb_set_i2c_clk,
	.getsda		= igb_get_i2c_data,
	.getscl		= igb_get_i2c_clk,
	.udelay		= 5,
	.timeout	= 20,
};
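
/* These callbacks give the generic i2c-algo-bit layer an open-drain,
 * bit-banged bus: note that OE_N above releases a line by tri-stating the
 * driver instead of actively driving it high. A minimal wiring sketch
 * (mirroring what igb_init_i2c() does later in this file; shown here only
 * for illustration):
 *
 *	adapter->i2c_algo = igb_i2c_algo;
 *	adapter->i2c_algo.data = adapter;
 *	adapter->i2c_adap.algo_data = &adapter->i2c_algo;
 *	adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
 *	err = i2c_bit_add_bus(&adapter->i2c_adap);
 *
 * With .udelay = 5 the resulting SCL rate is on the order of 100 kHz.
 */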

/**
 * igb_get_hw_dev - return device
 * @hw: pointer to hardware structure
 *
 * used by hardware layer to print debugging information
 **/
struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev;
}

static struct pci_driver igb_driver;

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;

	pr_info("%s\n", igb_driver_string);
	pr_info("%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
#ifdef CONFIG_IGB_DCA
	if (ret)
		dca_unregister_notify(&dca_notifier);
#endif
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i]->reg_idx = rbase_offset +
							       Q_IDX_82576(i);
		}
		fallthrough;
	case e1000_82575:
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
		break;
	}
}
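
/* Worked example of the 82576 interleaving above: Q_IDX_82576(i) yields
 * 0, 8, 1, 9, 2, 10, ... for i = 0, 1, 2, 3, 4, 5, ... So with
 * vfs_allocated_count = 4, the PF's Rx rings 0..3 land on register offsets
 * 4, 12, 5, 13, continuing past the VF pairs (0,8), (1,9), (2,10), (3,11)
 * without collision.
 */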

u32 igb_rd32(struct e1000_hw *hw, u32 reg)
{
	struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
	u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
	u32 value = 0;

	if (E1000_REMOVED(hw_addr))
		return ~value;

	value = readl(&hw_addr[reg]);

	/* reads should not return all F's */
	if (!(~value) && (!reg || !(~readl(hw_addr)))) {
		struct net_device *netdev = igb->netdev;
		hw->hw_addr = NULL;
		netdev_err(netdev, "PCIe link lost\n");
		WARN(pci_device_is_present(igb->pdev),
		     "igb: Failed to read reg 0x%x!\n", reg);
	}

	return value;
}
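
/* Usage sketch: the rest of this file goes through the rd32() macro, which
 * expands to igb_rd32(hw, reg). A surprise-removed PCIe device reads back
 * all ones, so a caller can detect removal with:
 *
 *	u32 status = rd32(E1000_STATUS);
 *
 *	if (status == ~0U)
 *		return;
 *
 * igb_rd32() latches such a failure by clearing hw->hw_addr above.
 */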

/**
 * igb_write_ivar - configure ivar for given MSI-X vector
 * @hw: pointer to the HW structure
 * @msix_vector: vector number we are allocating to a given ring
 * @index: row index of IVAR register to write within IVAR table
 * @offset: column offset within IVAR, should be a multiple of 8
 *
 * This function is intended to handle the writing of the IVAR register
 * for adapters 82576 and newer.  The IVAR table consists of 2 columns,
 * each containing a cause allocation for an Rx and Tx ring, and a
 * variable number of rows depending on the number of queues supported.
 **/
static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
			   int index, int offset)
{
	u32 ivar = array_rd32(E1000_IVAR0, index);

	/* clear any bits that are currently set */
	ivar &= ~((u32)0xFF << offset);

	/* write vector and valid bit */
	ivar |= (msix_vector | E1000_IVAR_VALID) << offset;

	array_wr32(E1000_IVAR0, index, ivar);
}

#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;
	u32 msixbm = 0;

	if (q_vector->rx.ring)
		rx_queue = q_vector->rx.ring->reg_idx;
	if (q_vector->tx.ring)
		tx_queue = q_vector->tx.ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		 * bitmask for the EICR/EIMS/EIMC registers.  To assign one
		 * or more queues to a vector, we write the appropriate bits
		 * into the MSIXBM register for that vector.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0)
			msixbm |= E1000_EIMS_OTHER;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/* 82576 uses a table that essentially consists of 2 columns
		 * with 8 rows.  The ordering is column-major so we use the
		 * lower 3 bits as the row index, and the 4th bit as the
		 * column offset.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue & 0x7,
				       (rx_queue & 0x8) << 1);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue & 0x7,
				       ((tx_queue & 0x8) << 1) + 8);
		q_vector->eims_value = BIT(msix_vector);
		break;
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		/* On 82580 and newer adapters the scheme is similar to 82576
		 * however instead of ordering column-major we have things
		 * ordered row-major.  So we traverse the table by using
		 * bit 0 as the column offset, and the remaining bits as the
		 * row index.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue >> 1,
				       (rx_queue & 0x1) << 4);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue >> 1,
				       ((tx_queue & 0x1) << 4) + 8);
		q_vector->eims_value = BIT(msix_vector);
		break;
	default:
		BUG();
		break;
	}

	/* add q_vector eims value to global eims_enable_mask */
	adapter->eims_enable_mask |= q_vector->eims_value;

	/* configure q_vector to set itr on first interrupt */
	q_vector->set_itr = 1;
}
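
/* Worked example of the IVAR addressing in igb_assign_vector(): on the
 * column-major 82576, Rx queue 9 uses row 9 & 0x7 = 1 and column offset
 * (9 & 0x8) << 1 = 16; on the row-major 82580 and newer, the same Rx
 * queue 9 uses row 9 >> 1 = 4 and column offset (9 & 0x1) << 4 = 16.
 * The matching Tx cause lands 8 bits further into the same IVAR word.
 */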

/**
 * igb_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure to initialize
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support */
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug.
		 */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		     E1000_GPIE_PBA | E1000_GPIE_EIAME |
		     E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = BIT(vector);
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++)
		igb_assign_vector(adapter->q_vector[i], vector++);

	wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 * @adapter: board private structure to initialize
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	unsigned int num_q_vectors = adapter->num_q_vectors;
	struct net_device *netdev = adapter->netdev;
	int i, err = 0, vector = 0, free_vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
			  igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto err_out;

	if (num_q_vectors > MAX_Q_VECTORS) {
		num_q_vectors = MAX_Q_VECTORS;
		dev_warn(&adapter->pdev->dev,
			 "The number of queue vectors (%d) is higher than max allowed (%d)\n",
			 adapter->num_q_vectors, MAX_Q_VECTORS);
	}
	for (i = 0; i < num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		vector++;

		q_vector->itr_register = adapter->io_addr + E1000_EITR(vector);

		if (q_vector->rx.ring && q_vector->tx.ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else if (q_vector->tx.ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
				q_vector->tx.ring->queue_index);
		else if (q_vector->rx.ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
				  igb_msix_ring, 0, q_vector->name,
				  q_vector);
		if (err)
			goto err_free;
	}

	igb_configure_msix(adapter);
	return 0;

err_free:
	/* free already assigned IRQs */
	free_irq(adapter->msix_entries[free_vector++].vector, adapter);

	vector--;
	for (i = 0; i < vector; i++) {
		free_irq(adapter->msix_entries[free_vector++].vector,
			 adapter->q_vector[i]);
	}
err_out:
	return err;
}

/**
 * igb_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.
 **/
static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	adapter->q_vector[v_idx] = NULL;

	/* igb_get_stats64() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
	if (q_vector)
		kfree_rcu(q_vector, rcu);
}

/**
 * igb_reset_q_vector - Reset config for interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be reset
 *
 * If NAPI is enabled it will delete any references to the
 * NAPI struct. This is preparation for igb_free_q_vector.
 **/
static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	/* Coming from igb_set_interrupt_capability, the vectors are not yet
	 * allocated. So, q_vector is NULL and we should stop here.
	 */
	if (!q_vector)
		return;

	if (q_vector->tx.ring)
		adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;

	if (q_vector->rx.ring)
		adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;

	netif_napi_del(&q_vector->napi);

}

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	if (adapter->flags & IGB_FLAG_HAS_MSIX)
		pci_disable_msix(adapter->pdev);
	else if (adapter->flags & IGB_FLAG_HAS_MSI)
		pci_disable_msi(adapter->pdev);

	while (v_idx--)
		igb_reset_q_vector(adapter, v_idx);
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--) {
		igb_reset_q_vector(adapter, v_idx);
		igb_free_q_vector(adapter, v_idx);
	}
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 * @adapter: board private structure to initialize
 *
 * This function resets the device so that it has 0 Rx queues, Tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 * @adapter: board private structure to initialize
 * @msix: boolean value of MSIX capability
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
{
	int err;
	int numvecs, i;

	if (!msix)
		goto msi_only;
	adapter->flags |= IGB_FLAG_HAS_MSIX;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;
	if (adapter->vfs_allocated_count)
		adapter->num_tx_queues = 1;
	else
		adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every Rx queue */
	numvecs = adapter->num_rx_queues;

	/* if Tx handler is separate add 1 for every Tx queue */
	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix_range(adapter->pdev,
				    adapter->msix_entries,
				    numvecs,
				    numvecs);
	if (err > 0)
		return;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
	adapter->flags &= ~IGB_FLAG_HAS_MSIX;
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_mac_list);
		adapter->vf_mac_list = NULL;
		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->rss_queues = 1;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
}
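
/* Worked example of the vector accounting above: with rss_queues = 4, no
 * VFs and IGB_FLAG_QUEUE_PAIRS set, num_rx_queues = num_tx_queues = 4 and
 * num_q_vectors = 4; the trailing numvecs++ then asks for a 5th MSI-X
 * vector for link/other causes. Without queue pairing the same setup
 * requests 4 + 4 + 1 = 9 vectors.
 */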

static void igb_add_ring(struct igb_ring *ring,
			 struct igb_ring_container *head)
{
	head->ring = ring;
	head->count++;
}

/**
 * igb_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int igb_alloc_q_vector(struct igb_adapter *adapter,
			      int v_count, int v_idx,
			      int txr_count, int txr_idx,
			      int rxr_count, int rxr_idx)
{
	struct igb_q_vector *q_vector;
	struct igb_ring *ring;
	int ring_count;
	size_t size;

	/* igb only supports 1 Tx and/or 1 Rx queue per vector */
	if (txr_count > 1 || rxr_count > 1)
		return -ENOMEM;

	ring_count = txr_count + rxr_count;
	size = kmalloc_size_roundup(struct_size(q_vector, ring, ring_count));

	/* allocate q_vector and rings */
	q_vector = adapter->q_vector[v_idx];
	if (!q_vector) {
		q_vector = kzalloc(size, GFP_KERNEL);
	} else if (size > ksize(q_vector)) {
		struct igb_q_vector *new_q_vector;

		new_q_vector = kzalloc(size, GFP_KERNEL);
		if (new_q_vector)
			kfree_rcu(q_vector, rcu);
		q_vector = new_q_vector;
	} else {
		memset(q_vector, 0, size);
	}
	if (!q_vector)
		return -ENOMEM;

	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* initialize ITR configuration */
	q_vector->itr_register = adapter->io_addr + E1000_EITR(0);
	q_vector->itr_val = IGB_START_ITR;

	/* initialize pointer to rings */
	ring = q_vector->ring;

	/* initialize ITR */
	if (rxr_count) {
		/* rx or rx/tx vector */
		if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
			q_vector->itr_val = adapter->rx_itr_setting;
	} else {
		/* tx only vector */
		if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
			q_vector->itr_val = adapter->tx_itr_setting;
	}

	if (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		igb_add_ring(ring, &q_vector->tx);

		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = txr_idx;

		ring->cbs_enable = false;
		ring->idleslope = 0;
		ring->sendslope = 0;
		ring->hicredit = 0;
		ring->locredit = 0;

		u64_stats_init(&ring->tx_syncp);
		u64_stats_init(&ring->tx_syncp2);

		/* assign ring to adapter */
		adapter->tx_ring[txr_idx] = ring;

		/* push pointer to next ring */
		ring++;
	}

	if (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		igb_add_ring(ring, &q_vector->rx);

		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);

		/* On i350, i354, i210, and i211, loopback VLAN packets
		 * have the tag byte-swapped.
		 */
		if (adapter->hw.mac.type >= e1000_i350)
			set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);

		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		ring->queue_index = rxr_idx;

		u64_stats_init(&ring->rx_syncp);

		/* assign ring to adapter */
		adapter->rx_ring[rxr_idx] = ring;
	}

	return 0;
}


/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	int q_vectors = adapter->num_q_vectors;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
	int err;

	if (q_vectors >= (rxr_remaining + txr_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
						 0, 0, 1, rxr_idx);

			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);

		err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
					 tqpv, txr_idx, rqpv, rxr_idx);

		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		rxr_idx++;
		txr_idx++;
	}

	return 0;

err_out:
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		igb_free_q_vector(adapter, v_idx);

	return -ENOMEM;
}
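
/* Worked example of the distribution above: with 4 Rx and 4 Tx queues but
 * only 4 q_vectors, the first loop is skipped (4 < 8) and every pass of
 * the second loop computes rqpv = tqpv = 1, pairing one Rx and one Tx ring
 * per vector. With 8 q_vectors for the same queues, the first loop gives
 * vectors 0-3 one Rx ring each and the second loop gives vectors 4-7 one
 * Tx ring each.
 */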

/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 * @adapter: board private structure to initialize
 * @msix: boolean value of MSIX capability
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;

	igb_set_interrupt_capability(adapter, msix);

	err = igb_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	igb_cache_ring_register(adapter);

	return 0;

err_alloc_q_vectors:
	igb_reset_interrupt_capability(adapter);
	return err;
}

/**
 * igb_request_irq - initialize interrupts
 * @adapter: board private structure to initialize
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err = 0;

	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);

		igb_clear_interrupt_scheme(adapter);
		err = igb_init_interrupt_scheme(adapter, false);
		if (err)
			goto request_done;

		igb_setup_all_tx_resources(adapter);
		igb_setup_all_rx_resources(adapter);
		igb_configure(adapter);
	}

	igb_assign_vector(adapter->q_vector[0], 0);

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(pdev->irq, igb_intr_msi, 0,
				  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
			  netdev->name, adapter);

	if (err)
		dev_err(&pdev->dev, "Error %d getting interrupt\n",
			err);

request_done:
	return err;
}

static void igb_free_irq(struct igb_adapter *adapter)
{
	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		int vector = 0, i;

		free_irq(adapter->msix_entries[vector++].vector, adapter);

		for (i = 0; i < adapter->num_q_vectors; i++)
			free_irq(adapter->msix_entries[vector++].vector,
				 adapter->q_vector[i]);
	} else {
		free_irq(adapter->pdev->irq, adapter);
	}
}

/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* we need to be careful when disabling interrupts.  The VFs are also
	 * mapped into these registers and so clearing the bits can cause
	 * issues on the VF drivers so we only need to clear what we set
	 */
	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		u32 regval = rd32(E1000_EIAM);

		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(E1000_EIMC, adapter->eims_enable_mask);
		regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		int i;

		for (i = 0; i < adapter->num_q_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}

/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
		u32 regval = rd32(E1000_EIAC);

		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count) {
			wr32(E1000_MBVFIMR, 0xFF);
			ims |= E1000_IMS_VMMB;
		}
		wr32(E1000_IMS, ims);
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
		wr32(E1000_IAM, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
	}
}

static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 pf_id = adapter->vfs_allocated_count;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
		/* add VID to filter table */
		igb_vfta_set(hw, vid, pf_id, true, true);
		adapter->mng_vlan_id = vid;
	} else {
		adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
	}

	if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
	    (vid != old_vid) &&
	    !test_bit(old_vid, adapter->active_vlans)) {
		/* remove VID from filter table */
		igb_vfta_set(hw, vid, pf_id, false, true);
	}
}

/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

static void enable_fqtss(struct igb_adapter *adapter, bool enable)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;

	WARN_ON(hw->mac.type != e1000_i210);

	if (enable)
		adapter->flags |= IGB_FLAG_FQTSS;
	else
		adapter->flags &= ~IGB_FLAG_FQTSS;

	if (netif_running(netdev))
		schedule_work(&adapter->reset_task);
}

static bool is_fqtss_enabled(struct igb_adapter *adapter)
{
	return (adapter->flags & IGB_FLAG_FQTSS) ? true : false;
}

static void set_tx_desc_fetch_prio(struct e1000_hw *hw, int queue,
				   enum tx_queue_prio prio)
{
	u32 val;

	WARN_ON(hw->mac.type != e1000_i210);
	WARN_ON(queue < 0 || queue > 4);

	val = rd32(E1000_I210_TXDCTL(queue));

	if (prio == TX_QUEUE_PRIO_HIGH)
		val |= E1000_TXDCTL_PRIORITY;
	else
		val &= ~E1000_TXDCTL_PRIORITY;

	wr32(E1000_I210_TXDCTL(queue), val);
}

static void set_queue_mode(struct e1000_hw *hw, int queue, enum queue_mode mode)
{
	u32 val;

	WARN_ON(hw->mac.type != e1000_i210);
	WARN_ON(queue < 0 || queue > 1);

	val = rd32(E1000_I210_TQAVCC(queue));

	if (mode == QUEUE_MODE_STREAM_RESERVATION)
		val |= E1000_TQAVCC_QUEUEMODE;
	else
		val &= ~E1000_TQAVCC_QUEUEMODE;

	wr32(E1000_I210_TQAVCC(queue), val);
}

static bool is_any_cbs_enabled(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		if (adapter->tx_ring[i]->cbs_enable)
			return true;
	}

	return false;
}

static bool is_any_txtime_enabled(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		if (adapter->tx_ring[i]->launchtime_enable)
			return true;
	}

	return false;
}

/**
 * igb_config_tx_modes - Configure "Qav Tx mode" features on igb
 * @adapter: pointer to adapter struct
 * @queue: queue number
 *
 * Configure CBS and Launchtime for a given hardware queue.
 * Parameters are retrieved from the correct Tx ring, so
 * igb_save_cbs_params() and igb_save_txtime_params() should be used
 * for setting those correctly prior to this function being called.
 **/
static void igb_config_tx_modes(struct igb_adapter *adapter, int queue)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_ring *ring;
	u32 tqavcc, tqavctrl;
	u16 value;

	WARN_ON(hw->mac.type != e1000_i210);
	WARN_ON(queue < 0 || queue > 1);
	ring = adapter->tx_ring[queue];

	/* If any of the Qav features is enabled, configure queues as SR and
	 * with HIGH PRIO. If none is, then configure them with LOW PRIO and
	 * as SP.
	 */
	if (ring->cbs_enable || ring->launchtime_enable) {
		set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH);
		set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION);
	} else {
		set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW);
		set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY);
	}

	/* If CBS is enabled, set DataTranARB and config its parameters. */
	if (ring->cbs_enable || queue == 0) {
		/* i210 does not allow the queue 0 to be in the Strict
		 * Priority mode while the Qav mode is enabled, so,
		 * instead of disabling strict priority mode, we give
		 * queue 0 the maximum of credits possible.
		 *
		 * See section 8.12.19 of the i210 datasheet, "Note:
		 * Queue0 QueueMode must be set to 1b when
		 * TransmitMode is set to Qav."
		 */
		if (queue == 0 && !ring->cbs_enable) {
			/* max "linkspeed" idleslope in kbps */
			ring->idleslope = 1000000;
			ring->hicredit = ETH_FRAME_LEN;
		}

		/* Always set data transfer arbitration to credit-based
		 * shaper algorithm on TQAVCTRL if CBS is enabled for any of
		 * the queues.
		 */
		tqavctrl = rd32(E1000_I210_TQAVCTRL);
		tqavctrl |= E1000_TQAVCTRL_DATATRANARB;
		wr32(E1000_I210_TQAVCTRL, tqavctrl);
		/* According to i210 datasheet section 7.2.7.7, we should set
		 * the 'idleSlope' field from TQAVCC register following the
		 * equation:
		 *
		 * For 100 Mbps link speed:
		 *
		 *     value = BW * 0x7735 * 0.2                          (E1)
		 *
		 * For 1000Mbps link speed:
		 *
		 *     value = BW * 0x7735 * 2                            (E2)
		 *
		 * E1 and E2 can be merged into one equation as shown below.
		 * Note that 'link-speed' is in Mbps.
		 *
		 *     value = BW * 0x7735 * 2 * link-speed
		 *                           --------------               (E3)
		 *                                1000
		 *
		 * 'BW' is the percentage bandwidth out of full link speed
		 * which can be found with the following equation. Note that
		 * idleSlope here is the parameter from this function which
		 * is in kbps.
		 *
		 *     BW =     idleSlope
		 *          -----------------                             (E4)
		 *          link-speed * 1000
		 *
		 * That said, we can come up with a generic equation to
		 * calculate the value we should set in the TQAVCC register by
		 * replacing 'BW' in E3 by E4. The resulting equation is:
		 *
		 *     value =     idleSlope     * 0x7735 * 2 * link-speed
		 *             -----------------            --------------  (E5)
		 *             link-speed * 1000                 1000
		 *
		 * 'link-speed' is present in both sides of the fraction so
		 * it is canceled out. The final equation is the following:
		 *
		 *     value = idleSlope * 61034
		 *             -----------------                          (E6)
		 *                  1000000
		 *
		 * NOTE: For i210, given the above, we can see that idleslope
		 *       is represented in 16.38431 kbps units by the value at
		 *       the TQAVCC register (1Gbps / 61034), which reduces
		 *       the granularity for idleslope increments.
		 *       For instance, if you want to configure a 2576kbps
		 *       idleslope, the value to be written on the register
		 *       would have to be 157.23. If rounded down, you end
		 *       up with less bandwidth available than originally
		 *       required (~2572 kbps). If rounded up, you end up
		 *       with a higher bandwidth (~2589 kbps). Below the
		 *       approach we take is to always round up the
		 *       calculated value, so the resulting bandwidth might
		 *       be slightly higher for some configurations.
		 */
		value = DIV_ROUND_UP_ULL(ring->idleslope * 61034ULL, 1000000);

		tqavcc = rd32(E1000_I210_TQAVCC(queue));
		tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
		tqavcc |= value;
		wr32(E1000_I210_TQAVCC(queue), tqavcc);

		wr32(E1000_I210_TQAVHC(queue),
		     0x80000000 + ring->hicredit * 0x7735);
	} else {

		/* Set idleSlope to zero. */
		tqavcc = rd32(E1000_I210_TQAVCC(queue));
		tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
		wr32(E1000_I210_TQAVCC(queue), tqavcc);

		/* Set hiCredit to zero. */
		wr32(E1000_I210_TQAVHC(queue), 0);

		/* If CBS is not enabled for any queues anymore, then return to
		 * the default state of Data Transmission Arbitration on
		 * TQAVCTRL.
		 */
		if (!is_any_cbs_enabled(adapter)) {
			tqavctrl = rd32(E1000_I210_TQAVCTRL);
			tqavctrl &= ~E1000_TQAVCTRL_DATATRANARB;
			wr32(E1000_I210_TQAVCTRL, tqavctrl);
		}
	}

	/* If LaunchTime is enabled, set DataTranTIM. */
	if (ring->launchtime_enable) {
		/* Always set DataTranTIM on TQAVCTRL if LaunchTime is enabled
		 * for any of the SR queues, and configure fetchtime delta.
		 * XXX NOTE:
		 * - LaunchTime will be enabled for all SR queues.
		 * - A fixed offset can be added relative to the launch
		 *   time of all packets if configured at reg LAUNCH_OS0.
		 *   We are keeping it as 0 for now (default value).
		 */
		tqavctrl = rd32(E1000_I210_TQAVCTRL);
		tqavctrl |= E1000_TQAVCTRL_DATATRANTIM |
			    E1000_TQAVCTRL_FETCHTIME_DELTA;
		wr32(E1000_I210_TQAVCTRL, tqavctrl);
	} else {
		/* If Launchtime is not enabled for any SR queues anymore,
		 * then clear DataTranTIM on TQAVCTRL and clear fetchtime delta,
		 * effectively disabling Launchtime.
		 */
		if (!is_any_txtime_enabled(adapter)) {
			tqavctrl = rd32(E1000_I210_TQAVCTRL);
			tqavctrl &= ~E1000_TQAVCTRL_DATATRANTIM;
			tqavctrl &= ~E1000_TQAVCTRL_FETCHTIME_DELTA;
			wr32(E1000_I210_TQAVCTRL, tqavctrl);
		}
	}

	/* XXX: In i210 controller the sendSlope and loCredit parameters from
	 * CBS are not configurable by software so we don't do any 'controller
	 * configuration' in respect to these parameters.
	 */

	netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n",
		   ring->cbs_enable ? "enabled" : "disabled",
		   ring->launchtime_enable ? "enabled" : "disabled",
		   queue,
		   ring->idleslope, ring->sendslope,
		   ring->hicredit, ring->locredit);
}
1839
1840static int igb_save_txtime_params(struct igb_adapter *adapter, int queue,
1841 bool enable)
1842{
1843 struct igb_ring *ring;
1844
1845	if (queue < 0 || queue >= adapter->num_tx_queues)
1846 return -EINVAL;
1847
1848 ring = adapter->tx_ring[queue];
1849 ring->launchtime_enable = enable;
1850
1851 return 0;
1852}
1853
1854static int igb_save_cbs_params(struct igb_adapter *adapter, int queue,
1855 bool enable, int idleslope, int sendslope,
1856 int hicredit, int locredit)
1857{
1858 struct igb_ring *ring;
1859
1860	if (queue < 0 || queue >= adapter->num_tx_queues)
1861 return -EINVAL;
1862
1863 ring = adapter->tx_ring[queue];
1864
1865 ring->cbs_enable = enable;
1866 ring->idleslope = idleslope;
1867 ring->sendslope = sendslope;
1868 ring->hicredit = hicredit;
1869 ring->locredit = locredit;
1870
1871 return 0;
1872}
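/* Note on units, matching the cbs qdisc parameters: idleslope and sendslope
 * are in kbps, while hicredit and locredit are in bytes. Only idleslope and
 * hicredit are actually programmed into the i210; see the XXX note in
 * igb_config_tx_modes() about sendSlope and loCredit.
 */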
1873
1874/**
1875 * igb_setup_tx_mode - Switch to/from Qav Tx mode when applicable
1876 * @adapter: pointer to adapter struct
1877 *
1878 * Configure the TQAVCTRL register, switching the controller's Tx mode
1879 * when FQTSS mode is enabled or disabled. Additionally, issue a call
1880 * to igb_config_tx_modes() per queue so that any previously saved Tx
1881 * parameters are applied.
1882 **/
1883static void igb_setup_tx_mode(struct igb_adapter *adapter)
1884{
1885 struct net_device *netdev = adapter->netdev;
1886 struct e1000_hw *hw = &adapter->hw;
1887 u32 val;
1888
1889 /* Only i210 controller supports changing the transmission mode. */
1890 if (hw->mac.type != e1000_i210)
1891 return;
1892
1893 if (is_fqtss_enabled(adapter)) {
1894 int i, max_queue;
1895
1896 /* Configure TQAVCTRL register: set transmit mode to 'Qav',
1897 * set data fetch arbitration to 'round robin', set SP_WAIT_SR
1898 * so SP queues wait for SR ones.
1899 */
1900 val = rd32(E1000_I210_TQAVCTRL);
1901 val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_SP_WAIT_SR;
1902 val &= ~E1000_TQAVCTRL_DATAFETCHARB;
1903 wr32(E1000_I210_TQAVCTRL, val);
1904
1905 /* Configure Tx and Rx packet buffers sizes as described in
1906 * i210 datasheet section 7.2.7.7.
1907 */
1908 val = rd32(E1000_TXPBS);
1909 val &= ~I210_TXPBSIZE_MASK;
1910 val |= I210_TXPBSIZE_PB0_6KB | I210_TXPBSIZE_PB1_6KB |
1911 I210_TXPBSIZE_PB2_6KB | I210_TXPBSIZE_PB3_6KB;
1912 wr32(E1000_TXPBS, val);
1913
1914 val = rd32(E1000_RXPBS);
1915 val &= ~I210_RXPBSIZE_MASK;
1916 val |= I210_RXPBSIZE_PB_30KB;
1917 wr32(E1000_RXPBS, val);
1918
1919 /* Section 8.12.9 states that MAX_TPKT_SIZE from DTXMXPKTSZ
1920 * register should not exceed the buffer size programmed in
1921 * TXPBS. The smallest buffer size programmed in TXPBS is 4kB
1922 * so according to the datasheet we should set MAX_TPKT_SIZE to
1923 * 4kB / 64.
1924 *
1925		 * However, when we do so, no frames from queues 2 and 3 are
1926		 * transmitted. It seems MAX_TPKT_SIZE must not be greater than
1927		 * _or equal to_ the buffer size programmed in TXPBS. For this
1928		 * reason, we set MAX_TPKT_SIZE to (4kB - 1) / 64.
1929 */
1930 val = (4096 - 1) / 64;
1931 wr32(E1000_I210_DTXMXPKTSZ, val);
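		/* That is, val = (4096 - 1) / 64 = 63 in 64-byte units, so the
		 * maximum fetchable packet is capped at 63 * 64 = 4032 bytes,
		 * safely below the 4 kB packet buffer.
		 */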
1932
1933 /* Since FQTSS mode is enabled, apply any CBS configuration
1934 * previously set. If no previous CBS configuration has been
1935 * done, then the initial configuration is applied, which means
1936 * CBS is disabled.
1937 */
1938 max_queue = (adapter->num_tx_queues < I210_SR_QUEUES_NUM) ?
1939 adapter->num_tx_queues : I210_SR_QUEUES_NUM;
1940
1941		for (i = 0; i < max_queue; i++)
1942			igb_config_tx_modes(adapter, i);
1944 } else {
1945 wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
1946 wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
1947 wr32(E1000_I210_DTXMXPKTSZ, I210_DTXMXPKTSZ_DEFAULT);
1948
1949 val = rd32(E1000_I210_TQAVCTRL);
1950 /* According to Section 8.12.21, the other flags we've set when
1951		 * enabling FQTSS are not relevant when disabling FQTSS, so we
1952		 * don't clear them here.
1953 */
1954 val &= ~E1000_TQAVCTRL_XMIT_MODE;
1955 wr32(E1000_I210_TQAVCTRL, val);
1956 }
1957
1958 netdev_dbg(netdev, "FQTSS %s\n", (is_fqtss_enabled(adapter)) ?
1959 "enabled" : "disabled");
1960}
1961
1962/**
1963 * igb_configure - configure the hardware for RX and TX
1964 * @adapter: private board structure
1965 **/
1966static void igb_configure(struct igb_adapter *adapter)
1967{
1968 struct net_device *netdev = adapter->netdev;
1969 int i;
1970
1971 igb_get_hw_control(adapter);
1972 igb_set_rx_mode(netdev);
1973 igb_setup_tx_mode(adapter);
1974
1975 igb_restore_vlan(adapter);
1976
1977 igb_setup_tctl(adapter);
1978 igb_setup_mrqc(adapter);
1979 igb_setup_rctl(adapter);
1980
1981 igb_nfc_filter_restore(adapter);
1982 igb_configure_tx(adapter);
1983 igb_configure_rx(adapter);
1984
1985 igb_rx_fifo_flush_82575(&adapter->hw);
1986
1987 /* call igb_desc_unused which always leaves
1988 * at least 1 descriptor unused to make sure
1989 * next_to_use != next_to_clean
1990 */
1991 for (i = 0; i < adapter->num_rx_queues; i++) {
1992 struct igb_ring *ring = adapter->rx_ring[i];
1993 igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
1994 }
1995}
1996
1997/**
1998 * igb_power_up_link - Power up the phy/serdes link
1999 * @adapter: address of board private structure
2000 **/
2001void igb_power_up_link(struct igb_adapter *adapter)
2002{
2003 igb_reset_phy(&adapter->hw);
2004
2005 if (adapter->hw.phy.media_type == e1000_media_type_copper)
2006 igb_power_up_phy_copper(&adapter->hw);
2007 else
2008 igb_power_up_serdes_link_82575(&adapter->hw);
2009
2010 igb_setup_link(&adapter->hw);
2011}
2012
2013/**
2014 * igb_power_down_link - Power down the phy/serdes link
2015 * @adapter: address of board private structure
2016 */
2017static void igb_power_down_link(struct igb_adapter *adapter)
2018{
2019 if (adapter->hw.phy.media_type == e1000_media_type_copper)
2020 igb_power_down_phy_copper_82575(&adapter->hw);
2021 else
2022 igb_shutdown_serdes_link_82575(&adapter->hw);
2023}
2024
2025/**
2026 * igb_check_swap_media - Detect and switch function for Media Auto Sense
2027 * @adapter: address of the board private structure
2028 **/
2029static void igb_check_swap_media(struct igb_adapter *adapter)
2030{
2031 struct e1000_hw *hw = &adapter->hw;
2032 u32 ctrl_ext, connsw;
2033 bool swap_now = false;
2034
2035 ctrl_ext = rd32(E1000_CTRL_EXT);
2036 connsw = rd32(E1000_CONNSW);
2037
2038 /* need to live swap if current media is copper and we have fiber/serdes
2039 * to go to.
2040 */
2041
2042 if ((hw->phy.media_type == e1000_media_type_copper) &&
2043 (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
2044 swap_now = true;
2045 } else if ((hw->phy.media_type != e1000_media_type_copper) &&
2046 !(connsw & E1000_CONNSW_SERDESD)) {
2047 /* copper signal takes time to appear */
2048 if (adapter->copper_tries < 4) {
2049 adapter->copper_tries++;
2050 connsw |= E1000_CONNSW_AUTOSENSE_CONF;
2051 wr32(E1000_CONNSW, connsw);
2052 return;
2053 } else {
2054 adapter->copper_tries = 0;
2055 if ((connsw & E1000_CONNSW_PHYSD) &&
2056 (!(connsw & E1000_CONNSW_PHY_PDN))) {
2057 swap_now = true;
2058 connsw &= ~E1000_CONNSW_AUTOSENSE_CONF;
2059 wr32(E1000_CONNSW, connsw);
2060 }
2061 }
2062 }
2063
2064 if (!swap_now)
2065 return;
2066
2067 switch (hw->phy.media_type) {
2068 case e1000_media_type_copper:
2069 netdev_info(adapter->netdev,
2070 "MAS: changing media to fiber/serdes\n");
2071 ctrl_ext |=
2072 E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
2073 adapter->flags |= IGB_FLAG_MEDIA_RESET;
2074 adapter->copper_tries = 0;
2075 break;
2076 case e1000_media_type_internal_serdes:
2077 case e1000_media_type_fiber:
2078 netdev_info(adapter->netdev,
2079 "MAS: changing media to copper\n");
2080 ctrl_ext &=
2081 ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
2082 adapter->flags |= IGB_FLAG_MEDIA_RESET;
2083 break;
2084 default:
2085 /* shouldn't get here during regular operation */
2086 netdev_err(adapter->netdev,
2087 "AMS: Invalid media type found, returning\n");
2088 break;
2089 }
2090 wr32(E1000_CTRL_EXT, ctrl_ext);
2091}
2092
2093/**
2094 * igb_up - Open the interface and prepare it to handle traffic
2095 * @adapter: board private structure
2096 **/
2097int igb_up(struct igb_adapter *adapter)
2098{
2099 struct e1000_hw *hw = &adapter->hw;
2100 int i;
2101
2102 /* hardware has been reset, we need to reload some things */
2103 igb_configure(adapter);
2104
2105 clear_bit(__IGB_DOWN, &adapter->state);
2106
2107 for (i = 0; i < adapter->num_q_vectors; i++)
2108 napi_enable(&(adapter->q_vector[i]->napi));
2109
2110 if (adapter->flags & IGB_FLAG_HAS_MSIX)
2111 igb_configure_msix(adapter);
2112 else
2113 igb_assign_vector(adapter->q_vector[0], 0);
2114
2115 /* Clear any pending interrupts. */
2116 rd32(E1000_TSICR);
2117 rd32(E1000_ICR);
2118 igb_irq_enable(adapter);
2119
2120 /* notify VFs that reset has been completed */
2121 if (adapter->vfs_allocated_count) {
2122 u32 reg_data = rd32(E1000_CTRL_EXT);
2123
2124 reg_data |= E1000_CTRL_EXT_PFRSTD;
2125 wr32(E1000_CTRL_EXT, reg_data);
2126 }
2127
2128 netif_tx_start_all_queues(adapter->netdev);
2129
2130 /* start the watchdog. */
2131 hw->mac.get_link_status = 1;
2132 schedule_work(&adapter->watchdog_task);
2133
2134 if ((adapter->flags & IGB_FLAG_EEE) &&
2135 (!hw->dev_spec._82575.eee_disable))
2136 adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
2137
2138 return 0;
2139}
2140
2141void igb_down(struct igb_adapter *adapter)
2142{
2143 struct net_device *netdev = adapter->netdev;
2144 struct e1000_hw *hw = &adapter->hw;
2145 u32 tctl, rctl;
2146 int i;
2147
2148 /* signal that we're down so the interrupt handler does not
2149 * reschedule our watchdog timer
2150 */
2151 set_bit(__IGB_DOWN, &adapter->state);
2152
2153 /* disable receives in the hardware */
2154 rctl = rd32(E1000_RCTL);
2155 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
2156 /* flush and sleep below */
2157
2158 igb_nfc_filter_exit(adapter);
2159
2160 netif_carrier_off(netdev);
2161 netif_tx_stop_all_queues(netdev);
2162
2163 /* disable transmits in the hardware */
2164 tctl = rd32(E1000_TCTL);
2165 tctl &= ~E1000_TCTL_EN;
2166 wr32(E1000_TCTL, tctl);
2167 /* flush both disables and wait for them to finish */
2168 wrfl();
2169 usleep_range(10000, 11000);
2170
2171 igb_irq_disable(adapter);
2172
2173 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
2174
2175 for (i = 0; i < adapter->num_q_vectors; i++) {
2176 if (adapter->q_vector[i]) {
2177 napi_synchronize(&adapter->q_vector[i]->napi);
2178 napi_disable(&adapter->q_vector[i]->napi);
2179 }
2180 }
2181
2182 del_timer_sync(&adapter->watchdog_timer);
2183 del_timer_sync(&adapter->phy_info_timer);
2184
2185	/* record the stats before reset */
2186 spin_lock(&adapter->stats64_lock);
2187 igb_update_stats(adapter);
2188 spin_unlock(&adapter->stats64_lock);
2189
2190 adapter->link_speed = 0;
2191 adapter->link_duplex = 0;
2192
2193 if (!pci_channel_offline(adapter->pdev))
2194 igb_reset(adapter);
2195
2196 /* clear VLAN promisc flag so VFTA will be updated if necessary */
2197 adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;
2198
2199 igb_clean_all_tx_rings(adapter);
2200 igb_clean_all_rx_rings(adapter);
2201#ifdef CONFIG_IGB_DCA
2202
2203 /* since we reset the hardware DCA settings were cleared */
2204 igb_setup_dca(adapter);
2205#endif
2206}
2207
2208void igb_reinit_locked(struct igb_adapter *adapter)
2209{
2210 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
2211 usleep_range(1000, 2000);
2212 igb_down(adapter);
2213 igb_up(adapter);
2214 clear_bit(__IGB_RESETTING, &adapter->state);
2215}
2216
2217/**
2218 * igb_enable_mas - Media Autosense re-enable after swap
2219 * @adapter: adapter struct
2220 **/
2221static void igb_enable_mas(struct igb_adapter *adapter)
2222{
2223 struct e1000_hw *hw = &adapter->hw;
2224 u32 connsw = rd32(E1000_CONNSW);
2225
2226 /* configure for SerDes media detect */
2227 if ((hw->phy.media_type == e1000_media_type_copper) &&
2228 (!(connsw & E1000_CONNSW_SERDESD))) {
2229 connsw |= E1000_CONNSW_ENRGSRC;
2230 connsw |= E1000_CONNSW_AUTOSENSE_EN;
2231 wr32(E1000_CONNSW, connsw);
2232 wrfl();
2233 }
2234}
2235
2236#ifdef CONFIG_IGB_HWMON
2237/**
2238 * igb_set_i2c_bb - Init I2C interface
2239 * @hw: pointer to hardware structure
2240 **/
2241static void igb_set_i2c_bb(struct e1000_hw *hw)
2242{
2243 u32 ctrl_ext;
2244 s32 i2cctl;
2245
2246 ctrl_ext = rd32(E1000_CTRL_EXT);
2247 ctrl_ext |= E1000_CTRL_I2C_ENA;
2248 wr32(E1000_CTRL_EXT, ctrl_ext);
2249 wrfl();
2250
2251 i2cctl = rd32(E1000_I2CPARAMS);
2252 i2cctl |= E1000_I2CBB_EN
2253 | E1000_I2C_CLK_OE_N
2254 | E1000_I2C_DATA_OE_N;
2255 wr32(E1000_I2CPARAMS, i2cctl);
2256 wrfl();
2257}
2258#endif
2259
2260void igb_reset(struct igb_adapter *adapter)
2261{
2262 struct pci_dev *pdev = adapter->pdev;
2263 struct e1000_hw *hw = &adapter->hw;
2264 struct e1000_mac_info *mac = &hw->mac;
2265 struct e1000_fc_info *fc = &hw->fc;
2266 u32 pba, hwm;
2267
2268	/* Repartition PBA for MTUs greater than 9k.
2269	 * CTRL.RST is required for the change to take effect.
2270 */
2271 switch (mac->type) {
2272 case e1000_i350:
2273 case e1000_i354:
2274 case e1000_82580:
2275 pba = rd32(E1000_RXPBS);
2276 pba = igb_rxpbs_adjust_82580(pba);
2277 break;
2278 case e1000_82576:
2279 pba = rd32(E1000_RXPBS);
2280 pba &= E1000_RXPBS_SIZE_MASK_82576;
2281 break;
2282 case e1000_82575:
2283 case e1000_i210:
2284 case e1000_i211:
2285 default:
2286 pba = E1000_PBA_34K;
2287 break;
2288 }
2289
2290 if (mac->type == e1000_82575) {
2291 u32 min_rx_space, min_tx_space, needed_tx_space;
2292
2293 /* write Rx PBA so that hardware can report correct Tx PBA */
2294 wr32(E1000_PBA, pba);
2295
2296 /* To maintain wire speed transmits, the Tx FIFO should be
2297 * large enough to accommodate two full transmit packets,
2298 * rounded up to the next 1KB and expressed in KB. Likewise,
2299 * the Rx FIFO should be large enough to accommodate at least
2300 * one full receive packet and is similarly rounded up and
2301 * expressed in KB.
2302 */
2303 min_rx_space = DIV_ROUND_UP(MAX_JUMBO_FRAME_SIZE, 1024);
2304
2305		/* The Tx FIFO also stores 16 bytes of information about the Tx
2306		 * packet but doesn't include the Ethernet FCS, as hardware
2307		 * appends it. We only need to round up to the nearest 512-byte
2308		 * block count since the value we care about is 2 frames, not 1.
2309 */
2310 min_tx_space = adapter->max_frame_size;
2311 min_tx_space += sizeof(union e1000_adv_tx_desc) - ETH_FCS_LEN;
2312 min_tx_space = DIV_ROUND_UP(min_tx_space, 512);
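		/* For instance, assuming a standard 1522-byte maximum frame:
		 * 1522 + 16 (descriptor info) - 4 (FCS) = 1534 bytes, i.e.
		 * DIV_ROUND_UP(1534, 512) = 3 blocks of 512 bytes.
		 */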
2313
2314 /* upper 16 bits has Tx packet buffer allocation size in KB */
2315 needed_tx_space = min_tx_space - (rd32(E1000_PBA) >> 16);
2316
2317 /* If current Tx allocation is less than the min Tx FIFO size,
2318 * and the min Tx FIFO size is less than the current Rx FIFO
2319 * allocation, take space away from current Rx allocation.
2320 */
2321 if (needed_tx_space < pba) {
2322 pba -= needed_tx_space;
2323
2324 /* if short on Rx space, Rx wins and must trump Tx
2325 * adjustment
2326 */
2327 if (pba < min_rx_space)
2328 pba = min_rx_space;
2329 }
2330
2331 /* adjust PBA for jumbo frames */
2332 wr32(E1000_PBA, pba);
2333 }
2334
2335 /* flow control settings
2336 * The high water mark must be low enough to fit one full frame
2337 * after transmitting the pause frame. As such we must have enough
2338 * space to allow for us to complete our current transmit and then
2339 * receive the frame that is in progress from the link partner.
2340 * Set it to:
2341 * - the full Rx FIFO size minus one full Tx plus one full Rx frame
2342 */
2343 hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);
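	/* For example, with a 34 KB Rx PBA (pba = 34) the FIFO holds
	 * 34 << 10 = 34816 bytes; one in-flight Tx frame plus one maximum
	 * sized Rx frame are subtracted from that before the 16-byte
	 * alignment below.
	 */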
2344
2345 fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */
2346 fc->low_water = fc->high_water - 16;
2347 fc->pause_time = 0xFFFF;
2348 fc->send_xon = 1;
2349 fc->current_mode = fc->requested_mode;
2350
2351 /* disable receive for all VFs and wait one second */
2352 if (adapter->vfs_allocated_count) {
2353 int i;
2354
2355 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
2356 adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
2357
2358 /* ping all the active vfs to let them know we are going down */
2359 igb_ping_all_vfs(adapter);
2360
2361 /* disable transmits and receives */
2362 wr32(E1000_VFRE, 0);
2363 wr32(E1000_VFTE, 0);
2364 }
2365
2366 /* Allow time for pending master requests to run */
2367 hw->mac.ops.reset_hw(hw);
2368 wr32(E1000_WUC, 0);
2369
2370 if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
2371 /* need to resetup here after media swap */
2372 adapter->ei.get_invariants(hw);
2373 adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
2374 }
2375 if ((mac->type == e1000_82575 || mac->type == e1000_i350) &&
2376 (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
2377 igb_enable_mas(adapter);
2378 }
2379 if (hw->mac.ops.init_hw(hw))
2380 dev_err(&pdev->dev, "Hardware Error\n");
2381
2382 /* RAR registers were cleared during init_hw, clear mac table */
2383 igb_flush_mac_table(adapter);
2384 __dev_uc_unsync(adapter->netdev, NULL);
2385
2386 /* Recover default RAR entry */
2387 igb_set_default_mac_filter(adapter);
2388
2389 /* Flow control settings reset on hardware reset, so guarantee flow
2390 * control is off when forcing speed.
2391 */
2392 if (!hw->mac.autoneg)
2393 igb_force_mac_fc(hw);
2394
2395 igb_init_dmac(adapter, pba);
2396#ifdef CONFIG_IGB_HWMON
2397 /* Re-initialize the thermal sensor on i350 devices. */
2398 if (!test_bit(__IGB_DOWN, &adapter->state)) {
2399 if (mac->type == e1000_i350 && hw->bus.func == 0) {
2400 /* If present, re-initialize the external thermal sensor
2401 * interface.
2402 */
2403 if (adapter->ets)
2404 igb_set_i2c_bb(hw);
2405 mac->ops.init_thermal_sensor_thresh(hw);
2406 }
2407 }
2408#endif
2409 /* Re-establish EEE setting */
2410 if (hw->phy.media_type == e1000_media_type_copper) {
2411 switch (mac->type) {
2412 case e1000_i350:
2413 case e1000_i210:
2414 case e1000_i211:
2415 igb_set_eee_i350(hw, true, true);
2416 break;
2417 case e1000_i354:
2418 igb_set_eee_i354(hw, true, true);
2419 break;
2420 default:
2421 break;
2422 }
2423 }
2424 if (!netif_running(adapter->netdev))
2425 igb_power_down_link(adapter);
2426
2427 igb_update_mng_vlan(adapter);
2428
2429 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
2430 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
2431
2432 /* Re-enable PTP, where applicable. */
2433 if (adapter->ptp_flags & IGB_PTP_ENABLED)
2434 igb_ptp_reset(adapter);
2435
2436 igb_get_phy_info(hw);
2437}
2438
2439static netdev_features_t igb_fix_features(struct net_device *netdev,
2440 netdev_features_t features)
2441{
2442 /* Since there is no support for separate Rx/Tx vlan accel
2443 * enable/disable make sure Tx flag is always in same state as Rx.
2444 */
2445 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2446 features |= NETIF_F_HW_VLAN_CTAG_TX;
2447 else
2448 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2449
2450 return features;
2451}
2452
2453static int igb_set_features(struct net_device *netdev,
2454 netdev_features_t features)
2455{
2456 netdev_features_t changed = netdev->features ^ features;
2457 struct igb_adapter *adapter = netdev_priv(netdev);
2458
2459 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2460 igb_vlan_mode(netdev, features);
2461
2462 if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
2463 return 0;
2464
2465 if (!(features & NETIF_F_NTUPLE)) {
2466 struct hlist_node *node2;
2467 struct igb_nfc_filter *rule;
2468
2469 spin_lock(&adapter->nfc_lock);
2470 hlist_for_each_entry_safe(rule, node2,
2471 &adapter->nfc_filter_list, nfc_node) {
2472 igb_erase_filter(adapter, rule);
2473 hlist_del(&rule->nfc_node);
2474 kfree(rule);
2475 }
2476 spin_unlock(&adapter->nfc_lock);
2477 adapter->nfc_filter_count = 0;
2478 }
2479
2480 netdev->features = features;
2481
2482 if (netif_running(netdev))
2483 igb_reinit_locked(adapter);
2484 else
2485 igb_reset(adapter);
2486
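	/* A positive return value tells __netdev_update_features() that the
	 * driver has already committed the new set to netdev->features, so
	 * the core won't write it again.
	 */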
2487 return 1;
2488}
2489
2490static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
2491 struct net_device *dev,
2492 const unsigned char *addr, u16 vid,
2493 u16 flags, bool *notified,
2494 struct netlink_ext_ack *extack)
2495{
2496 /* guarantee we can provide a unique filter for the unicast address */
2497 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
2498 struct igb_adapter *adapter = netdev_priv(dev);
2499 int vfn = adapter->vfs_allocated_count;
2500
2501 if (netdev_uc_count(dev) >= igb_available_rars(adapter, vfn))
2502 return -ENOMEM;
2503 }
2504
2505 return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
2506}
2507
2508#define IGB_MAX_MAC_HDR_LEN 127
2509#define IGB_MAX_NETWORK_HDR_LEN 511
2510
2511static netdev_features_t
2512igb_features_check(struct sk_buff *skb, struct net_device *dev,
2513 netdev_features_t features)
2514{
2515 unsigned int network_hdr_len, mac_hdr_len;
2516
2517 /* Make certain the headers can be described by a context descriptor */
2518 mac_hdr_len = skb_network_offset(skb);
2519 if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN))
2520 return features & ~(NETIF_F_HW_CSUM |
2521 NETIF_F_SCTP_CRC |
2522 NETIF_F_GSO_UDP_L4 |
2523 NETIF_F_HW_VLAN_CTAG_TX |
2524 NETIF_F_TSO |
2525 NETIF_F_TSO6);
2526
2527 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
2528 if (unlikely(network_hdr_len > IGB_MAX_NETWORK_HDR_LEN))
2529 return features & ~(NETIF_F_HW_CSUM |
2530 NETIF_F_SCTP_CRC |
2531 NETIF_F_GSO_UDP_L4 |
2532 NETIF_F_TSO |
2533 NETIF_F_TSO6);
2534
2535 /* We can only support IPV4 TSO in tunnels if we can mangle the
2536 * inner IP ID field, so strip TSO if MANGLEID is not supported.
2537 */
2538 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
2539 features &= ~NETIF_F_TSO;
2540
2541 return features;
2542}
2543
2544static void igb_offload_apply(struct igb_adapter *adapter, s32 queue)
2545{
2546 if (!is_fqtss_enabled(adapter)) {
2547 enable_fqtss(adapter, true);
2548 return;
2549 }
2550
2551 igb_config_tx_modes(adapter, queue);
2552
2553 if (!is_any_cbs_enabled(adapter) && !is_any_txtime_enabled(adapter))
2554 enable_fqtss(adapter, false);
2555}
2556
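/* A typical userspace trigger for this path is the cbs qdisc. The command
 * below is illustrative only (the interface name and figures are
 * assumptions, patterned on the tc-cbs(8) example, not on this driver):
 *
 *   tc qdisc replace dev eth0 parent 100:1 cbs idleslope 20000 \
 *      sendslope -980000 hicredit 30 locredit -1470 offload 1
 */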
2557static int igb_offload_cbs(struct igb_adapter *adapter,
2558 struct tc_cbs_qopt_offload *qopt)
2559{
2560 struct e1000_hw *hw = &adapter->hw;
2561 int err;
2562
2563 /* CBS offloading is only supported by i210 controller. */
2564 if (hw->mac.type != e1000_i210)
2565 return -EOPNOTSUPP;
2566
2567 /* CBS offloading is only supported by queue 0 and queue 1. */
2568 if (qopt->queue < 0 || qopt->queue > 1)
2569 return -EINVAL;
2570
2571 err = igb_save_cbs_params(adapter, qopt->queue, qopt->enable,
2572 qopt->idleslope, qopt->sendslope,
2573 qopt->hicredit, qopt->locredit);
2574 if (err)
2575 return err;
2576
2577 igb_offload_apply(adapter, qopt->queue);
2578
2579 return 0;
2580}
2581
2582#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
2583#define VLAN_PRIO_FULL_MASK (0x07)
2584
2585static int igb_parse_cls_flower(struct igb_adapter *adapter,
2586 struct flow_cls_offload *f,
2587 int traffic_class,
2588 struct igb_nfc_filter *input)
2589{
2590 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2591 struct flow_dissector *dissector = rule->match.dissector;
2592 struct netlink_ext_ack *extack = f->common.extack;
2593
2594 if (dissector->used_keys &
2595 ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
2596 BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
2597 BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
2598 BIT_ULL(FLOW_DISSECTOR_KEY_VLAN))) {
2599 NL_SET_ERR_MSG_MOD(extack,
2600 "Unsupported key used, only BASIC, CONTROL, ETH_ADDRS and VLAN are supported");
2601 return -EOPNOTSUPP;
2602 }
2603
2604 if (flow_rule_match_has_control_flags(rule, extack))
2605 return -EOPNOTSUPP;
2606
2607 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
2608 struct flow_match_eth_addrs match;
2609
2610 flow_rule_match_eth_addrs(rule, &match);
2611 if (!is_zero_ether_addr(match.mask->dst)) {
2612 if (!is_broadcast_ether_addr(match.mask->dst)) {
2613 NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for destination MAC address");
2614 return -EINVAL;
2615 }
2616
2617 input->filter.match_flags |=
2618 IGB_FILTER_FLAG_DST_MAC_ADDR;
2619 ether_addr_copy(input->filter.dst_addr, match.key->dst);
2620 }
2621
2622 if (!is_zero_ether_addr(match.mask->src)) {
2623 if (!is_broadcast_ether_addr(match.mask->src)) {
2624 NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for source MAC address");
2625 return -EINVAL;
2626 }
2627
2628 input->filter.match_flags |=
2629 IGB_FILTER_FLAG_SRC_MAC_ADDR;
2630 ether_addr_copy(input->filter.src_addr, match.key->src);
2631 }
2632 }
2633
2634 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
2635 struct flow_match_basic match;
2636
2637 flow_rule_match_basic(rule, &match);
2638 if (match.mask->n_proto) {
2639 if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) {
2640 NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for EtherType filter");
2641 return -EINVAL;
2642 }
2643
2644 input->filter.match_flags |= IGB_FILTER_FLAG_ETHER_TYPE;
2645 input->filter.etype = match.key->n_proto;
2646 }
2647 }
2648
2649 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
2650 struct flow_match_vlan match;
2651
2652 flow_rule_match_vlan(rule, &match);
2653 if (match.mask->vlan_priority) {
2654 if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) {
2655 NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority");
2656 return -EINVAL;
2657 }
2658
2659 input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI;
2660 input->filter.vlan_tci =
2661 (__force __be16)match.key->vlan_priority;
2662 }
2663 }
2664
2665 input->action = traffic_class;
2666 input->cookie = f->cookie;
2667
2668 return 0;
2669}
2670
2671static int igb_configure_clsflower(struct igb_adapter *adapter,
2672 struct flow_cls_offload *cls_flower)
2673{
2674 struct netlink_ext_ack *extack = cls_flower->common.extack;
2675 struct igb_nfc_filter *filter, *f;
2676 int err, tc;
2677
2678 tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
2679 if (tc < 0) {
2680 NL_SET_ERR_MSG_MOD(extack, "Invalid traffic class");
2681 return -EINVAL;
2682 }
2683
2684 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
2685 if (!filter)
2686 return -ENOMEM;
2687
2688 err = igb_parse_cls_flower(adapter, cls_flower, tc, filter);
2689 if (err < 0)
2690 goto err_parse;
2691
2692 spin_lock(&adapter->nfc_lock);
2693
2694 hlist_for_each_entry(f, &adapter->nfc_filter_list, nfc_node) {
2695 if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) {
2696 err = -EEXIST;
2697 NL_SET_ERR_MSG_MOD(extack,
2698 "This filter is already set in ethtool");
2699 goto err_locked;
2700 }
2701 }
2702
2703 hlist_for_each_entry(f, &adapter->cls_flower_list, nfc_node) {
2704 if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) {
2705 err = -EEXIST;
2706 NL_SET_ERR_MSG_MOD(extack,
2707 "This filter is already set in cls_flower");
2708 goto err_locked;
2709 }
2710 }
2711
2712 err = igb_add_filter(adapter, filter);
2713 if (err < 0) {
2714 NL_SET_ERR_MSG_MOD(extack, "Could not add filter to the adapter");
2715 goto err_locked;
2716 }
2717
2718 hlist_add_head(&filter->nfc_node, &adapter->cls_flower_list);
2719
2720 spin_unlock(&adapter->nfc_lock);
2721
2722 return 0;
2723
2724err_locked:
2725 spin_unlock(&adapter->nfc_lock);
2726
2727err_parse:
2728 kfree(filter);
2729
2730 return err;
2731}
2732
2733static int igb_delete_clsflower(struct igb_adapter *adapter,
2734 struct flow_cls_offload *cls_flower)
2735{
2736 struct igb_nfc_filter *filter;
2737 int err;
2738
2739 spin_lock(&adapter->nfc_lock);
2740
2741 hlist_for_each_entry(filter, &adapter->cls_flower_list, nfc_node)
2742 if (filter->cookie == cls_flower->cookie)
2743 break;
2744
2745 if (!filter) {
2746 err = -ENOENT;
2747 goto out;
2748 }
2749
2750 err = igb_erase_filter(adapter, filter);
2751 if (err < 0)
2752 goto out;
2753
2754 hlist_del(&filter->nfc_node);
2755 kfree(filter);
2756
2757out:
2758 spin_unlock(&adapter->nfc_lock);
2759
2760 return err;
2761}
2762
2763static int igb_setup_tc_cls_flower(struct igb_adapter *adapter,
2764 struct flow_cls_offload *cls_flower)
2765{
2766 switch (cls_flower->command) {
2767 case FLOW_CLS_REPLACE:
2768 return igb_configure_clsflower(adapter, cls_flower);
2769 case FLOW_CLS_DESTROY:
2770 return igb_delete_clsflower(adapter, cls_flower);
2771 case FLOW_CLS_STATS:
2772 return -EOPNOTSUPP;
2773 default:
2774 return -EOPNOTSUPP;
2775 }
2776}
2777
2778static int igb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
2779 void *cb_priv)
2780{
2781 struct igb_adapter *adapter = cb_priv;
2782
2783 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
2784 return -EOPNOTSUPP;
2785
2786 switch (type) {
2787 case TC_SETUP_CLSFLOWER:
2788 return igb_setup_tc_cls_flower(adapter, type_data);
2789
2790 default:
2791 return -EOPNOTSUPP;
2792 }
2793}
2794
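/* A typical userspace trigger for this path is the etf qdisc. Illustrative
 * command only (the interface name and delta are assumptions, patterned on
 * tc-etf(8)):
 *
 *   tc qdisc replace dev eth0 parent 100:1 etf clockid CLOCK_TAI \
 *      delta 300000 offload
 */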
2795static int igb_offload_txtime(struct igb_adapter *adapter,
2796 struct tc_etf_qopt_offload *qopt)
2797{
2798 struct e1000_hw *hw = &adapter->hw;
2799 int err;
2800
2801 /* Launchtime offloading is only supported by i210 controller. */
2802 if (hw->mac.type != e1000_i210)
2803 return -EOPNOTSUPP;
2804
2805 /* Launchtime offloading is only supported by queues 0 and 1. */
2806 if (qopt->queue < 0 || qopt->queue > 1)
2807 return -EINVAL;
2808
2809 err = igb_save_txtime_params(adapter, qopt->queue, qopt->enable);
2810 if (err)
2811 return err;
2812
2813 igb_offload_apply(adapter, qopt->queue);
2814
2815 return 0;
2816}
2817
2818static int igb_tc_query_caps(struct igb_adapter *adapter,
2819 struct tc_query_caps_base *base)
2820{
2821 switch (base->type) {
2822 case TC_SETUP_QDISC_TAPRIO: {
2823 struct tc_taprio_caps *caps = base->caps;
2824
2825 caps->broken_mqprio = true;
2826
2827 return 0;
2828 }
2829 default:
2830 return -EOPNOTSUPP;
2831 }
2832}
2833
2834static LIST_HEAD(igb_block_cb_list);
2835
2836static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
2837 void *type_data)
2838{
2839 struct igb_adapter *adapter = netdev_priv(dev);
2840
2841 switch (type) {
2842 case TC_QUERY_CAPS:
2843 return igb_tc_query_caps(adapter, type_data);
2844 case TC_SETUP_QDISC_CBS:
2845 return igb_offload_cbs(adapter, type_data);
2846 case TC_SETUP_BLOCK:
2847 return flow_block_cb_setup_simple(type_data,
2848 &igb_block_cb_list,
2849 igb_setup_tc_block_cb,
2850 adapter, adapter, true);
2851
2852 case TC_SETUP_QDISC_ETF:
2853 return igb_offload_txtime(adapter, type_data);
2854
2855 default:
2856 return -EOPNOTSUPP;
2857 }
2858}
2859
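/* This setup path is typically reached from userspace via iproute2, e.g.
 * (hypothetical object file and interface name):
 *
 *   ip link set dev eth0 xdp obj xdp_prog.o sec xdp
 */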
2860static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf)
2861{
2862 int i, frame_size = dev->mtu + IGB_ETH_PKT_HDR_PAD;
2863 struct igb_adapter *adapter = netdev_priv(dev);
2864 struct bpf_prog *prog = bpf->prog, *old_prog;
2865 bool running = netif_running(dev);
2866 bool need_reset;
2867
2868 /* verify igb ring attributes are sufficient for XDP */
2869 for (i = 0; i < adapter->num_rx_queues; i++) {
2870 struct igb_ring *ring = adapter->rx_ring[i];
2871
2872 if (frame_size > igb_rx_bufsz(ring)) {
2873 NL_SET_ERR_MSG_MOD(bpf->extack,
2874 "The RX buffer size is too small for the frame size");
2875 netdev_warn(dev, "XDP RX buffer size %d is too small for the frame size %d\n",
2876 igb_rx_bufsz(ring), frame_size);
2877 return -EINVAL;
2878 }
2879 }
2880
2881 old_prog = xchg(&adapter->xdp_prog, prog);
2882 need_reset = (!!prog != !!old_prog);
2883
2884 /* device is up and bpf is added/removed, must setup the RX queues */
2885 if (need_reset && running) {
2886 igb_close(dev);
2887 } else {
2888 for (i = 0; i < adapter->num_rx_queues; i++)
2889 (void)xchg(&adapter->rx_ring[i]->xdp_prog,
2890 adapter->xdp_prog);
2891 }
2892
2893 if (old_prog)
2894 bpf_prog_put(old_prog);
2895
2896 /* bpf is just replaced, RXQ and MTU are already setup */
2897	if (!need_reset)
2898		return 0;
2899
2900	if (prog)
2901		xdp_features_set_redirect_target(dev, true);
2902	else
2903		xdp_features_clear_redirect_target(dev);
2905
2906 if (running)
2907 igb_open(dev);
2908
2909 return 0;
2910}
2911
2912static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2913{
2914 switch (xdp->command) {
2915 case XDP_SETUP_PROG:
2916 return igb_xdp_setup(dev, xdp);
2917 default:
2918 return -EINVAL;
2919 }
2920}
2921
2922/* This function assumes __netif_tx_lock is held by the caller. */
2923static void igb_xdp_ring_update_tail(struct igb_ring *ring)
2924{
2925 lockdep_assert_held(&txring_txq(ring)->_xmit_lock);
2926
2927 /* Force memory writes to complete before letting h/w know there
2928 * are new descriptors to fetch.
2929 */
2930 wmb();
2931 writel(ring->next_to_use, ring->tail);
2932}
2933
2934static struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter)
2935{
2936 unsigned int r_idx = smp_processor_id();
2937
2938 if (r_idx >= adapter->num_tx_queues)
2939 r_idx = r_idx % adapter->num_tx_queues;
2940
2941 return adapter->tx_ring[r_idx];
2942}
2943
2944static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp)
2945{
2946 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
2947 int cpu = smp_processor_id();
2948 struct igb_ring *tx_ring;
2949 struct netdev_queue *nq;
2950 u32 ret;
2951
2952 if (unlikely(!xdpf))
2953 return IGB_XDP_CONSUMED;
2954
2955	/* During program transitions it's possible adapter->xdp_prog is assigned
2956	 * but the ring has not been configured yet. In this case simply abort xmit.
2957 */
2958 tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
2959 if (unlikely(!tx_ring))
2960 return IGB_XDP_CONSUMED;
2961
2962 nq = txring_txq(tx_ring);
2963 __netif_tx_lock(nq, cpu);
2964 /* Avoid transmit queue timeout since we share it with the slow path */
2965 txq_trans_cond_update(nq);
2966 ret = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
2967 __netif_tx_unlock(nq);
2968
2969 return ret;
2970}
2971
2972static int igb_xdp_xmit(struct net_device *dev, int n,
2973 struct xdp_frame **frames, u32 flags)
2974{
2975 struct igb_adapter *adapter = netdev_priv(dev);
2976 int cpu = smp_processor_id();
2977 struct igb_ring *tx_ring;
2978 struct netdev_queue *nq;
2979 int nxmit = 0;
2980 int i;
2981
2982 if (unlikely(test_bit(__IGB_DOWN, &adapter->state)))
2983 return -ENETDOWN;
2984
2985 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
2986 return -EINVAL;
2987
2988	/* During program transitions it's possible adapter->xdp_prog is assigned
2989	 * but the ring has not been configured yet. In this case simply abort xmit.
2990 */
2991 tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
2992 if (unlikely(!tx_ring))
2993 return -ENXIO;
2994
2995 nq = txring_txq(tx_ring);
2996 __netif_tx_lock(nq, cpu);
2997
2998 /* Avoid transmit queue timeout since we share it with the slow path */
2999 txq_trans_cond_update(nq);
3000
3001 for (i = 0; i < n; i++) {
3002 struct xdp_frame *xdpf = frames[i];
3003 int err;
3004
3005 err = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
3006 if (err != IGB_XDP_TX)
3007 break;
3008 nxmit++;
3009 }
3010
3011 if (unlikely(flags & XDP_XMIT_FLUSH))
3012 igb_xdp_ring_update_tail(tx_ring);
3013
3014 __netif_tx_unlock(nq);
3015
3016 return nxmit;
3017}
3018
3019static const struct net_device_ops igb_netdev_ops = {
3020 .ndo_open = igb_open,
3021 .ndo_stop = igb_close,
3022 .ndo_start_xmit = igb_xmit_frame,
3023 .ndo_get_stats64 = igb_get_stats64,
3024 .ndo_set_rx_mode = igb_set_rx_mode,
3025 .ndo_set_mac_address = igb_set_mac,
3026 .ndo_change_mtu = igb_change_mtu,
3027 .ndo_eth_ioctl = igb_ioctl,
3028 .ndo_tx_timeout = igb_tx_timeout,
3029 .ndo_validate_addr = eth_validate_addr,
3030 .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
3031 .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
3032 .ndo_set_vf_mac = igb_ndo_set_vf_mac,
3033 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
3034 .ndo_set_vf_rate = igb_ndo_set_vf_bw,
3035 .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk,
3036 .ndo_set_vf_trust = igb_ndo_set_vf_trust,
3037 .ndo_get_vf_config = igb_ndo_get_vf_config,
3038 .ndo_fix_features = igb_fix_features,
3039 .ndo_set_features = igb_set_features,
3040 .ndo_fdb_add = igb_ndo_fdb_add,
3041 .ndo_features_check = igb_features_check,
3042 .ndo_setup_tc = igb_setup_tc,
3043 .ndo_bpf = igb_xdp,
3044 .ndo_xdp_xmit = igb_xdp_xmit,
3045};
3046
3047/**
3048 * igb_set_fw_version - Configure version string for ethtool
3049 * @adapter: adapter struct
3050 **/
3051void igb_set_fw_version(struct igb_adapter *adapter)
3052{
3053 struct e1000_hw *hw = &adapter->hw;
3054 struct e1000_fw_version fw;
3055
3056 igb_get_fw_version(hw, &fw);
3057
3058 switch (hw->mac.type) {
3059 case e1000_i210:
3060 case e1000_i211:
3061		if (!igb_get_flash_presence_i210(hw)) {
3062 snprintf(adapter->fw_version,
3063 sizeof(adapter->fw_version),
3064 "%2d.%2d-%d",
3065 fw.invm_major, fw.invm_minor,
3066 fw.invm_img_type);
3067 break;
3068 }
3069 fallthrough;
3070 default:
3071 /* if option rom is valid, display its version too */
3072 if (fw.or_valid) {
3073 snprintf(adapter->fw_version,
3074 sizeof(adapter->fw_version),
3075 "%d.%d, 0x%08x, %d.%d.%d",
3076 fw.eep_major, fw.eep_minor, fw.etrack_id,
3077 fw.or_major, fw.or_build, fw.or_patch);
3078 /* no option rom */
3079		} else if (fw.etrack_id != 0x0000) {
3080 snprintf(adapter->fw_version,
3081 sizeof(adapter->fw_version),
3082 "%d.%d, 0x%08x",
3083 fw.eep_major, fw.eep_minor, fw.etrack_id);
3084 } else {
3085 snprintf(adapter->fw_version,
3086 sizeof(adapter->fw_version),
3087 "%d.%d.%d",
3088 fw.eep_major, fw.eep_minor, fw.eep_build);
3089 }
3090 break;
3091 }
3092}
3093
3094/**
3095 * igb_init_mas - init Media Autosense feature if enabled in the NVM
3096 *
3097 * @adapter: adapter struct
3098 **/
3099static void igb_init_mas(struct igb_adapter *adapter)
3100{
3101 struct e1000_hw *hw = &adapter->hw;
3102 u16 eeprom_data;
3103
3104 hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data);
3105 switch (hw->bus.func) {
3106 case E1000_FUNC_0:
3107 if (eeprom_data & IGB_MAS_ENABLE_0) {
3108 adapter->flags |= IGB_FLAG_MAS_ENABLE;
3109 netdev_info(adapter->netdev,
3110 "MAS: Enabling Media Autosense for port %d\n",
3111 hw->bus.func);
3112 }
3113 break;
3114 case E1000_FUNC_1:
3115 if (eeprom_data & IGB_MAS_ENABLE_1) {
3116 adapter->flags |= IGB_FLAG_MAS_ENABLE;
3117 netdev_info(adapter->netdev,
3118 "MAS: Enabling Media Autosense for port %d\n",
3119 hw->bus.func);
3120 }
3121 break;
3122 case E1000_FUNC_2:
3123 if (eeprom_data & IGB_MAS_ENABLE_2) {
3124 adapter->flags |= IGB_FLAG_MAS_ENABLE;
3125 netdev_info(adapter->netdev,
3126 "MAS: Enabling Media Autosense for port %d\n",
3127 hw->bus.func);
3128 }
3129 break;
3130 case E1000_FUNC_3:
3131 if (eeprom_data & IGB_MAS_ENABLE_3) {
3132 adapter->flags |= IGB_FLAG_MAS_ENABLE;
3133 netdev_info(adapter->netdev,
3134 "MAS: Enabling Media Autosense for port %d\n",
3135 hw->bus.func);
3136 }
3137 break;
3138 default:
3139 /* Shouldn't get here */
3140 netdev_err(adapter->netdev,
3141 "MAS: Invalid port configuration, returning\n");
3142 break;
3143 }
3144}
3145
3146/**
3147 * igb_init_i2c - Init I2C interface
3148 * @adapter: pointer to adapter structure
3149 **/
3150static s32 igb_init_i2c(struct igb_adapter *adapter)
3151{
3152 s32 status = 0;
3153
3154 /* I2C interface supported on i350 devices */
3155 if (adapter->hw.mac.type != e1000_i350)
3156 return 0;
3157
3158 /* Initialize the i2c bus which is controlled by the registers.
3159 * This bus will use the i2c_algo_bit structure that implements
3160 * the protocol through toggling of the 4 bits in the register.
3161 */
3162 adapter->i2c_adap.owner = THIS_MODULE;
3163 adapter->i2c_algo = igb_i2c_algo;
3164 adapter->i2c_algo.data = adapter;
3165 adapter->i2c_adap.algo_data = &adapter->i2c_algo;
3166 adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
3167 strscpy(adapter->i2c_adap.name, "igb BB",
3168 sizeof(adapter->i2c_adap.name));
3169 status = i2c_bit_add_bus(&adapter->i2c_adap);
3170 return status;
3171}
3172
3173/**
3174 * igb_probe - Device Initialization Routine
3175 * @pdev: PCI device information struct
3176 * @ent: entry in igb_pci_tbl
3177 *
3178 * Returns 0 on success, negative on failure
3179 *
3180 * igb_probe initializes an adapter identified by a pci_dev structure.
3181 * The OS initialization, configuring of the adapter private structure,
3182 * and a hardware reset occur.
3183 **/
3184static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3185{
3186 struct net_device *netdev;
3187 struct igb_adapter *adapter;
3188 struct e1000_hw *hw;
3189 u16 eeprom_data = 0;
3190 s32 ret_val;
3191 static int global_quad_port_a; /* global quad port a indication */
3192 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
3193 u8 part_str[E1000_PBANUM_LENGTH];
3194 int err;
3195
3196 /* Catch broken hardware that put the wrong VF device ID in
3197 * the PCIe SR-IOV capability.
3198 */
3199 if (pdev->is_virtfn) {
3200		WARN(1, "%s (%x:%x) should not be a VF!\n",
3201 pci_name(pdev), pdev->vendor, pdev->device);
3202 return -EINVAL;
3203 }
3204
3205 err = pci_enable_device_mem(pdev);
3206 if (err)
3207 return err;
3208
3209 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3210 if (err) {
3211 dev_err(&pdev->dev,
3212 "No usable DMA configuration, aborting\n");
3213 goto err_dma;
3214 }
3215
3216 err = pci_request_mem_regions(pdev, igb_driver_name);
3217 if (err)
3218 goto err_pci_reg;
3219
3220 pci_set_master(pdev);
3221 pci_save_state(pdev);
3222
3223 err = -ENOMEM;
3224 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
3225 IGB_MAX_TX_QUEUES);
3226 if (!netdev)
3227 goto err_alloc_etherdev;
3228
3229 SET_NETDEV_DEV(netdev, &pdev->dev);
3230
3231 pci_set_drvdata(pdev, netdev);
3232 adapter = netdev_priv(netdev);
3233 adapter->netdev = netdev;
3234 adapter->pdev = pdev;
3235 hw = &adapter->hw;
3236 hw->back = adapter;
3237 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3238
3239 err = -EIO;
3240 adapter->io_addr = pci_iomap(pdev, 0, 0);
3241 if (!adapter->io_addr)
3242 goto err_ioremap;
3243 /* hw->hw_addr can be altered, we'll use adapter->io_addr for unmap */
3244 hw->hw_addr = adapter->io_addr;
3245
3246 netdev->netdev_ops = &igb_netdev_ops;
3247 igb_set_ethtool_ops(netdev);
3248 netdev->watchdog_timeo = 5 * HZ;
3249
3250 strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
3251
3252 netdev->mem_start = pci_resource_start(pdev, 0);
3253 netdev->mem_end = pci_resource_end(pdev, 0);
3254
3255 /* PCI config space info */
3256 hw->vendor_id = pdev->vendor;
3257 hw->device_id = pdev->device;
3258 hw->revision_id = pdev->revision;
3259 hw->subsystem_vendor_id = pdev->subsystem_vendor;
3260 hw->subsystem_device_id = pdev->subsystem_device;
3261
3262 /* Copy the default MAC, PHY and NVM function pointers */
3263 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
3264 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
3265 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
3266 /* Initialize skew-specific constants */
3267 err = ei->get_invariants(hw);
3268 if (err)
3269 goto err_sw_init;
3270
3271 /* setup the private structure */
3272 err = igb_sw_init(adapter);
3273 if (err)
3274 goto err_sw_init;
3275
3276 igb_get_bus_info_pcie(hw);
3277
3278 hw->phy.autoneg_wait_to_complete = false;
3279
3280 /* Copper options */
3281 if (hw->phy.media_type == e1000_media_type_copper) {
3282 hw->phy.mdix = AUTO_ALL_MODES;
3283 hw->phy.disable_polarity_correction = false;
3284 hw->phy.ms_type = e1000_ms_hw_default;
3285 }
3286
3287 if (igb_check_reset_block(hw))
3288 dev_info(&pdev->dev,
3289 "PHY reset is blocked due to SOL/IDER session.\n");
3290
3291	/* features is initialized to 0 at allocation; it might have bits
3292	 * set by igb_sw_init(), so we should use an OR instead of an
3293	 * assignment.
3294 */
3295 netdev->features |= NETIF_F_SG |
3296 NETIF_F_TSO |
3297 NETIF_F_TSO6 |
3298 NETIF_F_RXHASH |
3299 NETIF_F_RXCSUM |
3300 NETIF_F_HW_CSUM;
3301
3302 if (hw->mac.type >= e1000_82576)
3303 netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4;
3304
3305 if (hw->mac.type >= e1000_i350)
3306 netdev->features |= NETIF_F_HW_TC;
3307
3308#define IGB_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
3309 NETIF_F_GSO_GRE_CSUM | \
3310 NETIF_F_GSO_IPXIP4 | \
3311 NETIF_F_GSO_IPXIP6 | \
3312 NETIF_F_GSO_UDP_TUNNEL | \
3313 NETIF_F_GSO_UDP_TUNNEL_CSUM)
3314
3315 netdev->gso_partial_features = IGB_GSO_PARTIAL_FEATURES;
3316 netdev->features |= NETIF_F_GSO_PARTIAL | IGB_GSO_PARTIAL_FEATURES;
3317
3318 /* copy netdev features into list of user selectable features */
3319 netdev->hw_features |= netdev->features |
3320 NETIF_F_HW_VLAN_CTAG_RX |
3321 NETIF_F_HW_VLAN_CTAG_TX |
3322 NETIF_F_RXALL;
3323
3324 if (hw->mac.type >= e1000_i350)
3325 netdev->hw_features |= NETIF_F_NTUPLE;
3326
3327 netdev->features |= NETIF_F_HIGHDMA;
3328
3329 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
3330 netdev->mpls_features |= NETIF_F_HW_CSUM;
3331 netdev->hw_enc_features |= netdev->vlan_features;
3332
3333 /* set this bit last since it cannot be part of vlan_features */
3334 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
3335 NETIF_F_HW_VLAN_CTAG_RX |
3336 NETIF_F_HW_VLAN_CTAG_TX;
3337
3338 netdev->priv_flags |= IFF_SUPP_NOFCS;
3339
3340 netdev->priv_flags |= IFF_UNICAST_FLT;
3341 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
3342
3343 /* MTU range: 68 - 9216 */
3344 netdev->min_mtu = ETH_MIN_MTU;
3345 netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
3346
3347 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
3348
3349 /* before reading the NVM, reset the controller to put the device in a
3350 * known good starting state
3351 */
3352 hw->mac.ops.reset_hw(hw);
3353
3354	/* make sure the NVM is good; i211/i210 parts can have special NVM
3355 * that doesn't contain a checksum
3356 */
3357 switch (hw->mac.type) {
3358 case e1000_i210:
3359 case e1000_i211:
3360 if (igb_get_flash_presence_i210(hw)) {
3361 if (hw->nvm.ops.validate(hw) < 0) {
3362 dev_err(&pdev->dev,
3363 "The NVM Checksum Is Not Valid\n");
3364 err = -EIO;
3365 goto err_eeprom;
3366 }
3367 }
3368 break;
3369 default:
3370 if (hw->nvm.ops.validate(hw) < 0) {
3371 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
3372 err = -EIO;
3373 goto err_eeprom;
3374 }
3375 break;
3376 }
3377
3378 if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
3379 /* copy the MAC address out of the NVM */
3380 if (hw->mac.ops.read_mac_addr(hw))
3381 dev_err(&pdev->dev, "NVM Read Error\n");
3382 }
3383
3384 eth_hw_addr_set(netdev, hw->mac.addr);
3385
3386 if (!is_valid_ether_addr(netdev->dev_addr)) {
3387 dev_err(&pdev->dev, "Invalid MAC Address\n");
3388 err = -EIO;
3389 goto err_eeprom;
3390 }
3391
3392 igb_set_default_mac_filter(adapter);
3393
3394 /* get firmware version for ethtool -i */
3395 igb_set_fw_version(adapter);
3396
3397 /* configure RXPBSIZE and TXPBSIZE */
3398 if (hw->mac.type == e1000_i210) {
3399 wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
3400 wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
3401 }
3402
3403 timer_setup(&adapter->watchdog_timer, igb_watchdog, 0);
3404 timer_setup(&adapter->phy_info_timer, igb_update_phy_info, 0);
3405
3406 INIT_WORK(&adapter->reset_task, igb_reset_task);
3407 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
3408
3409 /* Initialize link properties that are user-changeable */
3410 adapter->fc_autoneg = true;
3411 hw->mac.autoneg = true;
3412 hw->phy.autoneg_advertised = 0x2f;
3413
3414 hw->fc.requested_mode = e1000_fc_default;
3415 hw->fc.current_mode = e1000_fc_default;
3416
3417 igb_validate_mdi_setting(hw);
3418
3419 /* By default, support wake on port A */
3420 if (hw->bus.func == 0)
3421 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3422
3423 /* Check the NVM for wake support on non-port A ports */
3424 if (hw->mac.type >= e1000_82580)
3425 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
3426 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
3427 &eeprom_data);
3428 else if (hw->bus.func == 1)
3429 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
3430
3431 if (eeprom_data & IGB_EEPROM_APME)
3432 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3433
3434 /* now that we have the eeprom settings, apply the special cases where
3435 * the eeprom may be wrong or the board simply won't support wake on
3436 * lan on a particular port
3437 */
3438 switch (pdev->device) {
3439 case E1000_DEV_ID_82575GB_QUAD_COPPER:
3440 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3441 break;
3442 case E1000_DEV_ID_82575EB_FIBER_SERDES:
3443 case E1000_DEV_ID_82576_FIBER:
3444 case E1000_DEV_ID_82576_SERDES:
3445 /* Wake events only supported on port A for dual fiber
3446 * regardless of eeprom setting
3447 */
3448 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
3449 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3450 break;
3451 case E1000_DEV_ID_82576_QUAD_COPPER:
3452 case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
3453 /* if quad port adapter, disable WoL on all but port A */
3454 if (global_quad_port_a != 0)
3455 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3456 else
3457 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
3458 /* Reset for multiple quad port adapters */
3459 if (++global_quad_port_a == 4)
3460 global_quad_port_a = 0;
3461 break;
3462 default:
3463 /* If the device can't wake, don't set software support */
3464 if (!device_can_wakeup(&adapter->pdev->dev))
3465 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
3466 }
3467
3468 /* initialize the wol settings based on the eeprom settings */
3469 if (adapter->flags & IGB_FLAG_WOL_SUPPORTED)
3470 adapter->wol |= E1000_WUFC_MAG;
3471
3472 /* Some vendors want WoL disabled by default, but still supported */
3473 if ((hw->mac.type == e1000_i350) &&
3474 (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
3475 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3476 adapter->wol = 0;
3477 }
3478
3479	/* Some vendors want the ability to use the EEPROM setting as
3480 * enable/disable only, and not for capability
3481 */
3482 if (((hw->mac.type == e1000_i350) ||
3483 (hw->mac.type == e1000_i354)) &&
3484 (pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)) {
3485 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3486 adapter->wol = 0;
3487 }
3488 if (hw->mac.type == e1000_i350) {
3489 if (((pdev->subsystem_device == 0x5001) ||
3490 (pdev->subsystem_device == 0x5002)) &&
3491 (hw->bus.func == 0)) {
3492 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3493 adapter->wol = 0;
3494 }
3495 if (pdev->subsystem_device == 0x1F52)
3496 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
3497 }
3498
3499 device_set_wakeup_enable(&adapter->pdev->dev,
3500 adapter->flags & IGB_FLAG_WOL_SUPPORTED);
3501
3502 /* reset the hardware with the new settings */
3503 igb_reset(adapter);
3504
3505 /* Init the I2C interface */
3506 err = igb_init_i2c(adapter);
3507 if (err) {
3508 dev_err(&pdev->dev, "failed to init i2c interface\n");
3509 goto err_eeprom;
3510 }
3511
3512 /* let the f/w know that the h/w is now under the control of the
3513 * driver.
3514 */
3515 igb_get_hw_control(adapter);
3516
3517	strscpy(netdev->name, "eth%d", sizeof(netdev->name));
3518 err = register_netdev(netdev);
3519 if (err)
3520 goto err_register;
3521
3522 /* carrier off reporting is important to ethtool even BEFORE open */
3523 netif_carrier_off(netdev);
3524
3525#ifdef CONFIG_IGB_DCA
3526 if (dca_add_requester(&pdev->dev) == 0) {
3527 adapter->flags |= IGB_FLAG_DCA_ENABLED;
3528 dev_info(&pdev->dev, "DCA enabled\n");
3529 igb_setup_dca(adapter);
3530 }
3531
3532#endif
3533#ifdef CONFIG_IGB_HWMON
3534 /* Initialize the thermal sensor on i350 devices. */
3535 if (hw->mac.type == e1000_i350 && hw->bus.func == 0) {
3536 u16 ets_word;
3537
3538 /* Read the NVM to determine if this i350 device supports an
3539 * external thermal sensor.
3540 */
3541 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word);
3542 if (ets_word != 0x0000 && ets_word != 0xFFFF)
3543 adapter->ets = true;
3544 else
3545 adapter->ets = false;
3546 /* Only enable I2C bit banging if an external thermal
3547 * sensor is supported.
3548 */
3549 if (adapter->ets)
3550 igb_set_i2c_bb(hw);
3551 hw->mac.ops.init_thermal_sensor_thresh(hw);
3552 if (igb_sysfs_init(adapter))
3553 dev_err(&pdev->dev,
3554 "failed to allocate sysfs resources\n");
3555 } else {
3556 adapter->ets = false;
3557 }
3558#endif
3559 /* Check if Media Autosense is enabled */
3560 adapter->ei = *ei;
3561 if (hw->dev_spec._82575.mas_capable)
3562 igb_init_mas(adapter);
3563
3564 /* do hw tstamp init after resetting */
3565 igb_ptp_init(adapter);
3566
3567 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
3568 /* print bus type/speed/width info, not applicable to i354 */
3569 if (hw->mac.type != e1000_i354) {
3570 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
3571 netdev->name,
3572 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
3573 (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
3574 "unknown"),
3575 ((hw->bus.width == e1000_bus_width_pcie_x4) ?
3576 "Width x4" :
3577 (hw->bus.width == e1000_bus_width_pcie_x2) ?
3578 "Width x2" :
3579 (hw->bus.width == e1000_bus_width_pcie_x1) ?
3580 "Width x1" : "unknown"), netdev->dev_addr);
3581 }
3582
3583 if ((hw->mac.type == e1000_82576 &&
3584 rd32(E1000_EECD) & E1000_EECD_PRES) ||
3585 (hw->mac.type >= e1000_i210 ||
3586 igb_get_flash_presence_i210(hw))) {
3587 ret_val = igb_read_part_string(hw, part_str,
3588 E1000_PBANUM_LENGTH);
3589 } else {
3590 ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND;
3591 }
3592
3593 if (ret_val)
3594		strscpy(part_str, "Unknown", sizeof(part_str));
3595 dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
3596 dev_info(&pdev->dev,
3597 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
3598 (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" :
3599 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
3600 adapter->num_rx_queues, adapter->num_tx_queues);
3601 if (hw->phy.media_type == e1000_media_type_copper) {
3602 switch (hw->mac.type) {
3603 case e1000_i350:
3604 case e1000_i210:
3605 case e1000_i211:
3606 /* Enable EEE for internal copper PHY devices */
3607 err = igb_set_eee_i350(hw, true, true);
3608 if ((!err) &&
3609 (!hw->dev_spec._82575.eee_disable)) {
3610 adapter->eee_advert =
3611 MDIO_EEE_100TX | MDIO_EEE_1000T;
3612 adapter->flags |= IGB_FLAG_EEE;
3613 }
3614 break;
3615 case e1000_i354:
3616 if ((rd32(E1000_CTRL_EXT) &
3617 E1000_CTRL_EXT_LINK_MODE_SGMII)) {
3618 err = igb_set_eee_i354(hw, true, true);
3619 if ((!err) &&
3620 (!hw->dev_spec._82575.eee_disable)) {
3621 adapter->eee_advert =
3622 MDIO_EEE_100TX | MDIO_EEE_1000T;
3623 adapter->flags |= IGB_FLAG_EEE;
3624 }
3625 }
3626 break;
3627 default:
3628 break;
3629 }
3630 }
3631
3632 dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
3633
3634 pm_runtime_put_noidle(&pdev->dev);
3635 return 0;
3636
3637err_register:
3638 igb_release_hw_control(adapter);
3639 memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap));
3640err_eeprom:
3641 if (!igb_check_reset_block(hw))
3642 igb_reset_phy(hw);
3643
3644 if (hw->flash_address)
3645 iounmap(hw->flash_address);
3646err_sw_init:
3647 kfree(adapter->mac_table);
3648 kfree(adapter->shadow_vfta);
3649 igb_clear_interrupt_scheme(adapter);
3650#ifdef CONFIG_PCI_IOV
3651 igb_disable_sriov(pdev, false);
3652#endif
3653 pci_iounmap(pdev, adapter->io_addr);
3654err_ioremap:
3655 free_netdev(netdev);
3656err_alloc_etherdev:
3657 pci_release_mem_regions(pdev);
3658err_pci_reg:
3659err_dma:
3660 pci_disable_device(pdev);
3661 return err;
3662}
3663
3664#ifdef CONFIG_PCI_IOV
3665static int igb_sriov_reinit(struct pci_dev *dev)
3666{
3667 struct net_device *netdev = pci_get_drvdata(dev);
3668 struct igb_adapter *adapter = netdev_priv(netdev);
3669 struct pci_dev *pdev = adapter->pdev;
3670
3671 rtnl_lock();
3672
3673 if (netif_running(netdev))
3674 igb_close(netdev);
3675 else
3676 igb_reset(adapter);
3677
3678 igb_clear_interrupt_scheme(adapter);
3679
3680 igb_init_queue_configuration(adapter);
3681
3682 if (igb_init_interrupt_scheme(adapter, true)) {
3683 rtnl_unlock();
3684 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
3685 return -ENOMEM;
3686 }
3687
3688 if (netif_running(netdev))
3689 igb_open(netdev);
3690
3691 rtnl_unlock();
3692
3693 return 0;
3694}
3695
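/**
 * igb_disable_sriov - free VF resources and turn SR-IOV off
 * @pdev: PCI device
 * @reinit: rebuild the PF queue/interrupt configuration afterwards
 *
 * Refuses with -EPERM while VFs are still assigned to guests; otherwise
 * disables SR-IOV, frees the VF data and MAC filter lists under vfs_lock,
 * tells hardware to reuse the VF queues and re-enables DMA coalescing.
 **/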
3696static int igb_disable_sriov(struct pci_dev *pdev, bool reinit)
3697{
3698 struct net_device *netdev = pci_get_drvdata(pdev);
3699 struct igb_adapter *adapter = netdev_priv(netdev);
3700 struct e1000_hw *hw = &adapter->hw;
3701 unsigned long flags;
3702
3703 /* reclaim resources allocated to VFs */
3704 if (adapter->vf_data) {
3705 /* disable iov and allow time for transactions to clear */
3706 if (pci_vfs_assigned(pdev)) {
3707 dev_warn(&pdev->dev,
3708 "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n");
3709 return -EPERM;
3710 } else {
3711 pci_disable_sriov(pdev);
3712 msleep(500);
3713 }
3714 spin_lock_irqsave(&adapter->vfs_lock, flags);
3715 kfree(adapter->vf_mac_list);
3716 adapter->vf_mac_list = NULL;
3717 kfree(adapter->vf_data);
3718 adapter->vf_data = NULL;
3719 adapter->vfs_allocated_count = 0;
3720 spin_unlock_irqrestore(&adapter->vfs_lock, flags);
3721 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
3722 wrfl();
3723 msleep(100);
3724 dev_info(&pdev->dev, "IOV Disabled\n");
3725
3726 /* Re-enable DMA Coalescing flag since IOV is turned off */
3727 adapter->flags |= IGB_FLAG_DMAC;
3728 }
3729
3730 return reinit ? igb_sriov_reinit(pdev) : 0;
3731}
3732
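/**
 * igb_enable_sriov - allocate VF data structures and enable SR-IOV
 * @pdev: PCI device
 * @num_vfs: number of VFs requested (at most 7, and MSI-X is required)
 * @reinit: rebuild the PF queue/interrupt configuration before enabling
 *
 * Pre-allocated VFs override the requested count. Sets up per-VF data and
 * the shared VF MAC filter list, disables DMA coalescing (unsupported in
 * IOV mode) and finally calls pci_enable_sriov() if no VFs existed yet.
 **/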
3733static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs, bool reinit)
3734{
3735 struct net_device *netdev = pci_get_drvdata(pdev);
3736 struct igb_adapter *adapter = netdev_priv(netdev);
3737 int old_vfs = pci_num_vf(pdev);
3738 struct vf_mac_filter *mac_list;
3739 int err = 0;
3740 int num_vf_mac_filters, i;
3741
3742 if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) {
3743 err = -EPERM;
3744 goto out;
3745 }
3746 if (!num_vfs)
3747 goto out;
3748
3749 if (old_vfs) {
3750 dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n",
3751 old_vfs, max_vfs);
3752 adapter->vfs_allocated_count = old_vfs;
3753 } else
3754 adapter->vfs_allocated_count = num_vfs;
3755
3756 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
3757 sizeof(struct vf_data_storage), GFP_KERNEL);
3758
3759 /* if allocation failed then we do not support SR-IOV */
3760 if (!adapter->vf_data) {
3761 adapter->vfs_allocated_count = 0;
3762 err = -ENOMEM;
3763 goto out;
3764 }
3765
3766 /* Due to the limited number of RAR entries calculate potential
3767 * number of MAC filters available for the VFs. Reserve entries
3768 * for PF default MAC, PF MAC filters and at least one RAR entry
3769 * for each VF for VF MAC.
3770 */
3771 num_vf_mac_filters = adapter->hw.mac.rar_entry_count -
3772 (1 + IGB_PF_MAC_FILTERS_RESERVED +
3773 adapter->vfs_allocated_count);
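	/* Illustrative only: a part with 24 RAR entries and 7 VFs would
	 * leave 24 - (1 + IGB_PF_MAC_FILTERS_RESERVED + 7) entries for
	 * additional VF MAC filters.
	 */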
3774
3775 adapter->vf_mac_list = kcalloc(num_vf_mac_filters,
3776 sizeof(struct vf_mac_filter),
3777 GFP_KERNEL);
3778
3779 mac_list = adapter->vf_mac_list;
3780 INIT_LIST_HEAD(&adapter->vf_macs.l);
3781
3782 if (adapter->vf_mac_list) {
3783 /* Initialize list of VF MAC filters */
3784 for (i = 0; i < num_vf_mac_filters; i++) {
3785 mac_list->vf = -1;
3786 mac_list->free = true;
3787 list_add(&mac_list->l, &adapter->vf_macs.l);
3788 mac_list++;
3789 }
3790 } else {
3791 /* If we could not allocate memory for the VF MAC filters
3792 * we can continue without this feature but warn user.
3793 */
3794 dev_err(&pdev->dev,
3795 "Unable to allocate memory for VF MAC filter list\n");
3796 }
3797
3798 dev_info(&pdev->dev, "%d VFs allocated\n",
3799 adapter->vfs_allocated_count);
3800 for (i = 0; i < adapter->vfs_allocated_count; i++)
3801 igb_vf_configure(adapter, i);
3802
3803 /* DMA Coalescing is not supported in IOV mode. */
3804 adapter->flags &= ~IGB_FLAG_DMAC;
3805
3806 if (reinit) {
3807 err = igb_sriov_reinit(pdev);
3808 if (err)
3809 goto err_out;
3810 }
3811
3812 /* only call pci_enable_sriov() if no VFs are allocated already */
3813 if (!old_vfs) {
3814 err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
3815 if (err)
3816 goto err_out;
3817 }
3818
3819 goto out;
3820
3821err_out:
3822 kfree(adapter->vf_mac_list);
3823 adapter->vf_mac_list = NULL;
3824 kfree(adapter->vf_data);
3825 adapter->vf_data = NULL;
3826 adapter->vfs_allocated_count = 0;
3827out:
3828 return err;
3829}
3830
3831#endif
3832/**
3833 * igb_remove_i2c - Cleanup I2C interface
3834 * @adapter: pointer to adapter structure
3835 **/
3836static void igb_remove_i2c(struct igb_adapter *adapter)
3837{
3838 /* free the adapter bus structure */
3839 i2c_del_adapter(&adapter->i2c_adap);
3840}
3841
3842/**
3843 * igb_remove - Device Removal Routine
3844 * @pdev: PCI device information struct
3845 *
3846 * igb_remove is called by the PCI subsystem to alert the driver
3847 * that it should release a PCI device. The could be caused by a
3848 * that it should release a PCI device. This could be caused by a
3849 * memory.
3850 **/
3851static void igb_remove(struct pci_dev *pdev)
3852{
3853 struct net_device *netdev = pci_get_drvdata(pdev);
3854 struct igb_adapter *adapter = netdev_priv(netdev);
3855 struct e1000_hw *hw = &adapter->hw;
3856
3857 pm_runtime_get_noresume(&pdev->dev);
3858#ifdef CONFIG_IGB_HWMON
3859 igb_sysfs_exit(adapter);
3860#endif
3861 igb_remove_i2c(adapter);
3862 igb_ptp_stop(adapter);
3863 /* The watchdog timer may be rescheduled, so explicitly
3864 * disable it from being rescheduled.
3865 */
3866 set_bit(__IGB_DOWN, &adapter->state);
3867 del_timer_sync(&adapter->watchdog_timer);
3868 del_timer_sync(&adapter->phy_info_timer);
3869
3870 cancel_work_sync(&adapter->reset_task);
3871 cancel_work_sync(&adapter->watchdog_task);
3872
3873#ifdef CONFIG_IGB_DCA
3874 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
3875 dev_info(&pdev->dev, "DCA disabled\n");
3876 dca_remove_requester(&pdev->dev);
3877 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
3878 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
3879 }
3880#endif
3881
3882 /* Release control of h/w to f/w. If f/w is AMT enabled, this
3883 * would have already happened in close and is redundant.
3884 */
3885 igb_release_hw_control(adapter);
3886
3887#ifdef CONFIG_PCI_IOV
3888 igb_disable_sriov(pdev, false);
3889#endif
3890
3891 unregister_netdev(netdev);
3892
3893 igb_clear_interrupt_scheme(adapter);
3894
3895 pci_iounmap(pdev, adapter->io_addr);
3896 if (hw->flash_address)
3897 iounmap(hw->flash_address);
3898 pci_release_mem_regions(pdev);
3899
3900 kfree(adapter->mac_table);
3901 kfree(adapter->shadow_vfta);
3902 free_netdev(netdev);
3903
3904 pci_disable_device(pdev);
3905}
3906
3907/**
3908 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
3909 * @adapter: board private structure to initialize
3910 *
3911 * This function initializes the vf specific data storage and then attempts to
3912 * allocate the VFs. It is ordered this way because disabling SR-IOV is
3913 * much more expensive, time wise, than allocating and freeing the memory
3914 * for the VFs.
3915 **/
3916static void igb_probe_vfs(struct igb_adapter *adapter)
3917{
3918#ifdef CONFIG_PCI_IOV
3919 struct pci_dev *pdev = adapter->pdev;
3920 struct e1000_hw *hw = &adapter->hw;
3921
3922	/* Virtualization features not supported on the i210 and 82580 families. */
3923 if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211) ||
3924 (hw->mac.type == e1000_82580))
3925 return;
3926
3927 /* Of the below we really only want the effect of getting
3928 * IGB_FLAG_HAS_MSIX set (if available), without which
3929 * igb_enable_sriov() has no effect.
3930 */
3931 igb_set_interrupt_capability(adapter, true);
3932 igb_reset_interrupt_capability(adapter);
3933
3934 pci_sriov_set_totalvfs(pdev, 7);
3935 igb_enable_sriov(pdev, max_vfs, false);
3936
3937#endif /* CONFIG_PCI_IOV */
3938}
3939
3940unsigned int igb_get_max_rss_queues(struct igb_adapter *adapter)
3941{
3942 struct e1000_hw *hw = &adapter->hw;
3943 unsigned int max_rss_queues;
3944
3945 /* Determine the maximum number of RSS queues supported. */
3946 switch (hw->mac.type) {
3947 case e1000_i211:
3948 max_rss_queues = IGB_MAX_RX_QUEUES_I211;
3949 break;
3950 case e1000_82575:
3951 case e1000_i210:
3952 max_rss_queues = IGB_MAX_RX_QUEUES_82575;
3953 break;
3954 case e1000_i350:
3955 /* I350 cannot do RSS and SR-IOV at the same time */
3956 if (!!adapter->vfs_allocated_count) {
3957 max_rss_queues = 1;
3958 break;
3959 }
3960 fallthrough;
3961 case e1000_82576:
3962 if (!!adapter->vfs_allocated_count) {
3963 max_rss_queues = 2;
3964 break;
3965 }
3966 fallthrough;
3967 case e1000_82580:
3968 case e1000_i354:
3969 default:
3970 max_rss_queues = IGB_MAX_RX_QUEUES;
3971 break;
3972 }
3973
3974 return max_rss_queues;
3975}
3976
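/**
 * igb_init_queue_configuration - choose the RSS queue count
 * @adapter: board private structure
 *
 * Caps rss_queues at the smaller of what the MAC supports and the number
 * of online CPUs, then updates the queue pairing flag to match.
 **/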
3977static void igb_init_queue_configuration(struct igb_adapter *adapter)
3978{
3979 u32 max_rss_queues;
3980
3981 max_rss_queues = igb_get_max_rss_queues(adapter);
3982 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
3983
3984 igb_set_flag_queue_pairs(adapter, max_rss_queues);
3985}
3986
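/**
 * igb_set_flag_queue_pairs - decide whether Tx/Rx queues share vectors
 * @adapter: board private structure
 * @max_rss_queues: maximum RSS queues the MAC supports
 *
 * For example, on a MAC supporting eight RSS queues, IGB_FLAG_QUEUE_PAIRS
 * is set once rss_queues exceeds four so that a Tx/Rx pair can share one
 * of the limited interrupt vectors (queue count illustrative).
 **/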
3987void igb_set_flag_queue_pairs(struct igb_adapter *adapter,
3988 const u32 max_rss_queues)
3989{
3990 struct e1000_hw *hw = &adapter->hw;
3991
3992 /* Determine if we need to pair queues. */
3993 switch (hw->mac.type) {
3994 case e1000_82575:
3995 case e1000_i211:
3996 /* Device supports enough interrupts without queue pairing. */
3997 break;
3998 case e1000_82576:
3999 case e1000_82580:
4000 case e1000_i350:
4001 case e1000_i354:
4002 case e1000_i210:
4003 default:
4004 /* If rss_queues > half of max_rss_queues, pair the queues in
4005 * order to conserve interrupts due to limited supply.
4006 */
4007 if (adapter->rss_queues > (max_rss_queues / 2))
4008 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
4009 else
4010 adapter->flags &= ~IGB_FLAG_QUEUE_PAIRS;
4011 break;
4012 }
4013}
4014
4015/**
4016 * igb_sw_init - Initialize general software structures (struct igb_adapter)
4017 * @adapter: board private structure to initialize
4018 *
4019 * igb_sw_init initializes the Adapter private data structure.
4020 * Fields are initialized based on PCI device information and
4021 * OS network device settings (MTU size).
4022 **/
4023static int igb_sw_init(struct igb_adapter *adapter)
4024{
4025 struct e1000_hw *hw = &adapter->hw;
4026 struct net_device *netdev = adapter->netdev;
4027 struct pci_dev *pdev = adapter->pdev;
4028
4029 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
4030
4031 /* set default ring sizes */
4032 adapter->tx_ring_count = IGB_DEFAULT_TXD;
4033 adapter->rx_ring_count = IGB_DEFAULT_RXD;
4034
4035 /* set default ITR values */
4036 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
4037 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
4038
4039 /* set default work limits */
4040 adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
4041
4042 adapter->max_frame_size = netdev->mtu + IGB_ETH_PKT_HDR_PAD;
4043 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
4044
4045 spin_lock_init(&adapter->nfc_lock);
4046 spin_lock_init(&adapter->stats64_lock);
4047
4048	/* init spinlock to protect VF resources from concurrent access */
4049 spin_lock_init(&adapter->vfs_lock);
4050#ifdef CONFIG_PCI_IOV
4051 switch (hw->mac.type) {
4052 case e1000_82576:
4053 case e1000_i350:
4054 if (max_vfs > 7) {
4055 dev_warn(&pdev->dev,
4056 "Maximum of 7 VFs per PF, using max\n");
4057 max_vfs = adapter->vfs_allocated_count = 7;
4058 } else
4059 adapter->vfs_allocated_count = max_vfs;
4060 if (adapter->vfs_allocated_count)
4061 dev_warn(&pdev->dev,
4062 "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n");
4063 break;
4064 default:
4065 break;
4066 }
4067#endif /* CONFIG_PCI_IOV */
4068
4069 /* Assume MSI-X interrupts, will be checked during IRQ allocation */
4070 adapter->flags |= IGB_FLAG_HAS_MSIX;
4071
4072 adapter->mac_table = kcalloc(hw->mac.rar_entry_count,
4073 sizeof(struct igb_mac_addr),
4074 GFP_KERNEL);
4075 if (!adapter->mac_table)
4076 return -ENOMEM;
4077
4078 igb_probe_vfs(adapter);
4079
4080 igb_init_queue_configuration(adapter);
4081
4082 /* Setup and initialize a copy of the hw vlan table array */
4083 adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
4084 GFP_KERNEL);
4085 if (!adapter->shadow_vfta)
4086 return -ENOMEM;
4087
4088 /* This call may decrease the number of queues */
4089 if (igb_init_interrupt_scheme(adapter, true)) {
4090 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
4091 return -ENOMEM;
4092 }
4093
4094 /* Explicitly disable IRQ since the NIC can be in any state. */
4095 igb_irq_disable(adapter);
4096
4097 if (hw->mac.type >= e1000_i350)
4098 adapter->flags &= ~IGB_FLAG_DMAC;
4099
4100 set_bit(__IGB_DOWN, &adapter->state);
4101 return 0;
4102}
4103
4104/**
4105 * __igb_open - Called when a network interface is made active
4106 * @netdev: network interface device structure
4107 * @resuming: indicates whether we are in a resume call
4108 *
4109 * Returns 0 on success, negative value on failure
4110 *
4111 * The open entry point is called when a network interface is made
4112 * active by the system (IFF_UP). At this point all resources needed
4113 * for transmit and receive operations are allocated, the interrupt
4114 * handler is registered with the OS, the watchdog timer is started,
4115 * and the stack is notified that the interface is ready.
4116 **/
4117static int __igb_open(struct net_device *netdev, bool resuming)
4118{
4119 struct igb_adapter *adapter = netdev_priv(netdev);
4120 struct e1000_hw *hw = &adapter->hw;
4121 struct pci_dev *pdev = adapter->pdev;
4122 int err;
4123 int i;
4124
4125 /* disallow open during test */
4126 if (test_bit(__IGB_TESTING, &adapter->state)) {
4127 WARN_ON(resuming);
4128 return -EBUSY;
4129 }
4130
4131 if (!resuming)
4132 pm_runtime_get_sync(&pdev->dev);
4133
4134 netif_carrier_off(netdev);
4135
4136 /* allocate transmit descriptors */
4137 err = igb_setup_all_tx_resources(adapter);
4138 if (err)
4139 goto err_setup_tx;
4140
4141 /* allocate receive descriptors */
4142 err = igb_setup_all_rx_resources(adapter);
4143 if (err)
4144 goto err_setup_rx;
4145
4146 igb_power_up_link(adapter);
4147
4148 /* before we allocate an interrupt, we must be ready to handle it.
4149 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
4150	 * as soon as we call pci_request_irq, so we have to set up our
4151 * clean_rx handler before we do so.
4152 */
4153 igb_configure(adapter);
4154
4155 err = igb_request_irq(adapter);
4156 if (err)
4157 goto err_req_irq;
4158
4159 /* Notify the stack of the actual queue counts. */
4160 err = netif_set_real_num_tx_queues(adapter->netdev,
4161 adapter->num_tx_queues);
4162 if (err)
4163 goto err_set_queues;
4164
4165 err = netif_set_real_num_rx_queues(adapter->netdev,
4166 adapter->num_rx_queues);
4167 if (err)
4168 goto err_set_queues;
4169
4170 /* From here on the code is the same as igb_up() */
4171 clear_bit(__IGB_DOWN, &adapter->state);
4172
4173 for (i = 0; i < adapter->num_q_vectors; i++)
4174 napi_enable(&(adapter->q_vector[i]->napi));
4175
4176 /* Clear any pending interrupts. */
4177 rd32(E1000_TSICR);
4178 rd32(E1000_ICR);
4179
4180 igb_irq_enable(adapter);
4181
4182 /* notify VFs that reset has been completed */
4183 if (adapter->vfs_allocated_count) {
4184 u32 reg_data = rd32(E1000_CTRL_EXT);
4185
4186 reg_data |= E1000_CTRL_EXT_PFRSTD;
4187 wr32(E1000_CTRL_EXT, reg_data);
4188 }
4189
4190 netif_tx_start_all_queues(netdev);
4191
4192 if (!resuming)
4193 pm_runtime_put(&pdev->dev);
4194
4195 /* start the watchdog. */
4196 hw->mac.get_link_status = 1;
4197 schedule_work(&adapter->watchdog_task);
4198
4199 return 0;
4200
4201err_set_queues:
4202 igb_free_irq(adapter);
4203err_req_irq:
4204 igb_release_hw_control(adapter);
4205 igb_power_down_link(adapter);
4206 igb_free_all_rx_resources(adapter);
4207err_setup_rx:
4208 igb_free_all_tx_resources(adapter);
4209err_setup_tx:
4210 igb_reset(adapter);
4211 if (!resuming)
4212 pm_runtime_put(&pdev->dev);
4213
4214 return err;
4215}
4216
4217int igb_open(struct net_device *netdev)
4218{
4219 return __igb_open(netdev, false);
4220}
4221
4222/**
4223 * __igb_close - Disables a network interface
4224 * @netdev: network interface device structure
4225 * @suspending: indicates we are in a suspend call
4226 *
4227 * Returns 0, this is not allowed to fail
4228 *
4229 * The close entry point is called when an interface is de-activated
4230 * by the OS. The hardware is still under the driver's control, but
4231 * needs to be disabled. A global MAC reset is issued to stop the
4232 * hardware, and all transmit and receive resources are freed.
4233 **/
4234static int __igb_close(struct net_device *netdev, bool suspending)
4235{
4236 struct igb_adapter *adapter = netdev_priv(netdev);
4237 struct pci_dev *pdev = adapter->pdev;
4238
4239 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
4240
4241 if (!suspending)
4242 pm_runtime_get_sync(&pdev->dev);
4243
4244 igb_down(adapter);
4245 igb_free_irq(adapter);
4246
4247 igb_free_all_tx_resources(adapter);
4248 igb_free_all_rx_resources(adapter);
4249
4250 if (!suspending)
4251 pm_runtime_put_sync(&pdev->dev);
4252 return 0;
4253}
4254
4255int igb_close(struct net_device *netdev)
4256{
4257 if (netif_device_present(netdev) || netdev->dismantle)
4258 return __igb_close(netdev, false);
4259 return 0;
4260}
4261
4262/**
4263 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
4264 * @tx_ring: tx descriptor ring (for a specific queue) to setup
4265 *
4266 * Return 0 on success, negative on failure
4267 **/
4268int igb_setup_tx_resources(struct igb_ring *tx_ring)
4269{
4270 struct device *dev = tx_ring->dev;
4271 int size;
4272
4273 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
4274
4275 tx_ring->tx_buffer_info = vmalloc(size);
4276 if (!tx_ring->tx_buffer_info)
4277 goto err;
4278
4279 /* round up to nearest 4K */
4280 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
4281 tx_ring->size = ALIGN(tx_ring->size, 4096);
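	/* e.g. a 256-entry ring at 16 bytes per advanced descriptor is
	 * exactly 4096 bytes, while a 320-entry ring (5120 bytes) would
	 * round up to 8192 (sizes illustrative).
	 */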
4282
4283 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
4284 &tx_ring->dma, GFP_KERNEL);
4285 if (!tx_ring->desc)
4286 goto err;
4287
4288 tx_ring->next_to_use = 0;
4289 tx_ring->next_to_clean = 0;
4290
4291 return 0;
4292
4293err:
4294 vfree(tx_ring->tx_buffer_info);
4295 tx_ring->tx_buffer_info = NULL;
4296 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
4297 return -ENOMEM;
4298}
4299
4300/**
4301 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
4302 * (Descriptors) for all queues
4303 * @adapter: board private structure
4304 *
4305 * Return 0 on success, negative on failure
4306 **/
4307static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
4308{
4309 struct pci_dev *pdev = adapter->pdev;
4310 int i, err = 0;
4311
4312 for (i = 0; i < adapter->num_tx_queues; i++) {
4313 err = igb_setup_tx_resources(adapter->tx_ring[i]);
4314 if (err) {
4315 dev_err(&pdev->dev,
4316 "Allocation for Tx Queue %u failed\n", i);
4317 for (i--; i >= 0; i--)
4318 igb_free_tx_resources(adapter->tx_ring[i]);
4319 break;
4320 }
4321 }
4322
4323 return err;
4324}
4325
4326/**
4327 * igb_setup_tctl - configure the transmit control registers
4328 * @adapter: Board private structure
4329 **/
4330void igb_setup_tctl(struct igb_adapter *adapter)
4331{
4332 struct e1000_hw *hw = &adapter->hw;
4333 u32 tctl;
4334
4335 /* disable queue 0 which is enabled by default on 82575 and 82576 */
4336 wr32(E1000_TXDCTL(0), 0);
4337
4338 /* Program the Transmit Control Register */
4339 tctl = rd32(E1000_TCTL);
4340 tctl &= ~E1000_TCTL_CT;
4341 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
4342 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
4343
4344 igb_config_collision_dist(hw);
4345
4346 /* Enable transmits */
4347 tctl |= E1000_TCTL_EN;
4348
4349 wr32(E1000_TCTL, tctl);
4350}
4351
4352/**
4353 * igb_configure_tx_ring - Configure transmit ring after Reset
4354 * @adapter: board private structure
4355 * @ring: tx ring to configure
4356 *
4357 * Configure a transmit ring after a reset.
4358 **/
4359void igb_configure_tx_ring(struct igb_adapter *adapter,
4360 struct igb_ring *ring)
4361{
4362 struct e1000_hw *hw = &adapter->hw;
4363 u32 txdctl = 0;
4364 u64 tdba = ring->dma;
4365 int reg_idx = ring->reg_idx;
4366
4367 wr32(E1000_TDLEN(reg_idx),
4368 ring->count * sizeof(union e1000_adv_tx_desc));
4369 wr32(E1000_TDBAL(reg_idx),
4370 tdba & 0x00000000ffffffffULL);
4371 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
4372
4373 ring->tail = adapter->io_addr + E1000_TDT(reg_idx);
4374 wr32(E1000_TDH(reg_idx), 0);
4375 writel(0, ring->tail);
4376
4377 txdctl |= IGB_TX_PTHRESH;
4378 txdctl |= IGB_TX_HTHRESH << 8;
4379 txdctl |= IGB_TX_WTHRESH << 16;
4380
4381 /* reinitialize tx_buffer_info */
4382 memset(ring->tx_buffer_info, 0,
4383 sizeof(struct igb_tx_buffer) * ring->count);
4384
4385 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
4386 wr32(E1000_TXDCTL(reg_idx), txdctl);
4387}
4388
4389/**
4390 * igb_configure_tx - Configure transmit Unit after Reset
4391 * @adapter: board private structure
4392 *
4393 * Configure the Tx unit of the MAC after a reset.
4394 **/
4395static void igb_configure_tx(struct igb_adapter *adapter)
4396{
4397 struct e1000_hw *hw = &adapter->hw;
4398 int i;
4399
4400 /* disable the queues */
4401 for (i = 0; i < adapter->num_tx_queues; i++)
4402 wr32(E1000_TXDCTL(adapter->tx_ring[i]->reg_idx), 0);
4403
4404 wrfl();
4405 usleep_range(10000, 20000);
4406
4407 for (i = 0; i < adapter->num_tx_queues; i++)
4408 igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
4409}
4410
4411/**
4412 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
4413 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
4414 *
4415 * Returns 0 on success, negative on failure
4416 **/
4417int igb_setup_rx_resources(struct igb_ring *rx_ring)
4418{
4419 struct igb_adapter *adapter = netdev_priv(rx_ring->netdev);
4420 struct device *dev = rx_ring->dev;
4421 int size, res;
4422
4423 /* XDP RX-queue info */
4424 if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
4425 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
4426 res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
4427 rx_ring->queue_index, 0);
4428 if (res < 0) {
4429 dev_err(dev, "Failed to register xdp_rxq index %u\n",
4430 rx_ring->queue_index);
4431 return res;
4432 }
4433
4434 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
4435
4436 rx_ring->rx_buffer_info = vmalloc(size);
4437 if (!rx_ring->rx_buffer_info)
4438 goto err;
4439
4440 /* Round up to nearest 4K */
4441 rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
4442 rx_ring->size = ALIGN(rx_ring->size, 4096);
4443
4444 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
4445 &rx_ring->dma, GFP_KERNEL);
4446 if (!rx_ring->desc)
4447 goto err;
4448
4449 rx_ring->next_to_alloc = 0;
4450 rx_ring->next_to_clean = 0;
4451 rx_ring->next_to_use = 0;
4452
4453 rx_ring->xdp_prog = adapter->xdp_prog;
4454
4455 return 0;
4456
4457err:
4458 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
4459 vfree(rx_ring->rx_buffer_info);
4460 rx_ring->rx_buffer_info = NULL;
4461 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
4462 return -ENOMEM;
4463}
4464
4465/**
4466 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
4467 * (Descriptors) for all queues
4468 * @adapter: board private structure
4469 *
4470 * Return 0 on success, negative on failure
4471 **/
4472static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
4473{
4474 struct pci_dev *pdev = adapter->pdev;
4475 int i, err = 0;
4476
4477 for (i = 0; i < adapter->num_rx_queues; i++) {
4478 err = igb_setup_rx_resources(adapter->rx_ring[i]);
4479 if (err) {
4480 dev_err(&pdev->dev,
4481 "Allocation for Rx Queue %u failed\n", i);
4482 for (i--; i >= 0; i--)
4483 igb_free_rx_resources(adapter->rx_ring[i]);
4484 break;
4485 }
4486 }
4487
4488 return err;
4489}
4490
4491/**
4492 * igb_setup_mrqc - configure the multiple receive queue control registers
4493 * @adapter: Board private structure
4494 **/
4495static void igb_setup_mrqc(struct igb_adapter *adapter)
4496{
4497 struct e1000_hw *hw = &adapter->hw;
4498 u32 mrqc, rxcsum;
4499 u32 j, num_rx_queues;
4500 u32 rss_key[10];
4501
4502 netdev_rss_key_fill(rss_key, sizeof(rss_key));
4503 for (j = 0; j < 10; j++)
4504 wr32(E1000_RSSRK(j), rss_key[j]);
4505
4506 num_rx_queues = adapter->rss_queues;
4507
4508 switch (hw->mac.type) {
4509 case e1000_82576:
4510 /* 82576 supports 2 RSS queues for SR-IOV */
4511 if (adapter->vfs_allocated_count)
4512 num_rx_queues = 2;
4513 break;
4514 default:
4515 break;
4516 }
4517
4518 if (adapter->rss_indir_tbl_init != num_rx_queues) {
4519 for (j = 0; j < IGB_RETA_SIZE; j++)
4520 adapter->rss_indir_tbl[j] =
4521 (j * num_rx_queues) / IGB_RETA_SIZE;
4522 adapter->rss_indir_tbl_init = num_rx_queues;
4523 }
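	/* e.g. with 4 RSS queues and the usual 128-entry table, entry j
	 * maps to queue (j * 4) / 128, so each queue owns 32 consecutive
	 * indirection entries (numbers illustrative).
	 */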
4524 igb_write_rss_indir_tbl(adapter);
4525
4526 /* Disable raw packet checksumming so that RSS hash is placed in
4527 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
4528 * offloads as they are enabled by default
4529 */
4530 rxcsum = rd32(E1000_RXCSUM);
4531 rxcsum |= E1000_RXCSUM_PCSD;
4532
4533 if (adapter->hw.mac.type >= e1000_82576)
4534 /* Enable Receive Checksum Offload for SCTP */
4535 rxcsum |= E1000_RXCSUM_CRCOFL;
4536
4537 /* Don't need to set TUOFL or IPOFL, they default to 1 */
4538 wr32(E1000_RXCSUM, rxcsum);
4539
4540 /* Generate RSS hash based on packet types, TCP/UDP
4541 * port numbers and/or IPv4/v6 src and dst addresses
4542 */
4543 mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
4544 E1000_MRQC_RSS_FIELD_IPV4_TCP |
4545 E1000_MRQC_RSS_FIELD_IPV6 |
4546 E1000_MRQC_RSS_FIELD_IPV6_TCP |
4547 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
4548
4549 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
4550 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
4551 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
4552 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
4553
4554 /* If VMDq is enabled then we set the appropriate mode for that, else
4555 * we default to RSS so that an RSS hash is calculated per packet even
4556 * if we are only using one queue
4557 */
4558 if (adapter->vfs_allocated_count) {
4559 if (hw->mac.type > e1000_82575) {
4560 /* Set the default pool for the PF's first queue */
4561 u32 vtctl = rd32(E1000_VT_CTL);
4562
4563 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
4564 E1000_VT_CTL_DISABLE_DEF_POOL);
4565 vtctl |= adapter->vfs_allocated_count <<
4566 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
4567 wr32(E1000_VT_CTL, vtctl);
4568 }
4569 if (adapter->rss_queues > 1)
4570 mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_MQ;
4571 else
4572 mrqc |= E1000_MRQC_ENABLE_VMDQ;
4573 } else {
4574 mrqc |= E1000_MRQC_ENABLE_RSS_MQ;
4575 }
4576 igb_vmm_control(adapter);
4577
4578 wr32(E1000_MRQC, mrqc);
4579}
4580
4581/**
4582 * igb_setup_rctl - configure the receive control registers
4583 * @adapter: Board private structure
4584 **/
4585void igb_setup_rctl(struct igb_adapter *adapter)
4586{
4587 struct e1000_hw *hw = &adapter->hw;
4588 u32 rctl;
4589
4590 rctl = rd32(E1000_RCTL);
4591
4592 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
4593 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
4594
4595 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
4596 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
4597
4598 /* enable stripping of CRC. It's unlikely this will break BMC
4599 * redirection as it did with e1000. Newer features require
4600 * that the HW strips the CRC.
4601 */
4602 rctl |= E1000_RCTL_SECRC;
4603
4604 /* disable store bad packets and clear size bits. */
4605 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
4606
4607 /* enable LPE to allow for reception of jumbo frames */
4608 rctl |= E1000_RCTL_LPE;
4609
4610 /* disable queue 0 to prevent tail write w/o re-config */
4611 wr32(E1000_RXDCTL(0), 0);
4612
4613 /* Attention!!! For SR-IOV PF driver operations you must enable
4614 * queue drop for all VF and PF queues to prevent head of line blocking
4615	 * if an untrusted VF does not provide descriptors to hardware.
4616 */
4617 if (adapter->vfs_allocated_count) {
4618 /* set all queue drop enable bits */
4619 wr32(E1000_QDE, ALL_QUEUES);
4620 }
4621
4622 /* This is useful for sniffing bad packets. */
4623 if (adapter->netdev->features & NETIF_F_RXALL) {
4624 /* UPE and MPE will be handled by normal PROMISC logic
4625		 * in igb_set_rx_mode
4626 */
4627 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
4628 E1000_RCTL_BAM | /* RX All Bcast Pkts */
4629 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
4630
4631 rctl &= ~(E1000_RCTL_DPF | /* Allow filtered pause */
4632 E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
4633 /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
4634 * and that breaks VLANs.
4635 */
4636 }
4637
4638 wr32(E1000_RCTL, rctl);
4639}
4640
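/**
 * igb_set_vf_rlpml - set the maximum Rx frame size for a VF pool
 * @adapter: board private structure
 * @size: maximum frame size, clamped to MAX_JUMBO_FRAME_SIZE
 * @vfn: pool/VF number to program
 *
 * Programs the per-pool long packet enable and RLPML field in VMOLR.
 **/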
4641static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
4642 int vfn)
4643{
4644 struct e1000_hw *hw = &adapter->hw;
4645 u32 vmolr;
4646
4647 if (size > MAX_JUMBO_FRAME_SIZE)
4648 size = MAX_JUMBO_FRAME_SIZE;
4649
4650 vmolr = rd32(E1000_VMOLR(vfn));
4651 vmolr &= ~E1000_VMOLR_RLPML_MASK;
4652 vmolr |= size | E1000_VMOLR_LPE;
4653 wr32(E1000_VMOLR(vfn), vmolr);
4654
4655 return 0;
4656}
4657
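/**
 * igb_set_vf_vlan_strip - enable or disable VLAN tag stripping for a pool
 * @adapter: board private structure
 * @vfn: pool/VF number to program
 * @enable: true to strip VLAN tags on receive
 *
 * Uses DVMOLR on i350 and VMOLR elsewhere; a no-op before 82576.
 **/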
4658static inline void igb_set_vf_vlan_strip(struct igb_adapter *adapter,
4659 int vfn, bool enable)
4660{
4661 struct e1000_hw *hw = &adapter->hw;
4662 u32 val, reg;
4663
4664 if (hw->mac.type < e1000_82576)
4665 return;
4666
4667 if (hw->mac.type == e1000_i350)
4668 reg = E1000_DVMOLR(vfn);
4669 else
4670 reg = E1000_VMOLR(vfn);
4671
4672 val = rd32(reg);
4673 if (enable)
4674 val |= E1000_VMOLR_STRVLAN;
4675 else
4676 val &= ~(E1000_VMOLR_STRVLAN);
4677 wr32(reg, val);
4678}
4679
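/**
 * igb_set_vmolr - program VM offload register defaults for a pool
 * @adapter: board private structure
 * @vfn: pool/VF number to program
 * @aupe: true to accept untagged packets in this pool
 *
 * Also enables RSS for the PF default pool when multiple RSS queues are
 * in use, and broadcast acceptance for the VFs and pool 0. A no-op
 * before 82576.
 **/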
4680static inline void igb_set_vmolr(struct igb_adapter *adapter,
4681 int vfn, bool aupe)
4682{
4683 struct e1000_hw *hw = &adapter->hw;
4684 u32 vmolr;
4685
4686	/* This register exists only on 82576 and newer, so on older hardware
4687	 * we should exit and do nothing
4688 */
4689 if (hw->mac.type < e1000_82576)
4690 return;
4691
4692 vmolr = rd32(E1000_VMOLR(vfn));
4693 if (aupe)
4694 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
4695 else
4696 vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
4697
4698 /* clear all bits that might not be set */
4699 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
4700
4701 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
4702 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
4703 /* for VMDq only allow the VFs and pool 0 to accept broadcast and
4704 * multicast packets
4705 */
4706 if (vfn <= adapter->vfs_allocated_count)
4707 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
4708
4709 wr32(E1000_VMOLR(vfn), vmolr);
4710}
4711
4712/**
4713 * igb_setup_srrctl - configure the split and replication receive control
4714 * registers
4715 * @adapter: Board private structure
4716 * @ring: receive ring to be configured
4717 **/
4718void igb_setup_srrctl(struct igb_adapter *adapter, struct igb_ring *ring)
4719{
4720 struct e1000_hw *hw = &adapter->hw;
4721 int reg_idx = ring->reg_idx;
4722 u32 srrctl = 0;
4723
4724 srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
4725 if (ring_uses_large_buffer(ring))
4726 srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
4727 else
4728 srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
4729 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
4730 if (hw->mac.type >= e1000_82580)
4731 srrctl |= E1000_SRRCTL_TIMESTAMP;
4732 /* Only set Drop Enable if VFs allocated, or we are supporting multiple
4733 * queues and rx flow control is disabled
4734 */
4735 if (adapter->vfs_allocated_count ||
4736 (!(hw->fc.current_mode & e1000_fc_rx_pause) &&
4737 adapter->num_rx_queues > 1))
4738 srrctl |= E1000_SRRCTL_DROP_EN;
4739
4740 wr32(E1000_SRRCTL(reg_idx), srrctl);
4741}
4742
4743/**
4744 * igb_configure_rx_ring - Configure a receive ring after Reset
4745 * @adapter: board private structure
4746 * @ring: receive ring to be configured
4747 *
4748 * Configure the Rx unit of the MAC after a reset.
4749 **/
4750void igb_configure_rx_ring(struct igb_adapter *adapter,
4751 struct igb_ring *ring)
4752{
4753 struct e1000_hw *hw = &adapter->hw;
4754 union e1000_adv_rx_desc *rx_desc;
4755 u64 rdba = ring->dma;
4756 int reg_idx = ring->reg_idx;
4757 u32 rxdctl = 0;
4758
4759 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
4760 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
4761 MEM_TYPE_PAGE_SHARED, NULL));
4762
4763 /* disable the queue */
4764 wr32(E1000_RXDCTL(reg_idx), 0);
4765
4766 /* Set DMA base address registers */
4767 wr32(E1000_RDBAL(reg_idx),
4768 rdba & 0x00000000ffffffffULL);
4769 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
4770 wr32(E1000_RDLEN(reg_idx),
4771 ring->count * sizeof(union e1000_adv_rx_desc));
4772
4773 /* initialize head and tail */
4774 ring->tail = adapter->io_addr + E1000_RDT(reg_idx);
4775 wr32(E1000_RDH(reg_idx), 0);
4776 writel(0, ring->tail);
4777
4778 /* set descriptor configuration */
4779 igb_setup_srrctl(adapter, ring);
4780
4781 /* set filtering for VMDQ pools */
4782 igb_set_vmolr(adapter, reg_idx & 0x7, true);
4783
4784 rxdctl |= IGB_RX_PTHRESH;
4785 rxdctl |= IGB_RX_HTHRESH << 8;
4786 rxdctl |= IGB_RX_WTHRESH << 16;
4787
4788 /* initialize rx_buffer_info */
4789 memset(ring->rx_buffer_info, 0,
4790 sizeof(struct igb_rx_buffer) * ring->count);
4791
4792 /* initialize Rx descriptor 0 */
4793 rx_desc = IGB_RX_DESC(ring, 0);
4794 rx_desc->wb.upper.length = 0;
4795
4796 /* enable receive descriptor fetching */
4797 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
4798 wr32(E1000_RXDCTL(reg_idx), rxdctl);
4799}
4800
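/**
 * igb_set_rx_buffer_len - select legacy, build_skb or 3K Rx buffer mode
 * @adapter: board private structure
 * @rx_ring: ring whose buffer-size flags are being set
 *
 * With 4K pages, 3K buffers are used when the maximum frame no longer
 * fits a 2K buffer alongside the build_skb padding, or when
 * store-bad-packets is set in RCTL.
 **/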
4801static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
4802 struct igb_ring *rx_ring)
4803{
4804#if (PAGE_SIZE < 8192)
4805 struct e1000_hw *hw = &adapter->hw;
4806#endif
4807
4808 /* set build_skb and buffer size flags */
4809 clear_ring_build_skb_enabled(rx_ring);
4810 clear_ring_uses_large_buffer(rx_ring);
4811
4812 if (adapter->flags & IGB_FLAG_RX_LEGACY)
4813 return;
4814
4815 set_ring_build_skb_enabled(rx_ring);
4816
4817#if (PAGE_SIZE < 8192)
4818 if (adapter->max_frame_size > IGB_MAX_FRAME_BUILD_SKB ||
4819 IGB_2K_TOO_SMALL_WITH_PADDING ||
4820 rd32(E1000_RCTL) & E1000_RCTL_SBP)
4821 set_ring_uses_large_buffer(rx_ring);
4822#endif
4823}
4824
4825/**
4826 * igb_configure_rx - Configure receive Unit after Reset
4827 * @adapter: board private structure
4828 *
4829 * Configure the Rx unit of the MAC after a reset.
4830 **/
4831static void igb_configure_rx(struct igb_adapter *adapter)
4832{
4833 int i;
4834
4835 /* set the correct pool for the PF default MAC address in entry 0 */
4836 igb_set_default_mac_filter(adapter);
4837
4838 /* Setup the HW Rx Head and Tail Descriptor Pointers and
4839 * the Base and Length of the Rx Descriptor Ring
4840 */
4841 for (i = 0; i < adapter->num_rx_queues; i++) {
4842 struct igb_ring *rx_ring = adapter->rx_ring[i];
4843
4844 igb_set_rx_buffer_len(adapter, rx_ring);
4845 igb_configure_rx_ring(adapter, rx_ring);
4846 }
4847}
4848
4849/**
4850 * igb_free_tx_resources - Free Tx Resources per Queue
4851 * @tx_ring: Tx descriptor ring for a specific queue
4852 *
4853 * Free all transmit software resources
4854 **/
4855void igb_free_tx_resources(struct igb_ring *tx_ring)
4856{
4857 igb_clean_tx_ring(tx_ring);
4858
4859 vfree(tx_ring->tx_buffer_info);
4860 tx_ring->tx_buffer_info = NULL;
4861
4862 /* if not set, then don't free */
4863 if (!tx_ring->desc)
4864 return;
4865
4866 dma_free_coherent(tx_ring->dev, tx_ring->size,
4867 tx_ring->desc, tx_ring->dma);
4868
4869 tx_ring->desc = NULL;
4870}
4871
4872/**
4873 * igb_free_all_tx_resources - Free Tx Resources for All Queues
4874 * @adapter: board private structure
4875 *
4876 * Free all transmit software resources
4877 **/
4878static void igb_free_all_tx_resources(struct igb_adapter *adapter)
4879{
4880 int i;
4881
4882 for (i = 0; i < adapter->num_tx_queues; i++)
4883 if (adapter->tx_ring[i])
4884 igb_free_tx_resources(adapter->tx_ring[i]);
4885}
4886
4887/**
4888 * igb_clean_tx_ring - Free Tx Buffers
4889 * @tx_ring: ring to be cleaned
4890 **/
4891static void igb_clean_tx_ring(struct igb_ring *tx_ring)
4892{
4893 u16 i = tx_ring->next_to_clean;
4894 struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
4895
4896 while (i != tx_ring->next_to_use) {
4897 union e1000_adv_tx_desc *eop_desc, *tx_desc;
4898
4899 /* Free all the Tx ring sk_buffs or xdp frames */
4900 if (tx_buffer->type == IGB_TYPE_SKB)
4901 dev_kfree_skb_any(tx_buffer->skb);
4902 else
4903 xdp_return_frame(tx_buffer->xdpf);
4904
4905 /* unmap skb header data */
4906 dma_unmap_single(tx_ring->dev,
4907 dma_unmap_addr(tx_buffer, dma),
4908 dma_unmap_len(tx_buffer, len),
4909 DMA_TO_DEVICE);
4910
4911 /* check for eop_desc to determine the end of the packet */
4912 eop_desc = tx_buffer->next_to_watch;
4913 tx_desc = IGB_TX_DESC(tx_ring, i);
4914
4915 /* unmap remaining buffers */
4916 while (tx_desc != eop_desc) {
4917 tx_buffer++;
4918 tx_desc++;
4919 i++;
4920 if (unlikely(i == tx_ring->count)) {
4921 i = 0;
4922 tx_buffer = tx_ring->tx_buffer_info;
4923 tx_desc = IGB_TX_DESC(tx_ring, 0);
4924 }
4925
4926 /* unmap any remaining paged data */
4927 if (dma_unmap_len(tx_buffer, len))
4928 dma_unmap_page(tx_ring->dev,
4929 dma_unmap_addr(tx_buffer, dma),
4930 dma_unmap_len(tx_buffer, len),
4931 DMA_TO_DEVICE);
4932 }
4933
4934 tx_buffer->next_to_watch = NULL;
4935
4936 /* move us one more past the eop_desc for start of next pkt */
4937 tx_buffer++;
4938 i++;
4939 if (unlikely(i == tx_ring->count)) {
4940 i = 0;
4941 tx_buffer = tx_ring->tx_buffer_info;
4942 }
4943 }
4944
4945 /* reset BQL for queue */
4946 netdev_tx_reset_queue(txring_txq(tx_ring));
4947
4948 /* reset next_to_use and next_to_clean */
4949 tx_ring->next_to_use = 0;
4950 tx_ring->next_to_clean = 0;
4951}
4952
4953/**
4954 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
4955 * @adapter: board private structure
4956 **/
4957static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
4958{
4959 int i;
4960
4961 for (i = 0; i < adapter->num_tx_queues; i++)
4962 if (adapter->tx_ring[i])
4963 igb_clean_tx_ring(adapter->tx_ring[i]);
4964}
4965
4966/**
4967 * igb_free_rx_resources - Free Rx Resources
4968 * @rx_ring: ring to clean the resources from
4969 *
4970 * Free all receive software resources
4971 **/
4972void igb_free_rx_resources(struct igb_ring *rx_ring)
4973{
4974 igb_clean_rx_ring(rx_ring);
4975
4976 rx_ring->xdp_prog = NULL;
4977 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
4978 vfree(rx_ring->rx_buffer_info);
4979 rx_ring->rx_buffer_info = NULL;
4980
4981 /* if not set, then don't free */
4982 if (!rx_ring->desc)
4983 return;
4984
4985 dma_free_coherent(rx_ring->dev, rx_ring->size,
4986 rx_ring->desc, rx_ring->dma);
4987
4988 rx_ring->desc = NULL;
4989}
4990
4991/**
4992 * igb_free_all_rx_resources - Free Rx Resources for All Queues
4993 * @adapter: board private structure
4994 *
4995 * Free all receive software resources
4996 **/
4997static void igb_free_all_rx_resources(struct igb_adapter *adapter)
4998{
4999 int i;
5000
5001 for (i = 0; i < adapter->num_rx_queues; i++)
5002 if (adapter->rx_ring[i])
5003 igb_free_rx_resources(adapter->rx_ring[i]);
5004}
5005
5006/**
5007 * igb_clean_rx_ring - Free Rx Buffers per Queue
5008 * @rx_ring: ring to free buffers from
5009 **/
5010static void igb_clean_rx_ring(struct igb_ring *rx_ring)
5011{
5012 u16 i = rx_ring->next_to_clean;
5013
5014 dev_kfree_skb(rx_ring->skb);
5015 rx_ring->skb = NULL;
5016
5017 /* Free all the Rx ring sk_buffs */
5018 while (i != rx_ring->next_to_alloc) {
5019 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
5020
5021 /* Invalidate cache lines that may have been written to by
5022 * device so that we avoid corrupting memory.
5023 */
5024 dma_sync_single_range_for_cpu(rx_ring->dev,
5025 buffer_info->dma,
5026 buffer_info->page_offset,
5027 igb_rx_bufsz(rx_ring),
5028 DMA_FROM_DEVICE);
5029
5030 /* free resources associated with mapping */
5031 dma_unmap_page_attrs(rx_ring->dev,
5032 buffer_info->dma,
5033 igb_rx_pg_size(rx_ring),
5034 DMA_FROM_DEVICE,
5035 IGB_RX_DMA_ATTR);
5036 __page_frag_cache_drain(buffer_info->page,
5037 buffer_info->pagecnt_bias);
5038
5039 i++;
5040 if (i == rx_ring->count)
5041 i = 0;
5042 }
5043
5044 rx_ring->next_to_alloc = 0;
5045 rx_ring->next_to_clean = 0;
5046 rx_ring->next_to_use = 0;
5047}
5048
5049/**
5050 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
5051 * @adapter: board private structure
5052 **/
5053static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
5054{
5055 int i;
5056
5057 for (i = 0; i < adapter->num_rx_queues; i++)
5058 if (adapter->rx_ring[i])
5059 igb_clean_rx_ring(adapter->rx_ring[i]);
5060}
5061
5062/**
5063 * igb_set_mac - Change the Ethernet Address of the NIC
5064 * @netdev: network interface device structure
5065 * @p: pointer to an address structure
5066 *
5067 * Returns 0 on success, negative on failure
5068 **/
5069static int igb_set_mac(struct net_device *netdev, void *p)
5070{
5071 struct igb_adapter *adapter = netdev_priv(netdev);
5072 struct e1000_hw *hw = &adapter->hw;
5073 struct sockaddr *addr = p;
5074
5075 if (!is_valid_ether_addr(addr->sa_data))
5076 return -EADDRNOTAVAIL;
5077
5078 eth_hw_addr_set(netdev, addr->sa_data);
5079 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
5080
5081 /* set the correct pool for the new PF MAC address in entry 0 */
5082 igb_set_default_mac_filter(adapter);
5083
5084 return 0;
5085}
5086
5087/**
5088 * igb_write_mc_addr_list - write multicast addresses to MTA
5089 * @netdev: network interface device structure
5090 *
5091 * Writes multicast address list to the MTA hash table.
5092 * Returns: -ENOMEM on failure
5093 * 0 on no addresses written
5094 * X on writing X addresses to MTA
5095 **/
5096static int igb_write_mc_addr_list(struct net_device *netdev)
5097{
5098 struct igb_adapter *adapter = netdev_priv(netdev);
5099 struct e1000_hw *hw = &adapter->hw;
5100 struct netdev_hw_addr *ha;
5101 u8 *mta_list;
5102 int i;
5103
5104 if (netdev_mc_empty(netdev)) {
5105 /* nothing to program, so clear mc list */
5106 igb_update_mc_addr_list(hw, NULL, 0);
5107 igb_restore_vf_multicasts(adapter);
5108 return 0;
5109 }
5110
5111	mta_list = kcalloc(netdev_mc_count(netdev), ETH_ALEN, GFP_ATOMIC);
5112 if (!mta_list)
5113 return -ENOMEM;
5114
5115 /* The shared function expects a packed array of only addresses. */
5116 i = 0;
5117 netdev_for_each_mc_addr(ha, netdev)
5118 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
5119
5120 igb_update_mc_addr_list(hw, mta_list, i);
5121 kfree(mta_list);
5122
5123 return netdev_mc_count(netdev);
5124}
5125
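/**
 * igb_vlan_promisc_enable - open the VLAN filters for promiscuous mode
 * @adapter: board private structure
 *
 * Returns 1 when VLAN filtering is not otherwise required, letting the
 * caller simply clear RCTL.VFE. Otherwise emulates VLAN promiscuous mode
 * by adding the PF to every active pool and setting every bit in the
 * VLAN filter table, then returns 0.
 **/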
5126static int igb_vlan_promisc_enable(struct igb_adapter *adapter)
5127{
5128 struct e1000_hw *hw = &adapter->hw;
5129 u32 i, pf_id;
5130
5131 switch (hw->mac.type) {
5132 case e1000_i210:
5133 case e1000_i211:
5134 case e1000_i350:
5135 /* VLAN filtering needed for VLAN prio filter */
5136 if (adapter->netdev->features & NETIF_F_NTUPLE)
5137 break;
5138 fallthrough;
5139 case e1000_82576:
5140 case e1000_82580:
5141 case e1000_i354:
5142 /* VLAN filtering needed for pool filtering */
5143 if (adapter->vfs_allocated_count)
5144 break;
5145 fallthrough;
5146 default:
5147 return 1;
5148 }
5149
5150 /* We are already in VLAN promisc, nothing to do */
5151 if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
5152 return 0;
5153
5154 if (!adapter->vfs_allocated_count)
5155 goto set_vfta;
5156
5157 /* Add PF to all active pools */
5158 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
5159
5160 for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
5161 u32 vlvf = rd32(E1000_VLVF(i));
5162
5163 vlvf |= BIT(pf_id);
5164 wr32(E1000_VLVF(i), vlvf);
5165 }
5166
5167set_vfta:
5168 /* Set all bits in the VLAN filter table array */
5169 for (i = E1000_VLAN_FILTER_TBL_SIZE; i--;)
5170 hw->mac.ops.write_vfta(hw, i, ~0U);
5171
5172 /* Set flag so we don't redo unnecessary work */
5173 adapter->flags |= IGB_FLAG_VLAN_PROMISC;
5174
5175 return 0;
5176}
5177
5178#define VFTA_BLOCK_SIZE 8
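/**
 * igb_scrub_vfta - rewrite one block of the VLAN filter table array
 * @adapter: board private structure
 * @vfta_offset: first 32-bit VFTA register of the block being scrubbed
 *
 * Rebuilds VFTA_BLOCK_SIZE registers (256 VLAN IDs) from active_vlans,
 * preserving the management VLAN and any VLANs still used by VF pools,
 * and drops the PF from pools it no longer needs.
 **/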
5179static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset)
5180{
5181 struct e1000_hw *hw = &adapter->hw;
5182 u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
5183 u32 vid_start = vfta_offset * 32;
5184 u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
5185 u32 i, vid, word, bits, pf_id;
5186
5187 /* guarantee that we don't scrub out management VLAN */
5188 vid = adapter->mng_vlan_id;
5189 if (vid >= vid_start && vid < vid_end)
5190 vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
5191
5192 if (!adapter->vfs_allocated_count)
5193 goto set_vfta;
5194
5195 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
5196
5197 for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
5198 u32 vlvf = rd32(E1000_VLVF(i));
5199
5200 /* pull VLAN ID from VLVF */
5201 vid = vlvf & VLAN_VID_MASK;
5202
5203		/* only concern ourselves with VLAN IDs in the range being scrubbed */
5204 if (vid < vid_start || vid >= vid_end)
5205 continue;
5206
5207 if (vlvf & E1000_VLVF_VLANID_ENABLE) {
5208 /* record VLAN ID in VFTA */
5209 vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
5210
5211 /* if PF is part of this then continue */
5212 if (test_bit(vid, adapter->active_vlans))
5213 continue;
5214 }
5215
5216 /* remove PF from the pool */
5217 bits = ~BIT(pf_id);
5218 bits &= rd32(E1000_VLVF(i));
5219 wr32(E1000_VLVF(i), bits);
5220 }
5221
5222set_vfta:
5223 /* extract values from active_vlans and write back to VFTA */
5224 for (i = VFTA_BLOCK_SIZE; i--;) {
5225 vid = (vfta_offset + i) * 32;
5226 word = vid / BITS_PER_LONG;
5227 bits = vid % BITS_PER_LONG;
5228
5229 vfta[i] |= adapter->active_vlans[word] >> bits;
5230
5231 hw->mac.ops.write_vfta(hw, vfta_offset + i, vfta[i]);
5232 }
5233}
5234
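/**
 * igb_vlan_promisc_disable - restore normal VLAN filtering
 * @adapter: board private structure
 *
 * Clears the promisc flag and rescrubs the whole VLAN filter table so it
 * once again reflects only active_vlans and the VF pool requirements.
 **/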
5235static void igb_vlan_promisc_disable(struct igb_adapter *adapter)
5236{
5237 u32 i;
5238
5239 /* We are not in VLAN promisc, nothing to do */
5240 if (!(adapter->flags & IGB_FLAG_VLAN_PROMISC))
5241 return;
5242
5243 /* Set flag so we don't redo unnecessary work */
5244 adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;
5245
5246 for (i = 0; i < E1000_VLAN_FILTER_TBL_SIZE; i += VFTA_BLOCK_SIZE)
5247 igb_scrub_vfta(adapter, i);
5248}
5249
5250/**
5251 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
5252 * @netdev: network interface device structure
5253 *
5254 * The set_rx_mode entry point is called whenever the unicast or multicast
5255 * address lists or the network interface flags are updated. This routine is
5256 * responsible for configuring the hardware for proper unicast, multicast,
5257 * promiscuous mode, and all-multi behavior.
5258 **/
5259static void igb_set_rx_mode(struct net_device *netdev)
5260{
5261 struct igb_adapter *adapter = netdev_priv(netdev);
5262 struct e1000_hw *hw = &adapter->hw;
5263 unsigned int vfn = adapter->vfs_allocated_count;
5264 u32 rctl = 0, vmolr = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
5265 int count;
5266
5267 /* Check for Promiscuous and All Multicast modes */
5268 if (netdev->flags & IFF_PROMISC) {
5269 rctl |= E1000_RCTL_UPE | E1000_RCTL_MPE;
5270 vmolr |= E1000_VMOLR_MPME;
5271
5272 /* enable use of UTA filter to force packets to default pool */
5273 if (hw->mac.type == e1000_82576)
5274 vmolr |= E1000_VMOLR_ROPE;
5275 } else {
5276 if (netdev->flags & IFF_ALLMULTI) {
5277 rctl |= E1000_RCTL_MPE;
5278 vmolr |= E1000_VMOLR_MPME;
5279 } else {
5280 /* Write addresses to the MTA, if the attempt fails
5281 * then we should just turn on promiscuous mode so
5282 * that we can at least receive multicast traffic
5283 */
5284 count = igb_write_mc_addr_list(netdev);
5285 if (count < 0) {
5286 rctl |= E1000_RCTL_MPE;
5287 vmolr |= E1000_VMOLR_MPME;
5288 } else if (count) {
5289 vmolr |= E1000_VMOLR_ROMPE;
5290 }
5291 }
5292 }
5293
5294 /* Write addresses to available RAR registers, if there is not
5295 * sufficient space to store all the addresses then enable
5296 * unicast promiscuous mode
5297 */
5298 if (__dev_uc_sync(netdev, igb_uc_sync, igb_uc_unsync)) {
5299 rctl |= E1000_RCTL_UPE;
5300 vmolr |= E1000_VMOLR_ROPE;
5301 }
5302
5303 /* enable VLAN filtering by default */
5304 rctl |= E1000_RCTL_VFE;
5305
5306 /* disable VLAN filtering for modes that require it */
5307 if ((netdev->flags & IFF_PROMISC) ||
5308 (netdev->features & NETIF_F_RXALL)) {
5309 /* if we fail to set all rules then just clear VFE */
5310 if (igb_vlan_promisc_enable(adapter))
5311 rctl &= ~E1000_RCTL_VFE;
5312 } else {
5313 igb_vlan_promisc_disable(adapter);
5314 }
5315
5316 /* update state of unicast, multicast, and VLAN filtering modes */
5317 rctl |= rd32(E1000_RCTL) & ~(E1000_RCTL_UPE | E1000_RCTL_MPE |
5318 E1000_RCTL_VFE);
5319 wr32(E1000_RCTL, rctl);
5320
5321#if (PAGE_SIZE < 8192)
5322 if (!adapter->vfs_allocated_count) {
5323 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
5324 rlpml = IGB_MAX_FRAME_BUILD_SKB;
5325 }
5326#endif
5327 wr32(E1000_RLPML, rlpml);
5328
5329 /* In order to support SR-IOV and eventually VMDq it is necessary to set
5330 * the VMOLR to enable the appropriate modes. Without this workaround
5331 * we will have issues with VLAN tag stripping not being done for frames
5332	 * that are only arriving because we are the default pool.
5333 */
5334 if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
5335 return;
5336
5337 /* set UTA to appropriate mode */
5338 igb_set_uta(adapter, !!(vmolr & E1000_VMOLR_ROPE));
5339
5340 vmolr |= rd32(E1000_VMOLR(vfn)) &
5341 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
5342
5343 /* enable Rx jumbo frames, restrict as needed to support build_skb */
5344 vmolr &= ~E1000_VMOLR_RLPML_MASK;
5345#if (PAGE_SIZE < 8192)
5346 if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
5347 vmolr |= IGB_MAX_FRAME_BUILD_SKB;
5348 else
5349#endif
5350 vmolr |= MAX_JUMBO_FRAME_SIZE;
5351 vmolr |= E1000_VMOLR_LPE;
5352
5353 wr32(E1000_VMOLR(vfn), vmolr);
5354
5355 igb_restore_vf_multicasts(adapter);
5356}
5357
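/**
 * igb_check_wvbr - latch VF spoof events reported by hardware
 * @adapter: board private structure
 *
 * Reads the WVBR register on 82576 and i350 and accumulates any set bits
 * in adapter->wvbr for igb_spoof_check() to report later.
 **/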
5358static void igb_check_wvbr(struct igb_adapter *adapter)
5359{
5360 struct e1000_hw *hw = &adapter->hw;
5361 u32 wvbr = 0;
5362
5363 switch (hw->mac.type) {
5364 case e1000_82576:
5365 case e1000_i350:
5366 wvbr = rd32(E1000_WVBR);
5367 if (!wvbr)
5368 return;
5369 break;
5370 default:
5371 break;
5372 }
5373
5374 adapter->wvbr |= wvbr;
5375}
5376
5377#define IGB_STAGGERED_QUEUE_OFFSET 8
5378
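/**
 * igb_spoof_check - warn about VFs that triggered spoof events
 * @adapter: board private structure
 *
 * Each VF owns two bits in the latched WVBR value, the second offset by
 * IGB_STAGGERED_QUEUE_OFFSET; warn per VF and clear its bits.
 **/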
5379static void igb_spoof_check(struct igb_adapter *adapter)
5380{
5381 int j;
5382
5383 if (!adapter->wvbr)
5384 return;
5385
5386 for (j = 0; j < adapter->vfs_allocated_count; j++) {
5387 if (adapter->wvbr & BIT(j) ||
5388 adapter->wvbr & BIT(j + IGB_STAGGERED_QUEUE_OFFSET)) {
5389 dev_warn(&adapter->pdev->dev,
5390 "Spoof event(s) detected on VF %d\n", j);
5391 adapter->wvbr &=
5392 ~(BIT(j) |
5393 BIT(j + IGB_STAGGERED_QUEUE_OFFSET));
5394 }
5395 }
5396}
5397
5398/* Need to wait a few seconds after link up to get diagnostic information from
5399 * the phy
5400 */
5401static void igb_update_phy_info(struct timer_list *t)
5402{
5403 struct igb_adapter *adapter = from_timer(adapter, t, phy_info_timer);
5404 igb_get_phy_info(&adapter->hw);
5405}
5406
5407/**
5408 * igb_has_link - check shared code for link and determine up/down
5409 * @adapter: pointer to driver private info
5410 **/
5411bool igb_has_link(struct igb_adapter *adapter)
5412{
5413 struct e1000_hw *hw = &adapter->hw;
5414 bool link_active = false;
5415
5416	/* get_link_status is set on an LSC (link status) interrupt or an
5417	 * rx sequence error interrupt, and stays set until
5418	 * e1000_check_for_link re-establishes link,
5419	 * for copper adapters ONLY
5420 */
5421 switch (hw->phy.media_type) {
5422 case e1000_media_type_copper:
5423 if (!hw->mac.get_link_status)
5424 return true;
5425 fallthrough;
5426 case e1000_media_type_internal_serdes:
5427 hw->mac.ops.check_for_link(hw);
5428 link_active = !hw->mac.get_link_status;
5429 break;
5430 default:
5431 case e1000_media_type_unknown:
5432 break;
5433 }
5434
5435 if (((hw->mac.type == e1000_i210) ||
5436 (hw->mac.type == e1000_i211)) &&
5437 (hw->phy.id == I210_I_PHY_ID)) {
5438 if (!netif_carrier_ok(adapter->netdev)) {
5439 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
5440 } else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) {
5441 adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE;
5442 adapter->link_check_timeout = jiffies;
5443 }
5444 }
5445
5446 return link_active;
5447}
5448
5449static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
5450{
5451 bool ret = false;
5452 u32 ctrl_ext, thstat;
5453
5454 /* check for thermal sensor event on i350 copper only */
5455 if (hw->mac.type == e1000_i350) {
5456 thstat = rd32(E1000_THSTAT);
5457 ctrl_ext = rd32(E1000_CTRL_EXT);
5458
5459 if ((hw->phy.media_type == e1000_media_type_copper) &&
5460 !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII))
5461 ret = !!(thstat & event);
5462 }
5463
5464 return ret;
5465}
5466
5467/**
5468 * igb_check_lvmmc - check for malformed packets received
5469 * and indicated in LVMMC register
5470 * @adapter: pointer to adapter
5471 **/
5472static void igb_check_lvmmc(struct igb_adapter *adapter)
5473{
5474 struct e1000_hw *hw = &adapter->hw;
5475 u32 lvmmc;
5476
5477 lvmmc = rd32(E1000_LVMMC);
5478 if (lvmmc) {
5479 if (unlikely(net_ratelimit())) {
5480 netdev_warn(adapter->netdev,
5481 "malformed Tx packet detected and dropped, LVMMC:0x%08x\n",
5482 lvmmc);
5483 }
5484 }
5485}
5486
5487/**
5488 * igb_watchdog - Timer Call-back
5489 * @t: pointer to timer_list containing our private info pointer
5490 **/
5491static void igb_watchdog(struct timer_list *t)
5492{
5493 struct igb_adapter *adapter = from_timer(adapter, t, watchdog_timer);
5494 /* Do the rest outside of interrupt context */
5495 schedule_work(&adapter->watchdog_task);
5496}
5497
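/**
 * igb_watchdog_task - check link and queue health outside interrupt context
 * @work: pointer to the watchdog work_struct inside the adapter
 *
 * Handles link up/down transitions (including EEE, SmartSpeed and thermal
 * events), refreshes statistics, schedules a reset when Tx work is
 * stranded without link, and raises a software interrupt so the Rx rings
 * get cleaned.
 **/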
5498static void igb_watchdog_task(struct work_struct *work)
5499{
5500 struct igb_adapter *adapter = container_of(work,
5501 struct igb_adapter,
5502 watchdog_task);
5503 struct e1000_hw *hw = &adapter->hw;
5504 struct e1000_phy_info *phy = &hw->phy;
5505 struct net_device *netdev = adapter->netdev;
5506 u32 link;
5507 int i;
5508 u32 connsw;
5509 u16 phy_data, retry_count = 20;
5510
5511 link = igb_has_link(adapter);
5512
5513 if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) {
5514 if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
5515 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
5516 else
5517 link = false;
5518 }
5519
5520 /* Force link down if we have fiber to swap to */
5521 if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
5522 if (hw->phy.media_type == e1000_media_type_copper) {
5523 connsw = rd32(E1000_CONNSW);
5524 if (!(connsw & E1000_CONNSW_AUTOSENSE_EN))
5525 link = 0;
5526 }
5527 }
5528 if (link) {
5529 /* Perform a reset if the media type changed. */
5530 if (hw->dev_spec._82575.media_changed) {
5531 hw->dev_spec._82575.media_changed = false;
5532 adapter->flags |= IGB_FLAG_MEDIA_RESET;
5533 igb_reset(adapter);
5534 }
5535 /* Cancel scheduled suspend requests. */
5536 pm_runtime_resume(netdev->dev.parent);
5537
5538 if (!netif_carrier_ok(netdev)) {
5539 u32 ctrl;
5540
5541 hw->mac.ops.get_speed_and_duplex(hw,
5542 &adapter->link_speed,
5543 &adapter->link_duplex);
5544
5545 ctrl = rd32(E1000_CTRL);
5546			/* Link status message must follow this format */
5547 netdev_info(netdev,
5548 "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
5549 netdev->name,
5550 adapter->link_speed,
5551 adapter->link_duplex == FULL_DUPLEX ?
5552 "Full" : "Half",
5553 (ctrl & E1000_CTRL_TFCE) &&
5554 (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
5555 (ctrl & E1000_CTRL_RFCE) ? "RX" :
5556 (ctrl & E1000_CTRL_TFCE) ? "TX" : "None");
5557
5558 /* disable EEE if enabled */
5559 if ((adapter->flags & IGB_FLAG_EEE) &&
5560 (adapter->link_duplex == HALF_DUPLEX)) {
5561 dev_info(&adapter->pdev->dev,
5562 "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex.\n");
5563 adapter->hw.dev_spec._82575.eee_disable = true;
5564 adapter->flags &= ~IGB_FLAG_EEE;
5565 }
5566
5567 /* check if SmartSpeed worked */
5568 igb_check_downshift(hw);
5569 if (phy->speed_downgraded)
5570 netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");
5571
5572 /* check for thermal sensor event */
5573 if (igb_thermal_sensor_event(hw,
5574 E1000_THSTAT_LINK_THROTTLE))
5575 netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n");
5576
5577 /* adjust timeout factor according to speed/duplex */
5578 adapter->tx_timeout_factor = 1;
5579 switch (adapter->link_speed) {
5580 case SPEED_10:
5581 adapter->tx_timeout_factor = 14;
5582 break;
5583 case SPEED_100:
5584 /* maybe add some timeout factor ? */
5585 break;
5586 }
5587
5588 if (adapter->link_speed != SPEED_1000 ||
5589 !hw->phy.ops.read_reg)
5590 goto no_wait;
5591
5592 /* wait for Remote receiver status OK */
5593retry_read_status:
5594 if (!igb_read_phy_reg(hw, PHY_1000T_STATUS,
5595 &phy_data)) {
5596 if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) &&
5597 retry_count) {
5598 msleep(100);
5599 retry_count--;
5600 goto retry_read_status;
5601 } else if (!retry_count) {
5602 dev_err(&adapter->pdev->dev, "exceeded max 2 seconds waiting for remote receiver status\n");
5603 }
5604 } else {
5605 dev_err(&adapter->pdev->dev, "failed to read 1000Base-T Status Reg\n");
5606 }
5607no_wait:
5608 netif_carrier_on(netdev);
5609
5610 igb_ping_all_vfs(adapter);
5611 igb_check_vf_rate_limit(adapter);
5612
5613 /* link state has changed, schedule phy info update */
5614 if (!test_bit(__IGB_DOWN, &adapter->state))
5615 mod_timer(&adapter->phy_info_timer,
5616 round_jiffies(jiffies + 2 * HZ));
5617 }
5618 } else {
5619 if (netif_carrier_ok(netdev)) {
5620 adapter->link_speed = 0;
5621 adapter->link_duplex = 0;
5622
5623 /* check for thermal sensor event */
5624 if (igb_thermal_sensor_event(hw,
5625 E1000_THSTAT_PWR_DOWN)) {
5626 netdev_err(netdev, "The network adapter was stopped because it overheated\n");
5627 }
5628
5629 /* Link status message must follow this format */
5630 netdev_info(netdev, "igb: %s NIC Link is Down\n",
5631 netdev->name);
5632 netif_carrier_off(netdev);
5633
5634 igb_ping_all_vfs(adapter);
5635
5636 /* link state has changed, schedule phy info update */
5637 if (!test_bit(__IGB_DOWN, &adapter->state))
5638 mod_timer(&adapter->phy_info_timer,
5639 round_jiffies(jiffies + 2 * HZ));
5640
5641 /* link is down, time to check for alternate media */
5642 if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
5643 igb_check_swap_media(adapter);
5644 if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
5645 schedule_work(&adapter->reset_task);
5646 /* return immediately */
5647 return;
5648 }
5649 }
5650 pm_schedule_suspend(netdev->dev.parent,
5651 MSEC_PER_SEC * 5);
5652
5653 /* also check for alternate media here */
5654 } else if (!netif_carrier_ok(netdev) &&
5655 (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
5656 igb_check_swap_media(adapter);
5657 if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
5658 schedule_work(&adapter->reset_task);
5659 /* return immediately */
5660 return;
5661 }
5662 }
5663 }
5664
5665 spin_lock(&adapter->stats64_lock);
5666 igb_update_stats(adapter);
5667 spin_unlock(&adapter->stats64_lock);
5668
5669 for (i = 0; i < adapter->num_tx_queues; i++) {
5670 struct igb_ring *tx_ring = adapter->tx_ring[i];
5671 if (!netif_carrier_ok(netdev)) {
5672 /* We've lost link, so the controller stops DMA,
5673 * but we've got queued Tx work that's never going
5674 * to get done, so reset controller to flush Tx.
5675 * (Do the reset outside of interrupt context).
5676 */
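/* igb_desc_unused() returns count - 1 on an empty ring, so this
 * condition tests for descriptors that are still outstanding
 */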
5677 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
5678 adapter->tx_timeout_count++;
5679 schedule_work(&adapter->reset_task);
5680 /* return immediately since reset is imminent */
5681 return;
5682 }
5683 }
5684
5685 /* Force detection of hung controller every watchdog period */
5686 set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
5687 }
5688
5689 /* Cause software interrupt to ensure Rx ring is cleaned */
5690 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
5691 u32 eics = 0;
5692
5693 for (i = 0; i < adapter->num_q_vectors; i++)
5694 eics |= adapter->q_vector[i]->eims_value;
5695 wr32(E1000_EICS, eics);
5696 } else {
5697 wr32(E1000_ICS, E1000_ICS_RXDMT0);
5698 }
5699
5700 igb_spoof_check(adapter);
5701 igb_ptp_rx_hang(adapter);
5702 igb_ptp_tx_hang(adapter);
5703
5704 /* Check LVMMC register on i350/i354 only */
5705 if ((adapter->hw.mac.type == e1000_i350) ||
5706 (adapter->hw.mac.type == e1000_i354))
5707 igb_check_lvmmc(adapter);
5708
5709 /* Reset the timer */
5710 if (!test_bit(__IGB_DOWN, &adapter->state)) {
5711 if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)
5712 mod_timer(&adapter->watchdog_timer,
5713 round_jiffies(jiffies + HZ));
5714 else
5715 mod_timer(&adapter->watchdog_timer,
5716 round_jiffies(jiffies + 2 * HZ));
5717 }
5718}
5719
5720enum latency_range {
5721 lowest_latency = 0,
5722 low_latency = 1,
5723 bulk_latency = 2,
5724 latency_invalid = 255
5725};
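/* These classes map to concrete rates in igb_set_itr() below:
 * lowest_latency -> IGB_70K_ITR, low_latency -> IGB_20K_ITR and
 * bulk_latency -> IGB_4K_ITR. igb_update_itr() moves a ring between
 * classes based on the traffic seen in the last interrupt window.
 */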
5726
5727/**
5728 * igb_update_ring_itr - update the dynamic ITR value based on packet size
5729 * @q_vector: pointer to q_vector
5730 *
5731 * Stores a new ITR value based strictly on packet size. This
5732 * algorithm is less sophisticated than that used in igb_update_itr,
5733 * due to the difficulty of synchronizing statistics across multiple
5734 * receive rings. The divisors and thresholds used by this function
5735 * were determined based on theoretical maximum wire speed and testing
5736 * data, in order to minimize response time while increasing bulk
5737 * throughput.
5738 * This functionality is controlled by ethtool's coalescing settings.
5739 * NOTE: This function is called only when operating in a multiqueue
5740 * receive environment.
5741 **/
5742static void igb_update_ring_itr(struct igb_q_vector *q_vector)
5743{
5744 int new_val = q_vector->itr_val;
5745 int avg_wire_size = 0;
5746 struct igb_adapter *adapter = q_vector->adapter;
5747 unsigned int packets;
5748
5749 /* For non-gigabit speeds, just fix the interrupt rate at 4000
5750 * ints/sec - ITR timer value of 120 ticks.
5751 */
5752 if (adapter->link_speed != SPEED_1000) {
5753 new_val = IGB_4K_ITR;
5754 goto set_itr_val;
5755 }
5756
5757 packets = q_vector->rx.total_packets;
5758 if (packets)
5759 avg_wire_size = q_vector->rx.total_bytes / packets;
5760
5761 packets = q_vector->tx.total_packets;
5762 if (packets)
5763 avg_wire_size = max_t(u32, avg_wire_size,
5764 q_vector->tx.total_bytes / packets);
5765
5766 /* if avg_wire_size isn't set no work was done */
5767 if (!avg_wire_size)
5768 goto clear_counts;
5769
5770 /* Add 24 bytes to size to account for CRC, preamble, and gap */
5771 avg_wire_size += 24;
5772
5773 /* Don't starve jumbo frames */
5774 avg_wire_size = min(avg_wire_size, 3000);
5775
5776 /* Give a little boost to mid-size frames */
5777 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
5778 new_val = avg_wire_size / 3;
5779 else
5780 new_val = avg_wire_size / 2;
5781
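/* Illustrative numbers: a stream of 1500 byte frames gives
 * avg_wire_size = 1500 + 24 = 1524, the mid-size boost does not
 * apply, so new_val = 762 (a long interval favoring throughput);
 * 64 byte frames give (64 + 24) / 2 = 44, a much shorter interval
 * favoring low latency.
 */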
5782 /* conservative mode (itr 3) eliminates the lowest_latency setting */
5783 if (new_val < IGB_20K_ITR &&
5784 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
5785 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
5786 new_val = IGB_20K_ITR;
5787
5788set_itr_val:
5789 if (new_val != q_vector->itr_val) {
5790 q_vector->itr_val = new_val;
5791 q_vector->set_itr = 1;
5792 }
5793clear_counts:
5794 q_vector->rx.total_bytes = 0;
5795 q_vector->rx.total_packets = 0;
5796 q_vector->tx.total_bytes = 0;
5797 q_vector->tx.total_packets = 0;
5798}
5799
5800/**
5801 * igb_update_itr - update the dynamic ITR value based on statistics
5802 * @q_vector: pointer to q_vector
5803 * @ring_container: ring info to update the itr for
5804 *
5805 * Stores a new ITR value based on packets and byte
5806 * counts during the last interrupt. The advantage of per interrupt
5807 * computation is faster updates and more accurate ITR for the current
5808 * traffic pattern. Constants in this function were computed
5809 * based on theoretical maximum wire speed and thresholds were set based
5810 * on testing data as well as attempting to minimize response time
5811 * while increasing bulk throughput.
5812 * This functionality is controlled by ethtool's coalescing settings.
5813 * NOTE: These calculations are only valid when operating in a single-
5814 * queue environment.
5815 **/
5816static void igb_update_itr(struct igb_q_vector *q_vector,
5817 struct igb_ring_container *ring_container)
5818{
5819 unsigned int packets = ring_container->total_packets;
5820 unsigned int bytes = ring_container->total_bytes;
5821 u8 itrval = ring_container->itr;
5822
5823 /* no packets, exit with status unchanged */
5824 if (packets == 0)
5825 return;
5826
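/* Rough heuristic, evaluated per interrupt: a high bytes/packet
 * ratio (TSO or jumbo traffic, > 8000) pushes toward bulk_latency,
 * bursts of small packets push toward lowest_latency, and mixed
 * traffic settles in low_latency.
 */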
5827 switch (itrval) {
5828 case lowest_latency:
5829 /* handle TSO and jumbo frames */
5830 if (bytes/packets > 8000)
5831 itrval = bulk_latency;
5832 else if ((packets < 5) && (bytes > 512))
5833 itrval = low_latency;
5834 break;
5835 case low_latency: /* 50 usec aka 20000 ints/s */
5836 if (bytes > 10000) {
5837 /* this if handles the TSO accounting */
5838 if (bytes/packets > 8000)
5839 itrval = bulk_latency;
5840 else if ((packets < 10) || ((bytes/packets) > 1200))
5841 itrval = bulk_latency;
5842 else if (packets > 35)
5843 itrval = lowest_latency;
5844 } else if (bytes/packets > 2000) {
5845 itrval = bulk_latency;
5846 } else if (packets <= 2 && bytes < 512) {
5847 itrval = lowest_latency;
5848 }
5849 break;
5850 case bulk_latency: /* 250 usec aka 4000 ints/s */
5851 if (bytes > 25000) {
5852 if (packets > 35)
5853 itrval = low_latency;
5854 } else if (bytes < 1500) {
5855 itrval = low_latency;
5856 }
5857 break;
5858 }
5859
5860 /* clear work counters since we have the values we need */
5861 ring_container->total_bytes = 0;
5862 ring_container->total_packets = 0;
5863
5864 /* write updated itr to ring container */
5865 ring_container->itr = itrval;
5866}
5867
5868static void igb_set_itr(struct igb_q_vector *q_vector)
5869{
5870 struct igb_adapter *adapter = q_vector->adapter;
5871 u32 new_itr = q_vector->itr_val;
5872 u8 current_itr = 0;
5873
5874 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
5875 if (adapter->link_speed != SPEED_1000) {
5876 current_itr = 0;
5877 new_itr = IGB_4K_ITR;
5878 goto set_itr_now;
5879 }
5880
5881 igb_update_itr(q_vector, &q_vector->tx);
5882 igb_update_itr(q_vector, &q_vector->rx);
5883
5884 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
5885
5886 /* conservative mode (itr 3) eliminates the lowest_latency setting */
5887 if (current_itr == lowest_latency &&
5888 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
5889 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
5890 current_itr = low_latency;
5891
5892 switch (current_itr) {
5893 /* the byte/packet thresholds in igb_update_itr() assume these rates */
5894 case lowest_latency:
5895 new_itr = IGB_70K_ITR; /* 70,000 ints/sec */
5896 break;
5897 case low_latency:
5898 new_itr = IGB_20K_ITR; /* 20,000 ints/sec */
5899 break;
5900 case bulk_latency:
5901 new_itr = IGB_4K_ITR; /* 4,000 ints/sec */
5902 break;
5903 default:
5904 break;
5905 }
5906
5907set_itr_now:
5908 if (new_itr != q_vector->itr_val) {
5909 /* this attempts to bias the interrupt rate towards Bulk
5910 * by adding intermediate steps when interrupt rate is
5911 * increasing
5912 */
5913 new_itr = new_itr > q_vector->itr_val ?
5914 max((new_itr * q_vector->itr_val) /
5915 (new_itr + (q_vector->itr_val >> 2)),
5916 new_itr) : new_itr;
5917 /* Don't write the value here; it resets the adapter's
5918 * internal timer, and causes us to delay far longer than
5919 * we should between interrupts. Instead, we write the ITR
5920 * value at the beginning of the next interrupt so the timing
5921 * ends up being correct.
5922 */
5923 q_vector->itr_val = new_itr;
5924 q_vector->set_itr = 1;
5925 }
5926}
5927
5928static void igb_tx_ctxtdesc(struct igb_ring *tx_ring,
5929 struct igb_tx_buffer *first,
5930 u32 vlan_macip_lens, u32 type_tucmd,
5931 u32 mss_l4len_idx)
5932{
5933 struct e1000_adv_tx_context_desc *context_desc;
5934 u16 i = tx_ring->next_to_use;
5935 struct timespec64 ts;
5936
5937 context_desc = IGB_TX_CTXTDESC(tx_ring, i);
5938
5939 i++;
5940 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
5941
5942 /* set bits to identify this as an advanced context descriptor */
5943 type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
5944
5945 /* For 82575, context index must be unique per ring. */
5946 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
5947 mss_l4len_idx |= tx_ring->reg_idx << 4;
5948
5949 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
5950 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
5951 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
5952
5953 /* We assume there is always a valid tx time available. Invalid times
5954 * should have been handled by the upper layers.
5955 */
5956 if (tx_ring->launchtime_enable) {
5957 ts = ktime_to_timespec64(first->skb->tstamp);
5958 skb_txtime_consumed(first->skb);
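/* launch time appears to be programmed in 32 ns units within the
 * current second, hence tv_nsec / 32
 */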
5959 context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32);
5960 } else {
5961 context_desc->seqnum_seed = 0;
5962 }
5963}
5964
5965static int igb_tso(struct igb_ring *tx_ring,
5966 struct igb_tx_buffer *first,
5967 u8 *hdr_len)
5968{
5969 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
5970 struct sk_buff *skb = first->skb;
5971 union {
5972 struct iphdr *v4;
5973 struct ipv6hdr *v6;
5974 unsigned char *hdr;
5975 } ip;
5976 union {
5977 struct tcphdr *tcp;
5978 struct udphdr *udp;
5979 unsigned char *hdr;
5980 } l4;
5981 u32 paylen, l4_offset;
5982 int err;
5983
5984 if (skb->ip_summed != CHECKSUM_PARTIAL)
5985 return 0;
5986
5987 if (!skb_is_gso(skb))
5988 return 0;
5989
5990 err = skb_cow_head(skb, 0);
5991 if (err < 0)
5992 return err;
5993
5994 ip.hdr = skb_network_header(skb);
5995 l4.hdr = skb_checksum_start(skb);
5996
5997 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
5998 type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
5999 E1000_ADVTXD_TUCMD_L4T_UDP : E1000_ADVTXD_TUCMD_L4T_TCP;
6000
6001 /* initialize outer IP header fields */
6002 if (ip.v4->version == 4) {
6003 unsigned char *csum_start = skb_checksum_start(skb);
6004 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
6005
6006 /* IP header will have to cancel out any data that
6007 * is not a part of the outer IP header
6008 */
6009 ip.v4->check = csum_fold(csum_partial(trans_start,
6010 csum_start - trans_start,
6011 0));
6012 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
6013
6014 ip.v4->tot_len = 0;
6015 first->tx_flags |= IGB_TX_FLAGS_TSO |
6016 IGB_TX_FLAGS_CSUM |
6017 IGB_TX_FLAGS_IPV4;
6018 } else {
6019 ip.v6->payload_len = 0;
6020 first->tx_flags |= IGB_TX_FLAGS_TSO |
6021 IGB_TX_FLAGS_CSUM;
6022 }
6023
6024 /* determine offset of inner transport header */
6025 l4_offset = l4.hdr - skb->data;
6026
6027 /* remove payload length from inner checksum */
6028 paylen = skb->len - l4_offset;
6029 if (type_tucmd & E1000_ADVTXD_TUCMD_L4T_TCP) {
6030 /* compute length of segmentation header */
6031 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
6032 csum_replace_by_diff(&l4.tcp->check,
6033 (__force __wsum)htonl(paylen));
6034 } else {
6035 /* compute length of segmentation header */
6036 *hdr_len = sizeof(*l4.udp) + l4_offset;
6037 csum_replace_by_diff(&l4.udp->check,
6038 (__force __wsum)htonl(paylen));
6039 }
6040
6041 /* update gso size and bytecount with header size */
6042 first->gso_segs = skb_shinfo(skb)->gso_segs;
6043 first->bytecount += (first->gso_segs - 1) * *hdr_len;
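/* e.g. (illustrative): a 7240 byte TCP payload with MSS 1448 and a
 * 54 byte Ethernet/IPv4/TCP header gives gso_segs = 5, and bytecount
 * grows by 4 * 54 so the headers of all five wire segments are
 * counted.
 */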
6044
6045 /* MSS L4LEN IDX */
6046 mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT;
6047 mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
6048
6049 /* VLAN MACLEN IPLEN */
6050 vlan_macip_lens = l4.hdr - ip.hdr;
6051 vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
6052 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
6053
6054 igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
6055 type_tucmd, mss_l4len_idx);
6056
6057 return 1;
6058}
6059
6060static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
6061{
6062 struct sk_buff *skb = first->skb;
6063 u32 vlan_macip_lens = 0;
6064 u32 type_tucmd = 0;
6065
6066 if (skb->ip_summed != CHECKSUM_PARTIAL) {
6067csum_failed:
6068 if (!(first->tx_flags & IGB_TX_FLAGS_VLAN) &&
6069 !tx_ring->launchtime_enable)
6070 return;
6071 goto no_csum;
6072 }
6073
6074 switch (skb->csum_offset) {
6075 case offsetof(struct tcphdr, check):
6076 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
6077 fallthrough;
6078 case offsetof(struct udphdr, check):
6079 break;
6080 case offsetof(struct sctphdr, checksum):
6081 /* validate that this is actually an SCTP request */
6082 if (skb_csum_is_sctp(skb)) {
6083 type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
6084 break;
6085 }
6086 fallthrough;
6087 default:
6088 skb_checksum_help(skb);
6089 goto csum_failed;
6090 }
6091
6092 /* update TX checksum flag */
6093 first->tx_flags |= IGB_TX_FLAGS_CSUM;
6094 vlan_macip_lens = skb_checksum_start_offset(skb) -
6095 skb_network_offset(skb);
6096no_csum:
6097 vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
6098 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
6099
6100 igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
6101}
6102
6103#define IGB_SET_FLAG(_input, _flag, _result) \
6104 ((_flag <= _result) ? \
6105 ((u32)(_input & _flag) * (_result / _flag)) : \
6106 ((u32)(_input & _flag) / (_flag / _result)))
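/* Illustrative expansion: IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN,
 * E1000_ADVTXD_DCMD_VLE) moves the VLAN request bit from its position
 * in tx_flags to the VLE bit position using only a multiply or divide
 * by a power-of-two constant, which the compiler folds into a shift,
 * so no conditional branch is emitted.
 */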
6107
6108static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
6109{
6110 /* set type for advanced descriptor with frame checksum insertion */
6111 u32 cmd_type = E1000_ADVTXD_DTYP_DATA |
6112 E1000_ADVTXD_DCMD_DEXT |
6113 E1000_ADVTXD_DCMD_IFCS;
6114
6115 /* set HW vlan bit if vlan is present */
6116 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN,
6117 (E1000_ADVTXD_DCMD_VLE));
6118
6119 /* set segmentation bits for TSO */
6120 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO,
6121 (E1000_ADVTXD_DCMD_TSE));
6122
6123 /* set timestamp bit if present */
6124 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP,
6125 (E1000_ADVTXD_MAC_TSTAMP));
6126
6127 /* clear the frame checksum insertion bit if skb->no_fcs is set */
6128 cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS);
6129
6130 return cmd_type;
6131}
6132
6133static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
6134 union e1000_adv_tx_desc *tx_desc,
6135 u32 tx_flags, unsigned int paylen)
6136{
6137 u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
6138
6139 /* 82575 requires a unique index per ring */
6140 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
6141 olinfo_status |= tx_ring->reg_idx << 4;
6142
6143 /* insert L4 checksum */
6144 olinfo_status |= IGB_SET_FLAG(tx_flags,
6145 IGB_TX_FLAGS_CSUM,
6146 (E1000_TXD_POPTS_TXSM << 8));
6147
6148 /* insert IPv4 checksum */
6149 olinfo_status |= IGB_SET_FLAG(tx_flags,
6150 IGB_TX_FLAGS_IPV4,
6151 (E1000_TXD_POPTS_IXSM << 8));
6152
6153 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
6154}
6155
6156static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
6157{
6158 struct net_device *netdev = tx_ring->netdev;
6159
6160 netif_stop_subqueue(netdev, tx_ring->queue_index);
6161
6162 /* Herbert's original patch had:
6163 * smp_mb__after_netif_stop_queue();
6164 * but since that doesn't exist yet, just open code it.
6165 */
6166 smp_mb();
6167
6168 /* We need to check again in case another CPU has just
6169 * made room available.
6170 */
6171 if (igb_desc_unused(tx_ring) < size)
6172 return -EBUSY;
6173
6174 /* A reprieve! */
6175 netif_wake_subqueue(netdev, tx_ring->queue_index);
6176
6177 u64_stats_update_begin(&tx_ring->tx_syncp2);
6178 tx_ring->tx_stats.restart_queue2++;
6179 u64_stats_update_end(&tx_ring->tx_syncp2);
6180
6181 return 0;
6182}
6183
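/* Cheap inline fast path; the stop/recheck/wake slow path in
 * __igb_maybe_stop_tx() is only taken when the ring is genuinely
 * short of descriptors.
 */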
6184static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
6185{
6186 if (igb_desc_unused(tx_ring) >= size)
6187 return 0;
6188 return __igb_maybe_stop_tx(tx_ring, size);
6189}
6190
6191static int igb_tx_map(struct igb_ring *tx_ring,
6192 struct igb_tx_buffer *first,
6193 const u8 hdr_len)
6194{
6195 struct sk_buff *skb = first->skb;
6196 struct igb_tx_buffer *tx_buffer;
6197 union e1000_adv_tx_desc *tx_desc;
6198 skb_frag_t *frag;
6199 dma_addr_t dma;
6200 unsigned int data_len, size;
6201 u32 tx_flags = first->tx_flags;
6202 u32 cmd_type = igb_tx_cmd_type(skb, tx_flags);
6203 u16 i = tx_ring->next_to_use;
6204
6205 tx_desc = IGB_TX_DESC(tx_ring, i);
6206
6207 igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
6208
6209 size = skb_headlen(skb);
6210 data_len = skb->data_len;
6211
6212 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
6213
6214 tx_buffer = first;
6215
6216 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
6217 if (dma_mapping_error(tx_ring->dev, dma))
6218 goto dma_error;
6219
6220 /* record length, and DMA address */
6221 dma_unmap_len_set(tx_buffer, len, size);
6222 dma_unmap_addr_set(tx_buffer, dma, dma);
6223
6224 tx_desc->read.buffer_addr = cpu_to_le64(dma);
6225
6226 while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
6227 tx_desc->read.cmd_type_len =
6228 cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD);
6229
6230 i++;
6231 tx_desc++;
6232 if (i == tx_ring->count) {
6233 tx_desc = IGB_TX_DESC(tx_ring, 0);
6234 i = 0;
6235 }
6236 tx_desc->read.olinfo_status = 0;
6237
6238 dma += IGB_MAX_DATA_PER_TXD;
6239 size -= IGB_MAX_DATA_PER_TXD;
6240
6241 tx_desc->read.buffer_addr = cpu_to_le64(dma);
6242 }
6243
6244 if (likely(!data_len))
6245 break;
6246
6247 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
6248
6249 i++;
6250 tx_desc++;
6251 if (i == tx_ring->count) {
6252 tx_desc = IGB_TX_DESC(tx_ring, 0);
6253 i = 0;
6254 }
6255 tx_desc->read.olinfo_status = 0;
6256
6257 size = skb_frag_size(frag);
6258 data_len -= size;
6259
6260 dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
6261 size, DMA_TO_DEVICE);
6262
6263 tx_buffer = &tx_ring->tx_buffer_info[i];
6264 }
6265
6266 /* write last descriptor with RS and EOP bits */
6267 cmd_type |= size | IGB_TXD_DCMD;
6268 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
6269
6270 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
6271
6272 /* set the timestamp */
6273 first->time_stamp = jiffies;
6274
6275 skb_tx_timestamp(skb);
6276
6277 /* Force memory writes to complete before letting h/w know there
6278 * are new descriptors to fetch. (Only applicable for weak-ordered
6279 * memory model archs, such as IA-64).
6280 *
6281 * We also need this memory barrier to make certain all of the
6282 * status bits have been updated before next_to_watch is written.
6283 */
6284 dma_wmb();
6285
6286 /* set next_to_watch value indicating a packet is present */
6287 first->next_to_watch = tx_desc;
6288
6289 i++;
6290 if (i == tx_ring->count)
6291 i = 0;
6292
6293 tx_ring->next_to_use = i;
6294
6295 /* Make sure there is space in the ring for the next send. */
6296 igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
6297
6298 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
6299 writel(i, tx_ring->tail);
6301 return 0;
6302
6303dma_error:
6304 dev_err(tx_ring->dev, "TX DMA map failed\n");
6305 tx_buffer = &tx_ring->tx_buffer_info[i];
6306
6307 /* clear dma mappings for failed tx_buffer_info map */
6308 while (tx_buffer != first) {
6309 if (dma_unmap_len(tx_buffer, len))
6310 dma_unmap_page(tx_ring->dev,
6311 dma_unmap_addr(tx_buffer, dma),
6312 dma_unmap_len(tx_buffer, len),
6313 DMA_TO_DEVICE);
6314 dma_unmap_len_set(tx_buffer, len, 0);
6315
6316 if (i-- == 0)
6317 i += tx_ring->count;
6318 tx_buffer = &tx_ring->tx_buffer_info[i];
6319 }
6320
6321 if (dma_unmap_len(tx_buffer, len))
6322 dma_unmap_single(tx_ring->dev,
6323 dma_unmap_addr(tx_buffer, dma),
6324 dma_unmap_len(tx_buffer, len),
6325 DMA_TO_DEVICE);
6326 dma_unmap_len_set(tx_buffer, len, 0);
6327
6328 dev_kfree_skb_any(tx_buffer->skb);
6329 tx_buffer->skb = NULL;
6330
6331 tx_ring->next_to_use = i;
6332
6333 return -1;
6334}
6335
6336int igb_xmit_xdp_ring(struct igb_adapter *adapter,
6337 struct igb_ring *tx_ring,
6338 struct xdp_frame *xdpf)
6339{
6340 struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
6341 u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
6342 u16 count, i, index = tx_ring->next_to_use;
6343 struct igb_tx_buffer *tx_head = &tx_ring->tx_buffer_info[index];
6344 struct igb_tx_buffer *tx_buffer = tx_head;
6345 union e1000_adv_tx_desc *tx_desc = IGB_TX_DESC(tx_ring, index);
6346 u32 len = xdpf->len, cmd_type, olinfo_status;
6347 void *data = xdpf->data;
6348
6349 count = TXD_USE_COUNT(len);
6350 for (i = 0; i < nr_frags; i++)
6351 count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i]));
6352
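/* + 3 mirrors the budget in igb_xmit_frame_ring(): a 2 descriptor
 * gap to keep tail from touching head plus 1 slot for a context
 * descriptor
 */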
6353 if (igb_maybe_stop_tx(tx_ring, count + 3))
6354 return IGB_XDP_CONSUMED;
6355
6356 i = 0;
6357 /* record the location of the first descriptor for this packet */
6358 tx_head->bytecount = xdp_get_frame_len(xdpf);
6359 tx_head->type = IGB_TYPE_XDP;
6360 tx_head->gso_segs = 1;
6361 tx_head->xdpf = xdpf;
6362
6363 olinfo_status = tx_head->bytecount << E1000_ADVTXD_PAYLEN_SHIFT;
6364 /* 82575 requires a unique index per ring */
6365 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
6366 olinfo_status |= tx_ring->reg_idx << 4;
6367 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
6368
6369 for (;;) {
6370 dma_addr_t dma;
6371
6372 dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE);
6373 if (dma_mapping_error(tx_ring->dev, dma))
6374 goto unmap;
6375
6376 /* record length, and DMA address */
6377 dma_unmap_len_set(tx_buffer, len, len);
6378 dma_unmap_addr_set(tx_buffer, dma, dma);
6379
6380 /* put descriptor type bits */
6381 cmd_type = E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_DEXT |
6382 E1000_ADVTXD_DCMD_IFCS | len;
6383
6384 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
6385 tx_desc->read.buffer_addr = cpu_to_le64(dma);
6386
6387 tx_buffer->protocol = 0;
6388
6389 if (++index == tx_ring->count)
6390 index = 0;
6391
6392 if (i == nr_frags)
6393 break;
6394
6395 tx_buffer = &tx_ring->tx_buffer_info[index];
6396 tx_desc = IGB_TX_DESC(tx_ring, index);
6397 tx_desc->read.olinfo_status = 0;
6398
6399 data = skb_frag_address(&sinfo->frags[i]);
6400 len = skb_frag_size(&sinfo->frags[i]);
6401 i++;
6402 }
6403 tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_TXD_DCMD);
6404
6405 netdev_tx_sent_queue(txring_txq(tx_ring), tx_head->bytecount);
6406 /* set the timestamp */
6407 tx_head->time_stamp = jiffies;
6408
6409 /* Avoid any potential race with xdp_xmit and cleanup */
6410 smp_wmb();
6411
6412 /* set next_to_watch value indicating a packet is present */
6413 tx_head->next_to_watch = tx_desc;
6414 tx_ring->next_to_use = index;
6415
6416 /* Make sure there is space in the ring for the next send. */
6417 igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
6418
6419 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
6420 writel(index, tx_ring->tail);
6421
6422 return IGB_XDP_TX;
6423
6424unmap:
6425 for (;;) {
6426 tx_buffer = &tx_ring->tx_buffer_info[index];
6427 if (dma_unmap_len(tx_buffer, len))
6428 dma_unmap_page(tx_ring->dev,
6429 dma_unmap_addr(tx_buffer, dma),
6430 dma_unmap_len(tx_buffer, len),
6431 DMA_TO_DEVICE);
6432 dma_unmap_len_set(tx_buffer, len, 0);
6433 if (tx_buffer == tx_head)
6434 break;
6435
6436 if (!index)
6437 index += tx_ring->count;
6438 index--;
6439 }
6440
6441 return IGB_XDP_CONSUMED;
6442}
6443
6444netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
6445 struct igb_ring *tx_ring)
6446{
6447 struct igb_tx_buffer *first;
6448 int tso;
6449 u32 tx_flags = 0;
6450 unsigned short f;
6451 u16 count = TXD_USE_COUNT(skb_headlen(skb));
6452 __be16 protocol = vlan_get_protocol(skb);
6453 u8 hdr_len = 0;
6454
6455 /* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD,
6456 * + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD,
6457 * + 2 desc gap to keep tail from touching head,
6458 * + 1 desc for context descriptor,
6459 * otherwise try next time
6460 */
6461 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
6462 count += TXD_USE_COUNT(skb_frag_size(
6463 &skb_shinfo(skb)->frags[f]));
6464
6465 if (igb_maybe_stop_tx(tx_ring, count + 3)) {
6466 /* this is a hard error */
6467 return NETDEV_TX_BUSY;
6468 }
6469
6470 /* record the location of the first descriptor for this packet */
6471 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
6472 first->type = IGB_TYPE_SKB;
6473 first->skb = skb;
6474 first->bytecount = skb->len;
6475 first->gso_segs = 1;
6476
6477 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
6478 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
6479
6480 if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
6481 !test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS,
6482 &adapter->state)) {
6483 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
6484 tx_flags |= IGB_TX_FLAGS_TSTAMP;
6485
6486 adapter->ptp_tx_skb = skb_get(skb);
6487 adapter->ptp_tx_start = jiffies;
6488 if (adapter->hw.mac.type == e1000_82576)
6489 schedule_work(&adapter->ptp_tx_work);
6490 } else {
6491 adapter->tx_hwtstamp_skipped++;
6492 }
6493 }
6494
6495 if (skb_vlan_tag_present(skb)) {
6496 tx_flags |= IGB_TX_FLAGS_VLAN;
6497 tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
6498 }
6499
6500 /* record initial flags and protocol */
6501 first->tx_flags = tx_flags;
6502 first->protocol = protocol;
6503
6504 tso = igb_tso(tx_ring, first, &hdr_len);
6505 if (tso < 0)
6506 goto out_drop;
6507 else if (!tso)
6508 igb_tx_csum(tx_ring, first);
6509
6510 if (igb_tx_map(tx_ring, first, hdr_len))
6511 goto cleanup_tx_tstamp;
6512
6513 return NETDEV_TX_OK;
6514
6515out_drop:
6516 dev_kfree_skb_any(first->skb);
6517 first->skb = NULL;
6518cleanup_tx_tstamp:
6519 if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP)) {
6520 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
6521
6522 dev_kfree_skb_any(adapter->ptp_tx_skb);
6523 adapter->ptp_tx_skb = NULL;
6524 if (adapter->hw.mac.type == e1000_82576)
6525 cancel_work_sync(&adapter->ptp_tx_work);
6526 clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
6527 }
6528
6529 return NETDEV_TX_OK;
6530}
6531
6532static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
6533 struct sk_buff *skb)
6534{
6535 unsigned int r_idx = skb->queue_mapping;
6536
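/* queue_mapping may exceed the real queue count (for example after
 * the queue count is reduced), so fold it back only in that case and
 * keep the common path division free
 */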
6537 if (r_idx >= adapter->num_tx_queues)
6538 r_idx = r_idx % adapter->num_tx_queues;
6539
6540 return adapter->tx_ring[r_idx];
6541}
6542
6543static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
6544 struct net_device *netdev)
6545{
6546 struct igb_adapter *adapter = netdev_priv(netdev);
6547
6548 /* The minimum packet size with TCTL.PSP set is 17 bytes, so pad
6549 * the skb to meet this minimum size requirement.
6550 */
6551 if (skb_put_padto(skb, 17))
6552 return NETDEV_TX_OK;
6553
6554 return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
6555}
6556
6557/**
6558 * igb_tx_timeout - Respond to a Tx Hang
6559 * @netdev: network interface device structure
6560 * @txqueue: number of the Tx queue that hung (unused)
6561 **/
6562static void igb_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
6563{
6564 struct igb_adapter *adapter = netdev_priv(netdev);
6565 struct e1000_hw *hw = &adapter->hw;
6566
6567 /* Do the reset outside of interrupt context */
6568 adapter->tx_timeout_count++;
6569
6570 if (hw->mac.type >= e1000_82580)
6571 hw->dev_spec._82575.global_device_reset = true;
6572
6573 schedule_work(&adapter->reset_task);
6574 wr32(E1000_EICS,
6575 (adapter->eims_enable_mask & ~adapter->eims_other));
6576}
6577
6578static void igb_reset_task(struct work_struct *work)
6579{
6580 struct igb_adapter *adapter;
6581 adapter = container_of(work, struct igb_adapter, reset_task);
6582
6583 rtnl_lock();
6584 /* If we're already down or resetting, just bail */
6585 if (test_bit(__IGB_DOWN, &adapter->state) ||
6586 test_bit(__IGB_RESETTING, &adapter->state)) {
6587 rtnl_unlock();
6588 return;
6589 }
6590
6591 igb_dump(adapter);
6592 netdev_err(adapter->netdev, "Reset adapter\n");
6593 igb_reinit_locked(adapter);
6594 rtnl_unlock();
6595}
6596
6597/**
6598 * igb_get_stats64 - Get System Network Statistics
6599 * @netdev: network interface device structure
6600 * @stats: rtnl_link_stats64 pointer
6601 **/
6602static void igb_get_stats64(struct net_device *netdev,
6603 struct rtnl_link_stats64 *stats)
6604{
6605 struct igb_adapter *adapter = netdev_priv(netdev);
6606
6607 spin_lock(&adapter->stats64_lock);
6608 igb_update_stats(adapter);
6609 memcpy(stats, &adapter->stats64, sizeof(*stats));
6610 spin_unlock(&adapter->stats64_lock);
6611}
6612
6613/**
6614 * igb_change_mtu - Change the Maximum Transfer Unit
6615 * @netdev: network interface device structure
6616 * @new_mtu: new value for maximum frame size
6617 *
6618 * Returns 0 on success, negative on failure
6619 **/
6620static int igb_change_mtu(struct net_device *netdev, int new_mtu)
6621{
6622 struct igb_adapter *adapter = netdev_priv(netdev);
6623 int max_frame = new_mtu + IGB_ETH_PKT_HDR_PAD;
6624
6625 if (adapter->xdp_prog) {
6626 int i;
6627
6628 for (i = 0; i < adapter->num_rx_queues; i++) {
6629 struct igb_ring *ring = adapter->rx_ring[i];
6630
6631 if (max_frame > igb_rx_bufsz(ring)) {
6632 netdev_warn(adapter->netdev,
6633 "Requested MTU size is not supported with XDP. Max frame size is %d\n",
6634 max_frame);
6635 return -EINVAL;
6636 }
6637 }
6638 }
6639
6640 /* adjust max frame to be at least the size of a standard frame */
6641 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
6642 max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
6643
6644 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
6645 usleep_range(1000, 2000);
6646
6647 /* igb_down has a dependency on max_frame_size */
6648 adapter->max_frame_size = max_frame;
6649
6650 if (netif_running(netdev))
6651 igb_down(adapter);
6652
6653 netdev_dbg(netdev, "changing MTU from %d to %d\n",
6654 netdev->mtu, new_mtu);
6655 WRITE_ONCE(netdev->mtu, new_mtu);
6656
6657 if (netif_running(netdev))
6658 igb_up(adapter);
6659 else
6660 igb_reset(adapter);
6661
6662 clear_bit(__IGB_RESETTING, &adapter->state);
6663
6664 return 0;
6665}
6666
6667/**
6668 * igb_update_stats - Update the board statistics counters
6669 * @adapter: board private structure
6670 **/
6671void igb_update_stats(struct igb_adapter *adapter)
6672{
6673 struct rtnl_link_stats64 *net_stats = &adapter->stats64;
6674 struct e1000_hw *hw = &adapter->hw;
6675 struct pci_dev *pdev = adapter->pdev;
6676 u32 reg, mpc;
6677 int i;
6678 u64 bytes, packets;
6679 unsigned int start;
6680 u64 _bytes, _packets;
6681
6682 /* Prevent stats update while adapter is being reset, or if the pci
6683 * connection is down.
6684 */
6685 if (adapter->link_speed == 0)
6686 return;
6687 if (pci_channel_offline(pdev))
6688 return;
6689
6690 bytes = 0;
6691 packets = 0;
6692
6693 rcu_read_lock();
6694 for (i = 0; i < adapter->num_rx_queues; i++) {
6695 struct igb_ring *ring = adapter->rx_ring[i];
6696 u32 rqdpc = rd32(E1000_RQDPC(i));
6697 if (hw->mac.type >= e1000_i210)
6698 wr32(E1000_RQDPC(i), 0);
6699
6700 if (rqdpc) {
6701 ring->rx_stats.drops += rqdpc;
6702 net_stats->rx_fifo_errors += rqdpc;
6703 }
6704
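/* u64_stats snapshot loop: retry if a writer updated the counters
 * mid-read (this matters mainly on 32 bit hosts)
 */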
6705 do {
6706 start = u64_stats_fetch_begin(&ring->rx_syncp);
6707 _bytes = ring->rx_stats.bytes;
6708 _packets = ring->rx_stats.packets;
6709 } while (u64_stats_fetch_retry(&ring->rx_syncp, start));
6710 bytes += _bytes;
6711 packets += _packets;
6712 }
6713
6714 net_stats->rx_bytes = bytes;
6715 net_stats->rx_packets = packets;
6716
6717 bytes = 0;
6718 packets = 0;
6719 for (i = 0; i < adapter->num_tx_queues; i++) {
6720 struct igb_ring *ring = adapter->tx_ring[i];
6721 do {
6722 start = u64_stats_fetch_begin(&ring->tx_syncp);
6723 _bytes = ring->tx_stats.bytes;
6724 _packets = ring->tx_stats.packets;
6725 } while (u64_stats_fetch_retry(&ring->tx_syncp, start));
6726 bytes += _bytes;
6727 packets += _packets;
6728 }
6729 net_stats->tx_bytes = bytes;
6730 net_stats->tx_packets = packets;
6731 rcu_read_unlock();
6732
6733 /* read stats registers */
6734 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
6735 adapter->stats.gprc += rd32(E1000_GPRC);
6736 adapter->stats.gorc += rd32(E1000_GORCL);
6737 rd32(E1000_GORCH); /* clear GORCL */
6738 adapter->stats.bprc += rd32(E1000_BPRC);
6739 adapter->stats.mprc += rd32(E1000_MPRC);
6740 adapter->stats.roc += rd32(E1000_ROC);
6741
6742 adapter->stats.prc64 += rd32(E1000_PRC64);
6743 adapter->stats.prc127 += rd32(E1000_PRC127);
6744 adapter->stats.prc255 += rd32(E1000_PRC255);
6745 adapter->stats.prc511 += rd32(E1000_PRC511);
6746 adapter->stats.prc1023 += rd32(E1000_PRC1023);
6747 adapter->stats.prc1522 += rd32(E1000_PRC1522);
6748 adapter->stats.symerrs += rd32(E1000_SYMERRS);
6749 adapter->stats.sec += rd32(E1000_SEC);
6750
6751 mpc = rd32(E1000_MPC);
6752 adapter->stats.mpc += mpc;
6753 net_stats->rx_fifo_errors += mpc;
6754 adapter->stats.scc += rd32(E1000_SCC);
6755 adapter->stats.ecol += rd32(E1000_ECOL);
6756 adapter->stats.mcc += rd32(E1000_MCC);
6757 adapter->stats.latecol += rd32(E1000_LATECOL);
6758 adapter->stats.dc += rd32(E1000_DC);
6759 adapter->stats.rlec += rd32(E1000_RLEC);
6760 adapter->stats.xonrxc += rd32(E1000_XONRXC);
6761 adapter->stats.xontxc += rd32(E1000_XONTXC);
6762 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
6763 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
6764 adapter->stats.fcruc += rd32(E1000_FCRUC);
6765 adapter->stats.gptc += rd32(E1000_GPTC);
6766 adapter->stats.gotc += rd32(E1000_GOTCL);
6767 rd32(E1000_GOTCH); /* clear GOTCL */
6768 adapter->stats.rnbc += rd32(E1000_RNBC);
6769 adapter->stats.ruc += rd32(E1000_RUC);
6770 adapter->stats.rfc += rd32(E1000_RFC);
6771 adapter->stats.rjc += rd32(E1000_RJC);
6772 adapter->stats.tor += rd32(E1000_TORH);
6773 adapter->stats.tot += rd32(E1000_TOTH);
6774 adapter->stats.tpr += rd32(E1000_TPR);
6775
6776 adapter->stats.ptc64 += rd32(E1000_PTC64);
6777 adapter->stats.ptc127 += rd32(E1000_PTC127);
6778 adapter->stats.ptc255 += rd32(E1000_PTC255);
6779 adapter->stats.ptc511 += rd32(E1000_PTC511);
6780 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
6781 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
6782
6783 adapter->stats.mptc += rd32(E1000_MPTC);
6784 adapter->stats.bptc += rd32(E1000_BPTC);
6785
6786 adapter->stats.tpt += rd32(E1000_TPT);
6787 adapter->stats.colc += rd32(E1000_COLC);
6788
6789 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
6790 /* read internal phy specific stats */
6791 reg = rd32(E1000_CTRL_EXT);
6792 if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
6793 adapter->stats.rxerrc += rd32(E1000_RXERRC);
6794
6795 /* this stat has invalid values on i210/i211 */
6796 if ((hw->mac.type != e1000_i210) &&
6797 (hw->mac.type != e1000_i211))
6798 adapter->stats.tncrs += rd32(E1000_TNCRS);
6799 }
6800
6801 adapter->stats.tsctc += rd32(E1000_TSCTC);
6802 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
6803
6804 adapter->stats.iac += rd32(E1000_IAC);
6805 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
6806 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
6807 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
6808 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
6809 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
6810 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
6811 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
6812 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
6813
6814 /* Fill out the OS statistics structure */
6815 net_stats->multicast = adapter->stats.mprc;
6816 net_stats->collisions = adapter->stats.colc;
6817
6818 /* Rx Errors */
6819
6820 /* RLEC on some newer hardware can be incorrect so build
6821 * our own version based on RUC and ROC
6822 */
6823 net_stats->rx_errors = adapter->stats.rxerrc +
6824 adapter->stats.crcerrs + adapter->stats.algnerrc +
6825 adapter->stats.ruc + adapter->stats.roc +
6826 adapter->stats.cexterr;
6827 net_stats->rx_length_errors = adapter->stats.ruc +
6828 adapter->stats.roc;
6829 net_stats->rx_crc_errors = adapter->stats.crcerrs;
6830 net_stats->rx_frame_errors = adapter->stats.algnerrc;
6831 net_stats->rx_missed_errors = adapter->stats.mpc;
6832
6833 /* Tx Errors */
6834 net_stats->tx_errors = adapter->stats.ecol +
6835 adapter->stats.latecol;
6836 net_stats->tx_aborted_errors = adapter->stats.ecol;
6837 net_stats->tx_window_errors = adapter->stats.latecol;
6838 net_stats->tx_carrier_errors = adapter->stats.tncrs;
6839
6840 /* Tx Dropped needs to be maintained elsewhere */
6841
6842 /* Management Stats */
6843 adapter->stats.mgptc += rd32(E1000_MGTPTC);
6844 adapter->stats.mgprc += rd32(E1000_MGTPRC);
6845 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
6846
6847 /* OS2BMC Stats */
6848 reg = rd32(E1000_MANC);
6849 if (reg & E1000_MANC_EN_BMC2OS) {
6850 adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
6851 adapter->stats.o2bspc += rd32(E1000_O2BSPC);
6852 adapter->stats.b2ospc += rd32(E1000_B2OSPC);
6853 adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
6854 }
6855}
6856
6857static void igb_perout(struct igb_adapter *adapter, int tsintr_tt)
6858{
6859 int pin = ptp_find_pin(adapter->ptp_clock, PTP_PF_PEROUT, tsintr_tt);
6860 struct e1000_hw *hw = &adapter->hw;
6861 struct timespec64 ts;
6862 u32 tsauxc;
6863
6864 if (pin < 0 || pin >= IGB_N_SDP)
6865 return;
6866
6867 spin_lock(&adapter->tmreg_lock);
6868
6869 if (hw->mac.type == e1000_82580 ||
6870 hw->mac.type == e1000_i354 ||
6871 hw->mac.type == e1000_i350) {
6872 s64 ns = timespec64_to_ns(&adapter->perout[tsintr_tt].period);
6873 u32 systiml, systimh, level_mask, level, rem;
6874 u64 systim, now;
6875
6876 /* read systim registers in sequence */
6877 rd32(E1000_SYSTIMR);
6878 systiml = rd32(E1000_SYSTIML);
6879 systimh = rd32(E1000_SYSTIMH);
6880 systim = (((u64)(systimh & 0xFF)) << 32) | ((u64)systiml);
6881 now = timecounter_cyc2time(&adapter->tc, systim);
6882
6883 if (pin < 2) {
6884 level_mask = (tsintr_tt == 1) ? 0x80000 : 0x40000;
6885 level = (rd32(E1000_CTRL) & level_mask) ? 1 : 0;
6886 } else {
6887 level_mask = (tsintr_tt == 1) ? 0x80 : 0x40;
6888 level = (rd32(E1000_CTRL_EXT) & level_mask) ? 1 : 0;
6889 }
6890
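/* round the next toggle time up to a whole period boundary:
 * systim += ns - (now % ns)
 */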
6891 div_u64_rem(now, ns, &rem);
6892 systim = systim + (ns - rem);
6893
6894 /* synchronize pin level with rising/falling edges */
6895 div_u64_rem(now, ns << 1, &rem);
6896 if (rem < ns) {
6897 /* first half of period */
6898 if (level == 0) {
6899 /* output is already low, skip this period */
6900 systim += ns;
6901 pr_notice("igb: periodic output on %s missed falling edge\n",
6902 adapter->sdp_config[pin].name);
6903 }
6904 } else {
6905 /* second half of period */
6906 if (level == 1) {
6907 /* output is already high, skip this period */
6908 systim += ns;
6909 pr_notice("igb: periodic output on %s missed rising edge\n",
6910 adapter->sdp_config[pin].name);
6911 }
6912 }
6913
6914 /* for this chip family, tv_sec carries the upper 8 bits of the raw
6915 * 40 bit SYSTIM value rather than whole seconds
6916 */
6917 ts.tv_nsec = (u32)systim;
6918 ts.tv_sec = ((u32)(systim >> 32)) & 0xFF;
6919 } else {
6920 ts = timespec64_add(adapter->perout[tsintr_tt].start,
6921 adapter->perout[tsintr_tt].period);
6922 }
6923
6924 /* u32 conversion of tv_sec is safe until y2106 */
6925 wr32((tsintr_tt == 1) ? E1000_TRGTTIML1 : E1000_TRGTTIML0, ts.tv_nsec);
6926 wr32((tsintr_tt == 1) ? E1000_TRGTTIMH1 : E1000_TRGTTIMH0, (u32)ts.tv_sec);
6927 tsauxc = rd32(E1000_TSAUXC);
6928 tsauxc |= (tsintr_tt == 1) ? TSAUXC_EN_TT1 : TSAUXC_EN_TT0;
6929 wr32(E1000_TSAUXC, tsauxc);
6930 adapter->perout[tsintr_tt].start = ts;
6931
6932 spin_unlock(&adapter->tmreg_lock);
6933}
6934
6935static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
6936{
6937 int pin = ptp_find_pin(adapter->ptp_clock, PTP_PF_EXTTS, tsintr_tt);
6938 int auxstmpl = (tsintr_tt == 1) ? E1000_AUXSTMPL1 : E1000_AUXSTMPL0;
6939 int auxstmph = (tsintr_tt == 1) ? E1000_AUXSTMPH1 : E1000_AUXSTMPH0;
6940 struct e1000_hw *hw = &adapter->hw;
6941 struct ptp_clock_event event;
6942 struct timespec64 ts;
6943 unsigned long flags;
6944
6945 if (pin < 0 || pin >= IGB_N_SDP)
6946 return;
6947
6948 if (hw->mac.type == e1000_82580 ||
6949 hw->mac.type == e1000_i354 ||
6950 hw->mac.type == e1000_i350) {
6951 u64 ns = rd32(auxstmpl);
6952
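/* AUXSTMPH carries the upper 8 bits of the 40 bit raw timestamp;
 * assemble it, then convert through the timecounter
 */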
6953 ns += ((u64)(rd32(auxstmph) & 0xFF)) << 32;
6954 spin_lock_irqsave(&adapter->tmreg_lock, flags);
6955 ns = timecounter_cyc2time(&adapter->tc, ns);
6956 spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
6957 ts = ns_to_timespec64(ns);
6958 } else {
6959 ts.tv_nsec = rd32(auxstmpl);
6960 ts.tv_sec = rd32(auxstmph);
6961 }
6962
6963 event.type = PTP_CLOCK_EXTTS;
6964 event.index = tsintr_tt;
6965 event.timestamp = ts.tv_sec * 1000000000ULL + ts.tv_nsec;
6966 ptp_clock_event(adapter->ptp_clock, &event);
6967}
6968
6969static void igb_tsync_interrupt(struct igb_adapter *adapter)
6970{
6971 const u32 mask = (TSINTR_SYS_WRAP | E1000_TSICR_TXTS |
6972 TSINTR_TT0 | TSINTR_TT1 |
6973 TSINTR_AUTT0 | TSINTR_AUTT1);
6974 struct e1000_hw *hw = &adapter->hw;
6975 u32 tsicr = rd32(E1000_TSICR);
6976 struct ptp_clock_event event;
6977
6978 if (hw->mac.type == e1000_82580) {
6979 /* 82580 has a hardware bug that requires an explicit
6980 * write to clear the TimeSync interrupt cause.
6981 */
6982 wr32(E1000_TSICR, tsicr & mask);
6983 }
6984
6985 if (tsicr & TSINTR_SYS_WRAP) {
6986 event.type = PTP_CLOCK_PPS;
6987 if (adapter->ptp_caps.pps)
6988 ptp_clock_event(adapter->ptp_clock, &event);
6989 }
6990
6991 if (tsicr & E1000_TSICR_TXTS) {
6992 /* retrieve hardware timestamp */
6993 schedule_work(&adapter->ptp_tx_work);
6994 }
6995
6996 if (tsicr & TSINTR_TT0)
6997 igb_perout(adapter, 0);
6998
6999 if (tsicr & TSINTR_TT1)
7000 igb_perout(adapter, 1);
7001
7002 if (tsicr & TSINTR_AUTT0)
7003 igb_extts(adapter, 0);
7004
7005 if (tsicr & TSINTR_AUTT1)
7006 igb_extts(adapter, 1);
7007}
7008
7009static irqreturn_t igb_msix_other(int irq, void *data)
7010{
7011 struct igb_adapter *adapter = data;
7012 struct e1000_hw *hw = &adapter->hw;
7013 u32 icr = rd32(E1000_ICR);
7014 /* reading ICR causes bit 31 of EICR to be cleared */
7015
7016 if (icr & E1000_ICR_DRSTA)
7017 schedule_work(&adapter->reset_task);
7018
7019 if (icr & E1000_ICR_DOUTSYNC) {
7020 /* HW is reporting DMA is out of sync */
7021 adapter->stats.doosync++;
7022 /* The DMA Out of Sync is also indication of a spoof event
7023 * in IOV mode. Check the Wrong VM Behavior register to
7024 * see if it is really a spoof event.
7025 */
7026 igb_check_wvbr(adapter);
7027 }
7028
7029 /* Check for a mailbox event */
7030 if (icr & E1000_ICR_VMMB)
7031 igb_msg_task(adapter);
7032
7033 if (icr & E1000_ICR_LSC) {
7034 hw->mac.get_link_status = 1;
7035 /* guard against interrupt when we're going down */
7036 if (!test_bit(__IGB_DOWN, &adapter->state))
7037 mod_timer(&adapter->watchdog_timer, jiffies + 1);
7038 }
7039
7040 if (icr & E1000_ICR_TS)
7041 igb_tsync_interrupt(adapter);
7042
7043 wr32(E1000_EIMS, adapter->eims_other);
7044
7045 return IRQ_HANDLED;
7046}
7047
7048static void igb_write_itr(struct igb_q_vector *q_vector)
7049{
7050 struct igb_adapter *adapter = q_vector->adapter;
7051 u32 itr_val = q_vector->itr_val & 0x7FFC;
7052
7053 if (!q_vector->set_itr)
7054 return;
7055
7056 if (!itr_val)
7057 itr_val = 0x4;
7058
7059 if (adapter->hw.mac.type == e1000_82575)
7060 itr_val |= itr_val << 16;
7061 else
7062 itr_val |= E1000_EITR_CNT_IGNR;
7063
7064 writel(itr_val, q_vector->itr_register);
7065 q_vector->set_itr = 0;
7066}
7067
7068static irqreturn_t igb_msix_ring(int irq, void *data)
7069{
7070 struct igb_q_vector *q_vector = data;
7071
7072 /* Write the ITR value calculated from the previous interrupt. */
7073 igb_write_itr(q_vector);
7074
7075 napi_schedule(&q_vector->napi);
7076
7077 return IRQ_HANDLED;
7078}
7079
7080#ifdef CONFIG_IGB_DCA
7081static void igb_update_tx_dca(struct igb_adapter *adapter,
7082 struct igb_ring *tx_ring,
7083 int cpu)
7084{
7085 struct e1000_hw *hw = &adapter->hw;
7086 u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
7087
7088 if (hw->mac.type != e1000_82575)
7089 txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT;
7090
7091 /* We can enable relaxed ordering for reads, but not writes when
7092 * DCA is enabled. This is due to a known issue in some chipsets
7093 * which will cause the DCA tag to be cleared.
7094 */
7095 txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN |
7096 E1000_DCA_TXCTRL_DATA_RRO_EN |
7097 E1000_DCA_TXCTRL_DESC_DCA_EN;
7098
7099 wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl);
7100}
7101
7102static void igb_update_rx_dca(struct igb_adapter *adapter,
7103 struct igb_ring *rx_ring,
7104 int cpu)
7105{
7106 struct e1000_hw *hw = &adapter->hw;
7107 u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu);
7108
7109 if (hw->mac.type != e1000_82575)
7110 rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT;
7111
7112 /* We can enable relaxed ordering for reads, but not writes when
7113 * DCA is enabled. This is due to a known issue in some chipsets
7114 * which will cause the DCA tag to be cleared.
7115 */
7116 rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN |
7117 E1000_DCA_RXCTRL_DESC_DCA_EN;
7118
7119 wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl);
7120}
7121
7122static void igb_update_dca(struct igb_q_vector *q_vector)
7123{
7124 struct igb_adapter *adapter = q_vector->adapter;
7125 int cpu = get_cpu();
7126
7127 if (q_vector->cpu == cpu)
7128 goto out_no_update;
7129
7130 if (q_vector->tx.ring)
7131 igb_update_tx_dca(adapter, q_vector->tx.ring, cpu);
7132
7133 if (q_vector->rx.ring)
7134 igb_update_rx_dca(adapter, q_vector->rx.ring, cpu);
7135
7136 q_vector->cpu = cpu;
7137out_no_update:
7138 put_cpu();
7139}
7140
7141static void igb_setup_dca(struct igb_adapter *adapter)
7142{
7143 struct e1000_hw *hw = &adapter->hw;
7144 int i;
7145
7146 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
7147 return;
7148
7149 /* Always use CB2 mode, difference is masked in the CB driver. */
7150 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
7151
7152 for (i = 0; i < adapter->num_q_vectors; i++) {
7153 adapter->q_vector[i]->cpu = -1;
7154 igb_update_dca(adapter->q_vector[i]);
7155 }
7156}
7157
7158static int __igb_notify_dca(struct device *dev, void *data)
7159{
7160 struct net_device *netdev = dev_get_drvdata(dev);
7161 struct igb_adapter *adapter = netdev_priv(netdev);
7162 struct pci_dev *pdev = adapter->pdev;
7163 struct e1000_hw *hw = &adapter->hw;
7164 unsigned long event = *(unsigned long *)data;
7165
7166 switch (event) {
7167 case DCA_PROVIDER_ADD:
7168 /* if already enabled, don't do it again */
7169 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
7170 break;
7171 if (dca_add_requester(dev) == 0) {
7172 adapter->flags |= IGB_FLAG_DCA_ENABLED;
7173 dev_info(&pdev->dev, "DCA enabled\n");
7174 igb_setup_dca(adapter);
7175 break;
7176 }
7177 fallthrough; /* since DCA is disabled. */
7178 case DCA_PROVIDER_REMOVE:
7179 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
7180 /* without this a class_device is left
7181 * hanging around in the sysfs model
7182 */
7183 dca_remove_requester(dev);
7184 dev_info(&pdev->dev, "DCA disabled\n");
7185 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
7186 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
7187 }
7188 break;
7189 }
7190
7191 return 0;
7192}
7193
7194static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
7195 void *p)
7196{
7197 int ret_val;
7198
7199 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
7200 __igb_notify_dca);
7201
7202 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
7203}
7204#endif /* CONFIG_IGB_DCA */
7205
7206#ifdef CONFIG_PCI_IOV
7207static int igb_vf_configure(struct igb_adapter *adapter, int vf)
7208{
7209 unsigned char mac_addr[ETH_ALEN];
7210
7211 eth_zero_addr(mac_addr);
7212 igb_set_vf_mac(adapter, vf, mac_addr);
7213
7214 /* By default spoof check is enabled for all VFs */
7215 adapter->vf_data[vf].spoofchk_enabled = true;
7216
7217 /* By default VFs are not trusted */
7218 adapter->vf_data[vf].trusted = false;
7219
7220 return 0;
7221}
7222
7223#endif
7224static void igb_ping_all_vfs(struct igb_adapter *adapter)
7225{
7226 struct e1000_hw *hw = &adapter->hw;
7227 u32 ping;
7228 int i;
7229
7230 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
7231 ping = E1000_PF_CONTROL_MSG;
7232 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
7233 ping |= E1000_VT_MSGTYPE_CTS;
7234 igb_write_mbx(hw, &ping, 1, i);
7235 }
7236}
7237
7238static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
7239{
7240 struct e1000_hw *hw = &adapter->hw;
7241 u32 vmolr = rd32(E1000_VMOLR(vf));
7242 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7243
7244 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
7245 IGB_VF_FLAG_MULTI_PROMISC);
7246 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
7247
7248 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
7249 vmolr |= E1000_VMOLR_MPME;
7250 vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
7251 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
7252 } else {
7253 /* if we have hashes and we are clearing a multicast promisc
7254 * flag we need to write the hashes to the MTA as this step
7255 * was previously skipped
7256 */
7257 if (vf_data->num_vf_mc_hashes > 30) {
7258 vmolr |= E1000_VMOLR_MPME;
7259 } else if (vf_data->num_vf_mc_hashes) {
7260 int j;
7261
7262 vmolr |= E1000_VMOLR_ROMPE;
7263 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
7264 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
7265 }
7266 }
7267
7268 wr32(E1000_VMOLR(vf), vmolr);
7269
7270 /* any flags left unprocessed are likely unsupported */
7271 if (*msgbuf & E1000_VT_MSGINFO_MASK)
7272 return -EINVAL;
7273
7274 return 0;
7275}
7276
7277static int igb_set_vf_multicasts(struct igb_adapter *adapter,
7278 u32 *msgbuf, u32 vf)
7279{
7280 int n = FIELD_GET(E1000_VT_MSGINFO_MASK, msgbuf[0]);
7281 u16 *hash_list = (u16 *)&msgbuf[1];
7282 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
7283 int i;
7284
7285 /* record the number of multicast addresses assigned to this VF
7286 * so they can be restored when the PF multicast list changes
7287 */
7289 vf_data->num_vf_mc_hashes = n;
7290
7291 /* only up to 30 hash values supported */
7292 if (n > 30)
7293 n = 30;
7294
7295 /* store the hashes for later use */
7296 for (i = 0; i < n; i++)
7297 vf_data->vf_mc_hashes[i] = hash_list[i];
7298
7299 /* Flush and reset the mta with the new values */
7300 igb_set_rx_mode(adapter->netdev);
7301
7302 return 0;
7303}
7304
7305static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
7306{
7307 struct e1000_hw *hw = &adapter->hw;
7308 struct vf_data_storage *vf_data;
7309 int i, j;
7310
7311 for (i = 0; i < adapter->vfs_allocated_count; i++) {
7312 u32 vmolr = rd32(E1000_VMOLR(i));
7313
7314 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
7315
7316 vf_data = &adapter->vf_data[i];
7317
7318 if ((vf_data->num_vf_mc_hashes > 30) ||
7319 (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
7320 vmolr |= E1000_VMOLR_MPME;
7321 } else if (vf_data->num_vf_mc_hashes) {
7322 vmolr |= E1000_VMOLR_ROMPE;
7323 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
7324 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
7325 }
7326 wr32(E1000_VMOLR(i), vmolr);
7327 }
7328}
7329
7330static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
7331{
7332 struct e1000_hw *hw = &adapter->hw;
7333 u32 pool_mask, vlvf_mask, i;
7334
7335 /* create mask for VF and other pools */
7336 pool_mask = E1000_VLVF_POOLSEL_MASK;
7337 vlvf_mask = BIT(E1000_VLVF_POOLSEL_SHIFT + vf);
7338
7339 /* drop PF from pool bits */
7340 pool_mask &= ~BIT(E1000_VLVF_POOLSEL_SHIFT +
7341 adapter->vfs_allocated_count);
7342
7343 /* Find the vlan filter for this id */
7344 for (i = E1000_VLVF_ARRAY_SIZE; i--;) {
7345 u32 vlvf = rd32(E1000_VLVF(i));
7346 u32 vfta_mask, vid, vfta;
7347
7348 /* remove the vf from the pool */
7349 if (!(vlvf & vlvf_mask))
7350 continue;
7351
7352 /* clear out bit from VLVF */
7353 vlvf ^= vlvf_mask;
7354
7355 /* if other pools are present, just remove ourselves */
7356 if (vlvf & pool_mask)
7357 goto update_vlvfb;
7358
7359 /* if PF is present, leave VFTA */
7360 if (vlvf & E1000_VLVF_POOLSEL_MASK)
7361 goto update_vlvf;
7362
7363 vid = vlvf & E1000_VLVF_VLANID_MASK;
7364 vfta_mask = BIT(vid % 32);
7365
7366 /* clear bit from VFTA */
7367 vfta = adapter->shadow_vfta[vid / 32];
7368 if (vfta & vfta_mask)
7369 hw->mac.ops.write_vfta(hw, vid / 32, vfta ^ vfta_mask);
7370update_vlvf:
7371 /* clear pool selection enable */
7372 if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
7373 vlvf &= E1000_VLVF_POOLSEL_MASK;
7374 else
7375 vlvf = 0;
7376update_vlvfb:
7377 /* clear pool bits */
7378 wr32(E1000_VLVF(i), vlvf);
7379 }
7380}
7381
7382static int igb_find_vlvf_entry(struct e1000_hw *hw, u32 vlan)
7383{
7384 u32 vlvf;
7385 int idx;
7386
7387 /* short-circuit the VLAN 0 special case */
7388 if (vlan == 0)
7389 return 0;
7390
7391 /* Search for the VLAN id in the VLVF entries */
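/* the pre-decrement loop never tests entry 0, so idx == 0 doubles as
 * the "not found" return value
 */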
7392 for (idx = E1000_VLVF_ARRAY_SIZE; --idx;) {
7393 vlvf = rd32(E1000_VLVF(idx));
7394 if ((vlvf & VLAN_VID_MASK) == vlan)
7395 break;
7396 }
7397
7398 return idx;
7399}
7400
7401static void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid)
7402{
7403 struct e1000_hw *hw = &adapter->hw;
7404 u32 bits, pf_id;
7405 int idx;
7406
7407 idx = igb_find_vlvf_entry(hw, vid);
7408 if (!idx)
7409 return;
7410
7411 /* See if any other pools are set for this VLAN filter
7412 * entry other than the PF.
7413 */
7414 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
7415 bits = ~BIT(pf_id) & E1000_VLVF_POOLSEL_MASK;
7416 bits &= rd32(E1000_VLVF(idx));
7417
7418 /* Disable the filter so this falls into the default pool. */
7419 if (!bits) {
7420 if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
7421 wr32(E1000_VLVF(idx), BIT(pf_id));
7422 else
7423 wr32(E1000_VLVF(idx), 0);
7424 }
7425}
7426
static s32 igb_set_vf_vlan(struct igb_adapter *adapter, u32 vid,
			   bool add, u32 vf)
{
	int pf_id = adapter->vfs_allocated_count;
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* If the VLAN overlaps with one the PF is currently monitoring,
	 * make sure that we are able to allocate a VLVF entry.  This may
	 * be redundant, but it guarantees the PF will maintain visibility
	 * to the VLAN.
	 */
	if (add && test_bit(vid, adapter->active_vlans)) {
		err = igb_vfta_set(hw, vid, pf_id, true, false);
		if (err)
			return err;
	}

	err = igb_vfta_set(hw, vid, vf, add, false);

	if (add && !err)
		return err;

	/* If we failed to add the VF VLAN or we are removing the VF VLAN
	 * we may need to drop the PF pool bit in order to allow us to free
	 * up the VLVF resources.
	 */
	if (test_bit(vid, adapter->active_vlans) ||
	    (adapter->flags & IGB_FLAG_VLAN_PROMISC))
		igb_update_pf_vlvf(adapter, vid);

	return err;
}

static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;

	if (vid)
		wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
	else
		wr32(E1000_VMVIR(vf), 0);
}

static int igb_enable_port_vlan(struct igb_adapter *adapter, int vf,
				u16 vlan, u8 qos)
{
	int err;

	err = igb_set_vf_vlan(adapter, vlan, true, vf);
	if (err)
		return err;

	igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
	igb_set_vmolr(adapter, vf, !vlan);

	/* revoke access to previous VLAN */
	if (vlan != adapter->vf_data[vf].pf_vlan)
		igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
				false, vf);

	adapter->vf_data[vf].pf_vlan = vlan;
	adapter->vf_data[vf].pf_qos = qos;
	igb_set_vf_vlan_strip(adapter, vf, true);
	dev_info(&adapter->pdev->dev,
		 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_warn(&adapter->pdev->dev,
			 "The VF VLAN has been set, but the PF device is not up.\n");
		dev_warn(&adapter->pdev->dev,
			 "Bring the PF device up before attempting to use the VF device.\n");
	}

	return err;
}

static int igb_disable_port_vlan(struct igb_adapter *adapter, int vf)
{
	/* Restore tagless access via VLAN 0 */
	igb_set_vf_vlan(adapter, 0, true, vf);

	igb_set_vmvir(adapter, 0, vf);
	igb_set_vmolr(adapter, vf, true);

	/* Remove any PF assigned VLAN */
	if (adapter->vf_data[vf].pf_vlan)
		igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
				false, vf);

	adapter->vf_data[vf].pf_vlan = 0;
	adapter->vf_data[vf].pf_qos = 0;
	igb_set_vf_vlan_strip(adapter, vf, false);

	return 0;
}

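/* .ndo_set_vf_vlan entry point; reached from user space through the
 * IFLA_VF_VLAN netlink attribute, e.g. with iproute2 (illustrative
 * invocation only, device and VF number are placeholders):
 *
 *	ip link set dev eth0 vf 0 vlan 100 qos 3
 *
 * A vlan/qos pair of 0/0 removes the port VLAN instead.
 */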
static int igb_ndo_set_vf_vlan(struct net_device *netdev, int vf,
			       u16 vlan, u8 qos, __be16 vlan_proto)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
		return -EINVAL;

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	return (vlan || qos) ? igb_enable_port_vlan(adapter, vf, vlan, qos) :
			       igb_disable_port_vlan(adapter, vf);
}

static int igb_set_vf_vlan_msg(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	int add = FIELD_GET(E1000_VT_MSGINFO_MASK, msgbuf[0]);
	int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
	int ret;

	if (adapter->vf_data[vf].pf_vlan)
		return -1;

	/* VLAN 0 is a special case, don't allow it to be removed */
	if (!vid && !add)
		return 0;

	ret = igb_set_vf_vlan(adapter, vid, !!add, vf);
	if (!ret)
		igb_set_vf_vlan_strip(adapter, vf, !!vid);
	return ret;
}

static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
{
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];

	/* clear flags - except flag that indicates PF has set the MAC */
	vf_data->flags &= IGB_VF_FLAG_PF_SET_MAC;
	vf_data->last_nack = jiffies;

	/* reset vlans for device */
	igb_clear_vf_vfta(adapter, vf);
	igb_set_vf_vlan(adapter, vf_data->pf_vlan, true, vf);
	igb_set_vmvir(adapter, vf_data->pf_vlan |
			       (vf_data->pf_qos << VLAN_PRIO_SHIFT), vf);
	igb_set_vmolr(adapter, vf, !vf_data->pf_vlan);
	igb_set_vf_vlan_strip(adapter, vf, !!(vf_data->pf_vlan));

	/* reset multicast table array for vf */
	adapter->vf_data[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);
}

static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
{
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;

	/* clear mac address as we were hotplug removed/added */
	if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
		eth_zero_addr(vf_mac);

	/* process remaining reset events */
	igb_vf_reset(adapter, vf);
}

static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
	u32 reg, msgbuf[3] = {};
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* process all the same items cleared in a function level reset */
	igb_vf_reset(adapter, vf);

	/* set vf mac address */
	igb_set_vf_mac(adapter, vf, vf_mac);

	/* enable transmit and receive for vf */
	reg = rd32(E1000_VFTE);
	wr32(E1000_VFTE, reg | BIT(vf));
	reg = rd32(E1000_VFRE);
	wr32(E1000_VFRE, reg | BIT(vf));

	adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;

	/* reply to reset with ack and vf mac address */
	if (!is_zero_ether_addr(vf_mac)) {
		msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
		memcpy(addr, vf_mac, ETH_ALEN);
	} else {
		msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_NACK;
	}
	igb_write_mbx(hw, msgbuf, 3, vf);
}

static void igb_flush_mac_table(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	for (i = 0; i < hw->mac.rar_entry_count; i++) {
		adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE;
		eth_zero_addr(adapter->mac_table[i].addr);
		adapter->mac_table[i].queue = 0;
		igb_rar_set_index(adapter, i);
	}
}

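/* Count the receive-address (RAR) slots still usable for @queue.  The
 * tail of the RAR table is reserved for VF MAC addresses, default
 * entries never count as free, and in-use entries only count when they
 * are already bound to the same queue (they can then be overridden in
 * place).
 */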
static int igb_available_rars(struct igb_adapter *adapter, u8 queue)
{
	struct e1000_hw *hw = &adapter->hw;
	/* do not count rar entries reserved for VFs MAC addresses */
	int rar_entries = hw->mac.rar_entry_count -
			  adapter->vfs_allocated_count;
	int i, count = 0;

	for (i = 0; i < rar_entries; i++) {
		/* do not count default entries */
		if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT)
			continue;

		/* do not count "in use" entries for different queues */
		if ((adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE) &&
		    (adapter->mac_table[i].queue != queue))
			continue;

		count++;
	}

	return count;
}

/* Set default MAC address for the PF in the first RAR entry */
static void igb_set_default_mac_filter(struct igb_adapter *adapter)
{
	struct igb_mac_addr *mac_table = &adapter->mac_table[0];

	ether_addr_copy(mac_table->addr, adapter->hw.mac.addr);
	mac_table->queue = adapter->vfs_allocated_count;
	mac_table->state = IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE;

	igb_rar_set_index(adapter, 0);
}

/* If the filter to be added and an already existing filter express
 * the same address and address type, it should be possible to only
 * override the other configurations, for example the queue to steer
 * traffic.
 */
static bool igb_mac_entry_can_be_used(const struct igb_mac_addr *entry,
				      const u8 *addr, const u8 flags)
{
	if (!(entry->state & IGB_MAC_STATE_IN_USE))
		return true;

	if ((entry->state & IGB_MAC_STATE_SRC_ADDR) !=
	    (flags & IGB_MAC_STATE_SRC_ADDR))
		return false;

	if (!ether_addr_equal(addr, entry->addr))
		return false;

	return true;
}

/* Add a MAC filter for 'addr' directing matching traffic to 'queue',
 * 'flags' is used to indicate what kind of match is made, match is by
 * default for the destination address, if matching by source address
 * is desired the flag IGB_MAC_STATE_SRC_ADDR can be used.
 */
static int igb_add_mac_filter_flags(struct igb_adapter *adapter,
				    const u8 *addr, const u8 queue,
				    const u8 flags)
{
	struct e1000_hw *hw = &adapter->hw;
	int rar_entries = hw->mac.rar_entry_count -
			  adapter->vfs_allocated_count;
	int i;

	if (is_zero_ether_addr(addr))
		return -EINVAL;

	/* Search for the first empty entry in the MAC table.
	 * Do not touch entries at the end of the table reserved for the VF MAC
	 * addresses.
	 */
	for (i = 0; i < rar_entries; i++) {
		if (!igb_mac_entry_can_be_used(&adapter->mac_table[i],
					       addr, flags))
			continue;

		ether_addr_copy(adapter->mac_table[i].addr, addr);
		adapter->mac_table[i].queue = queue;
		adapter->mac_table[i].state |= IGB_MAC_STATE_IN_USE | flags;

		igb_rar_set_index(adapter, i);
		return i;
	}

	return -ENOSPC;
}

static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr,
			      const u8 queue)
{
	return igb_add_mac_filter_flags(adapter, addr, queue, 0);
}

/* Remove a MAC filter for 'addr' directing matching traffic to
 * 'queue', 'flags' is used to indicate what kind of match need to be
 * removed, match is by default for the destination address, if
 * matching by source address is to be removed the flag
 * IGB_MAC_STATE_SRC_ADDR can be used.
 */
static int igb_del_mac_filter_flags(struct igb_adapter *adapter,
				    const u8 *addr, const u8 queue,
				    const u8 flags)
{
	struct e1000_hw *hw = &adapter->hw;
	int rar_entries = hw->mac.rar_entry_count -
			  adapter->vfs_allocated_count;
	int i;

	if (is_zero_ether_addr(addr))
		return -EINVAL;

	/* Search for matching entry in the MAC table based on given address
	 * and queue. Do not touch entries at the end of the table reserved
	 * for the VF MAC addresses.
	 */
	for (i = 0; i < rar_entries; i++) {
		if (!(adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE))
			continue;
		if ((adapter->mac_table[i].state & flags) != flags)
			continue;
		if (adapter->mac_table[i].queue != queue)
			continue;
		if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
			continue;

		/* When a filter for the default address is "deleted",
		 * we return it to its initial configuration
		 */
		if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT) {
			adapter->mac_table[i].state =
				IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE;
			adapter->mac_table[i].queue =
				adapter->vfs_allocated_count;
		} else {
			adapter->mac_table[i].state = 0;
			adapter->mac_table[i].queue = 0;
			eth_zero_addr(adapter->mac_table[i].addr);
		}

		igb_rar_set_index(adapter, i);
		return 0;
	}

	return -ENOENT;
}

static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr,
			      const u8 queue)
{
	return igb_del_mac_filter_flags(adapter, addr, queue, 0);
}

int igb_add_mac_steering_filter(struct igb_adapter *adapter,
				const u8 *addr, u8 queue, u8 flags)
{
	struct e1000_hw *hw = &adapter->hw;

	/* In theory, this should be supported on 82575 as well, but
	 * that part wasn't easily accessible during development.
	 */
	if (hw->mac.type != e1000_i210)
		return -EOPNOTSUPP;

	return igb_add_mac_filter_flags(adapter, addr, queue,
					IGB_MAC_STATE_QUEUE_STEERING | flags);
}

int igb_del_mac_steering_filter(struct igb_adapter *adapter,
				const u8 *addr, u8 queue, u8 flags)
{
	return igb_del_mac_filter_flags(adapter, addr, queue,
					IGB_MAC_STATE_QUEUE_STEERING | flags);
}

static int igb_uc_sync(struct net_device *netdev, const unsigned char *addr)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	int ret;

	ret = igb_add_mac_filter(adapter, addr, adapter->vfs_allocated_count);

	return min_t(int, ret, 0);
}

static int igb_uc_unsync(struct net_device *netdev, const unsigned char *addr)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	igb_del_mac_filter(adapter, addr, adapter->vfs_allocated_count);

	return 0;
}

static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
				 const u32 info, const u8 *addr)
{
	struct pci_dev *pdev = adapter->pdev;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	struct vf_mac_filter *entry;
	bool found = false;
	int ret = 0;

	if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
	    !vf_data->trusted) {
		dev_warn(&pdev->dev,
			 "VF %d requested MAC filter but is administratively denied\n",
			 vf);
		return -EINVAL;
	}
	if (!is_valid_ether_addr(addr)) {
		dev_warn(&pdev->dev,
			 "VF %d attempted to set invalid MAC filter\n",
			 vf);
		return -EINVAL;
	}

	switch (info) {
	case E1000_VF_MAC_FILTER_CLR:
		/* remove all unicast MAC filters related to the current VF */
		list_for_each_entry(entry, &adapter->vf_macs.l, l) {
			if (entry->vf == vf) {
				entry->vf = -1;
				entry->free = true;
				igb_del_mac_filter(adapter, entry->vf_mac, vf);
			}
		}
		break;
	case E1000_VF_MAC_FILTER_ADD:
		/* try to find empty slot in the list */
		list_for_each_entry(entry, &adapter->vf_macs.l, l) {
			if (entry->free) {
				found = true;
				break;
			}
		}

		if (found) {
			entry->free = false;
			entry->vf = vf;
			ether_addr_copy(entry->vf_mac, addr);

			ret = igb_add_mac_filter(adapter, addr, vf);
			ret = min_t(int, ret, 0);
		} else {
			ret = -ENOSPC;
		}

		if (ret == -ENOSPC)
			dev_warn(&pdev->dev,
				 "VF %d has requested MAC filter but there is no space for it\n",
				 vf);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
{
	struct pci_dev *pdev = adapter->pdev;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	u32 info = msg[0] & E1000_VT_MSGINFO_MASK;

	/* The VF MAC Address is stored in a packed array of bytes
	 * starting at the second 32 bit word of the msg array
	 */
	unsigned char *addr = (unsigned char *)&msg[1];
	int ret = 0;

	if (!info) {
		if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
		    !vf_data->trusted) {
			dev_warn(&pdev->dev,
				 "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n",
				 vf);
			return -EINVAL;
		}

		if (!is_valid_ether_addr(addr)) {
			dev_warn(&pdev->dev,
				 "VF %d attempted to set invalid MAC\n",
				 vf);
			return -EINVAL;
		}

		ret = igb_set_vf_mac(adapter, vf, addr);
	} else {
		ret = igb_set_vf_mac_filter(adapter, vf, info, addr);
	}

	return ret;
}

static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	u32 msg = E1000_VT_MSGTYPE_NACK;

	/* if device isn't clear to send it shouldn't be reading either */
	if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
	    time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
		igb_write_mbx(hw, &msg, 1, vf);
		vf_data->last_nack = jiffies;
	}
}

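/* Service one mailbox message from @vf.  The general flow, as
 * implemented below: reading the mailbox takes the per-VF mailbox
 * lock; E1000_VF_RESET is handled first because a VF may not configure
 * anything until it has completed the reset handshake (tracked via
 * IGB_VF_FLAG_CTS); every other request is dispatched on the low 16
 * bits of word 0 and answered with the request word ORed with ACK on
 * success or NACK on failure.  igb_vf_reset_msg() and igb_write_mbx()
 * drop the mailbox lock themselves, hence the explicit
 * igb_unlock_mbx() on the paths that send no reply at all.
 */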
static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 msgbuf[E1000_VFMAILBOX_SIZE];
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	s32 retval;

	retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf, false);

	if (retval) {
		/* if receive failed revoke VF CTS stats and restart init */
		dev_err(&pdev->dev, "Error receiving message from VF\n");
		vf_data->flags &= ~IGB_VF_FLAG_CTS;
		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
			goto unlock;
		goto out;
	}

	/* this is a message we already processed, do nothing */
	if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
		goto unlock;

	/* until the vf completes a reset it should not be
	 * allowed to start any configuration.
	 */
	if (msgbuf[0] == E1000_VF_RESET) {
		/* unlocks mailbox */
		igb_vf_reset_msg(adapter, vf);
		return;
	}

	if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
			goto unlock;
		retval = -1;
		goto out;
	}

	switch ((msgbuf[0] & 0xFFFF)) {
	case E1000_VF_SET_MAC_ADDR:
		retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_PROMISC:
		retval = igb_set_vf_promisc(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_MULTICAST:
		retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_LPE:
		retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
		break;
	case E1000_VF_SET_VLAN:
		retval = -1;
		if (vf_data->pf_vlan)
			dev_warn(&pdev->dev,
				 "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n",
				 vf);
		else
			retval = igb_set_vf_vlan_msg(adapter, msgbuf, vf);
		break;
	default:
		dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
		retval = -1;
		break;
	}

	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
out:
	/* notify the VF of the results of what it sent us */
	if (retval)
		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= E1000_VT_MSGTYPE_ACK;

	/* unlocks mailbox */
	igb_write_mbx(hw, msgbuf, 1, vf);
	return;

unlock:
	igb_unlock_mbx(hw, vf);
}

static void igb_msg_task(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned long flags;
	u32 vf;

	spin_lock_irqsave(&adapter->vfs_lock, flags);
	for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
		/* process any reset requests */
		if (!igb_check_for_rst(hw, vf))
			igb_vf_reset_event(adapter, vf);

		/* process any messages pending */
		if (!igb_check_for_msg(hw, vf))
			igb_rcv_msg_from_vf(adapter, vf);

		/* process any acks */
		if (!igb_check_for_ack(hw, vf))
			igb_rcv_ack_from_vf(adapter, vf);
	}
	spin_unlock_irqrestore(&adapter->vfs_lock, flags);
}

/**
 * igb_set_uta - Set unicast filter table address
 * @adapter: board private structure
 * @set: boolean indicating if we are setting or clearing bits
 *
 * The unicast table address is a register array of 32-bit registers.
 * The table is meant to be used in a way similar to how the MTA is used;
 * however, due to certain limitations in the hardware, it is necessary to
 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 * enable bit to allow vlan tag stripping when promiscuous mode is enabled.
 **/
static void igb_set_uta(struct igb_adapter *adapter, bool set)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 uta = set ? ~0 : 0;
	int i;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return;

	for (i = hw->mac.uta_reg_count; i--;)
		array_wr32(E1000_UTA, i, uta);
}

/**
 * igb_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr_msi(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* read ICR disables interrupts using IAM */
	u32 icr = rd32(E1000_ICR);

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (icr & E1000_ICR_TS)
		igb_tsync_interrupt(adapter);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * igb_intr - Legacy Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
	 * need for the IMC write
	 */
	u32 icr = rd32(E1000_ICR);

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt
	 */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (icr & E1000_ICR_TS)
		igb_tsync_interrupt(adapter);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
	    (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
		if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
			igb_set_itr(q_vector);
		else
			igb_update_ring_itr(q_vector);
	}

	if (!test_bit(__IGB_DOWN, &adapter->state)) {
		if (adapter->flags & IGB_FLAG_HAS_MSIX)
			wr32(E1000_EIMS, q_vector->eims_value);
		else
			igb_irq_enable(adapter);
	}
}

/**
 * igb_poll - NAPI Rx polling callback
 * @napi: napi polling structure
 * @budget: count of how many packets we should handle
 **/
static int igb_poll(struct napi_struct *napi, int budget)
{
	struct igb_q_vector *q_vector = container_of(napi,
						     struct igb_q_vector,
						     napi);
	bool clean_complete = true;
	int work_done = 0;

#ifdef CONFIG_IGB_DCA
	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
		igb_update_dca(q_vector);
#endif
	if (q_vector->tx.ring)
		clean_complete = igb_clean_tx_irq(q_vector, budget);

	if (q_vector->rx.ring) {
		int cleaned = igb_clean_rx_irq(q_vector, budget);

		work_done += cleaned;
		if (cleaned >= budget)
			clean_complete = false;
	}

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;

	/* Exit the polling mode, but don't re-enable interrupts if stack might
	 * poll us due to busy-polling
	 */
	if (likely(napi_complete_done(napi, work_done)))
		igb_ring_irq_enable(q_vector);

	return work_done;
}

/**
 * igb_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: pointer to q_vector containing needed info
 * @napi_budget: Used to determine if we are in netpoll
 *
 * returns true if ring is completely cleaned
 **/
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct igb_ring *tx_ring = q_vector->tx.ring;
	struct igb_tx_buffer *tx_buffer;
	union e1000_adv_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = q_vector->tx.work_limit;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__IGB_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IGB_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

		/* free the skb */
		if (tx_buffer->type == IGB_TYPE_SKB)
			napi_consume_skb(tx_buffer->skb, napi_budget);
		else
			xdp_return_frame(tx_buffer->xdpf);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		dma_unmap_len_set(tx_buffer, len, 0);

		/* clear last DMA location and unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IGB_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IGB_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);
	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->tx_syncp);
	tx_ring->tx_stats.bytes += total_bytes;
	tx_ring->tx_stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->tx_syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
		struct e1000_hw *hw = &adapter->hw;

		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i
		 */
		clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
		if (tx_buffer->next_to_watch &&
		    time_after(jiffies, tx_buffer->time_stamp +
			       (adapter->tx_timeout_factor * HZ)) &&
		    !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			dev_err(tx_ring->dev,
				"Detected Tx Unit Hang\n"
				"  Tx Queue       <%d>\n"
				"  TDH            <%x>\n"
				"  TDT            <%x>\n"
				"  next_to_use    <%x>\n"
				"  next_to_clean  <%x>\n"
				"buffer_info[next_to_clean]\n"
				"  time_stamp     <%lx>\n"
				"  next_to_watch  <%p>\n"
				"  jiffies        <%lx>\n"
				"  desc.status    <%x>\n",
				tx_ring->queue_index,
				rd32(E1000_TDH(tx_ring->reg_idx)),
				readl(tx_ring->tail),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_buffer->time_stamp,
				tx_buffer->next_to_watch,
				jiffies,
				tx_buffer->next_to_watch->wb.status);
			netif_stop_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

			/* we are about to reset, no point in enabling stuff */
			return true;
		}
	}

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets &&
		     netif_carrier_ok(tx_ring->netdev) &&
		     igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !(test_bit(__IGB_DOWN, &adapter->state))) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

			u64_stats_update_begin(&tx_ring->tx_syncp);
			tx_ring->tx_stats.restart_queue++;
			u64_stats_update_end(&tx_ring->tx_syncp);
		}
	}

	return !!budget;
}

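/* A minimal illustrative sketch (not called anywhere in this driver) of
 * the biased ring-index idiom used by igb_clean_tx_irq() above and
 * igb_alloc_rx_buffers() further below: the index is offset by
 * -ring->count so it runs from -count up to -1, letting a wrap-around
 * be detected with a single "!i" test instead of a compare against
 * ring->count.  The helper name is hypothetical and exists only to
 * demonstrate the pattern.
 */
static inline int igb_example_advance_biased_index(int i, u16 count)
{
	i++;			/* step to the next descriptor */
	if (unlikely(!i))	/* crossing zero means the ring wrapped */
		i -= count;	/* rebias back to the start, i.e. -count */
	return i;
}
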
/**
 * igb_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
static void igb_reuse_rx_page(struct igb_ring *rx_ring,
			      struct igb_rx_buffer *old_buff)
{
	struct igb_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_buffer_info[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls.
	 */
	new_buff->dma = old_buff->dma;
	new_buff->page = old_buff->page;
	new_buff->page_offset = old_buff->page_offset;
	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}

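/* Decide whether an Rx page can be recycled rather than unmapped and
 * freed.  The driver holds a large reference bias on each page (see
 * igb_alloc_mapped_page(), which raises the refcount to USHRT_MAX) and
 * tracks its own usage in pagecnt_bias, so "we are the only owner" can
 * be checked without an atomic get/put per frame: on 4K pages the page
 * is split into two halves that flip between hardware and the stack,
 * while on larger pages the offset simply advances until
 * IGB_LAST_OFFSET is reached.
 */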
static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
				  int rx_buf_pgcnt)
{
	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
	struct page *page = rx_buffer->page;

	/* avoid re-using remote and pfmemalloc pages */
	if (!dev_page_is_reusable(page))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
		return false;
#else
#define IGB_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IGB_RXBUFFER_2048)

	if (rx_buffer->page_offset > IGB_LAST_OFFSET)
		return false;
#endif

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buffer->pagecnt_bias = USHRT_MAX;
	}

	return true;
}

/**
 * igb_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: size of buffer to be added
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 **/
static void igb_add_rx_frag(struct igb_ring *rx_ring,
			    struct igb_rx_buffer *rx_buffer,
			    struct sk_buff *skb,
			    unsigned int size)
{
#if (PAGE_SIZE < 8192)
	unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = ring_uses_build_skb(rx_ring) ?
				SKB_DATA_ALIGN(IGB_SKB_PAD + size) :
				SKB_DATA_ALIGN(size);
#endif
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
			rx_buffer->page_offset, size, truesize);
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif
}

static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
					 struct igb_rx_buffer *rx_buffer,
					 struct xdp_buff *xdp,
					 ktime_t timestamp)
{
#if (PAGE_SIZE < 8192)
	unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
					       xdp->data_hard_start);
#endif
	unsigned int size = xdp->data_end - xdp->data;
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(xdp->data);

	/* allocate a skb to store the frags */
	skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
	if (unlikely(!skb))
		return NULL;

	if (timestamp)
		skb_hwtstamps(skb)->hwtstamp = timestamp;

	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > IGB_RX_HDR_LEN)
		headlen = eth_get_headlen(skb->dev, xdp->data, IGB_RX_HDR_LEN);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, sizeof(long)));

	/* update all of the pointers */
	size -= headlen;
	if (size) {
		skb_add_rx_frag(skb, 0, rx_buffer->page,
				(xdp->data + headlen) - page_address(rx_buffer->page),
				size, truesize);
#if (PAGE_SIZE < 8192)
		rx_buffer->page_offset ^= truesize;
#else
		rx_buffer->page_offset += truesize;
#endif
	} else {
		rx_buffer->pagecnt_bias++;
	}

	return skb;
}

static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
				     struct igb_rx_buffer *rx_buffer,
				     struct xdp_buff *xdp,
				     ktime_t timestamp)
{
#if (PAGE_SIZE < 8192)
	unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(xdp->data_end -
					       xdp->data_hard_start);
#endif
	unsigned int metasize = xdp->data - xdp->data_meta;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(xdp->data_meta);

	/* build an skb around the page buffer */
	skb = napi_build_skb(xdp->data_hard_start, truesize);
	if (unlikely(!skb))
		return NULL;

	/* update pointers within the skb to store the data */
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	__skb_put(skb, xdp->data_end - xdp->data);

	if (metasize)
		skb_metadata_set(skb, metasize);

	if (timestamp)
		skb_hwtstamps(skb)->hwtstamp = timestamp;

	/* update buffer offset */
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif

	return skb;
}

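/* Run the attached XDP program on one frame.  The verdict is smuggled
 * back to igb_clean_rx_irq() as an error pointer: IGB_XDP_PASS is
 * defined as zero in igb.h, so a pass comes back as NULL and the
 * caller goes on to build an skb, while ERR_PTR(-IGB_XDP_TX),
 * ERR_PTR(-IGB_XDP_REDIR) and ERR_PTR(-IGB_XDP_CONSUMED) mean the
 * frame was transmitted back out, redirected, or dropped; the caller
 * decodes those verdicts with PTR_ERR().
 */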
static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter,
				   struct igb_ring *rx_ring,
				   struct xdp_buff *xdp)
{
	int err, result = IGB_XDP_PASS;
	struct bpf_prog *xdp_prog;
	u32 act;

	xdp_prog = READ_ONCE(rx_ring->xdp_prog);

	if (!xdp_prog)
		goto xdp_out;

	prefetchw(xdp->data_hard_start); /* xdp_frame write */

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		result = igb_xdp_xmit_back(adapter, xdp);
		if (result == IGB_XDP_CONSUMED)
			goto out_failure;
		break;
	case XDP_REDIRECT:
		err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
		if (err)
			goto out_failure;
		result = IGB_XDP_REDIR;
		break;
	default:
		bpf_warn_invalid_xdp_action(adapter->netdev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		result = IGB_XDP_CONSUMED;
		break;
	}
xdp_out:
	return ERR_PTR(-result);
}

static unsigned int igb_rx_frame_truesize(struct igb_ring *rx_ring,
					  unsigned int size)
{
	unsigned int truesize;

#if (PAGE_SIZE < 8192)
	truesize = igb_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
#else
	truesize = ring_uses_build_skb(rx_ring) ?
		   SKB_DATA_ALIGN(IGB_SKB_PAD + size) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
		   SKB_DATA_ALIGN(size);
#endif
	return truesize;
}

static void igb_rx_buffer_flip(struct igb_ring *rx_ring,
			       struct igb_rx_buffer *rx_buffer,
			       unsigned int size)
{
	unsigned int truesize = igb_rx_frame_truesize(rx_ring, size);
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif
}

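/* Transcribe the hardware checksum verdict for one frame into
 * skb->ip_summed.  The descriptor's IXSM bit, a disabled NETIF_F_RXCSUM
 * feature, and a known SCTP errata case (the L4E bit is raised
 * spuriously on minimum-length frames) all leave the skb at
 * CHECKSUM_NONE so the stack verifies the packet itself; only a clean
 * TCP/UDP checksum status is promoted to CHECKSUM_UNNECESSARY.
 */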
static inline void igb_rx_checksum(struct igb_ring *ring,
				   union e1000_adv_rx_desc *rx_desc,
				   struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Ignore Checksum bit is set */
	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
		return;

	/* Rx checksum disabled via ethtool */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (igb_test_staterr(rx_desc,
			     E1000_RXDEXT_STATERR_TCPE |
			     E1000_RXDEXT_STATERR_IPE)) {
		/* work around errata with sctp packets where the TCPE aka
		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
		 * packets, (aka let the stack check the crc32c)
		 */
		if (!((skb->len == 60) &&
		      test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
			u64_stats_update_begin(&ring->rx_syncp);
			ring->rx_stats.csum_err++;
			u64_stats_update_end(&ring->rx_syncp);
		}
		/* let the stack verify checksum errors */
		return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
				      E1000_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	dev_dbg(ring->dev, "cksum success: bits %08X\n",
		le32_to_cpu(rx_desc->wb.upper.status_error));
}

static inline void igb_rx_hash(struct igb_ring *ring,
			       union e1000_adv_rx_desc *rx_desc,
			       struct sk_buff *skb)
{
	if (ring->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb,
			     le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
			     PKT_HASH_TYPE_L3);
}

/**
 * igb_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean. If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool igb_is_non_eop(struct igb_ring *rx_ring,
			   union e1000_adv_rx_desc *rx_desc)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(IGB_RX_DESC(rx_ring, ntc));

	if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)))
		return false;

	return true;
}

/**
 * igb_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool igb_cleanup_headers(struct igb_ring *rx_ring,
				union e1000_adv_rx_desc *rx_desc,
				struct sk_buff *skb)
{
	/* XDP packets use error pointer so abort at this point */
	if (IS_ERR(skb))
		return true;

	if (unlikely((igb_test_staterr(rx_desc,
				       E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
		struct net_device *netdev = rx_ring->netdev;

		if (!(netdev->features & NETIF_F_RXALL)) {
			dev_kfree_skb_any(skb);
			return true;
		}
	}

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

/**
 * igb_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
 * other fields within the skb.
 **/
static void igb_process_skb_fields(struct igb_ring *rx_ring,
				   union e1000_adv_rx_desc *rx_desc,
				   struct sk_buff *skb)
{
	struct net_device *dev = rx_ring->netdev;

	igb_rx_hash(rx_ring, rx_desc, skb);

	igb_rx_checksum(rx_ring, rx_desc, skb);

	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
	    !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
		igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);

	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
		u16 vid;

		if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
		    test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
			vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan);
		else
			vid = le16_to_cpu(rx_desc->wb.upper.vlan);

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}

	skb_record_rx_queue(skb, rx_ring->queue_index);

	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}

static unsigned int igb_rx_offset(struct igb_ring *rx_ring)
{
	return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0;
}

static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
					       const unsigned int size, int *rx_buf_pgcnt)
{
	struct igb_rx_buffer *rx_buffer;

	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
	*rx_buf_pgcnt =
#if (PAGE_SIZE < 8192)
		page_count(rx_buffer->page);
#else
		0;
#endif
	prefetchw(rx_buffer->page);

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      size,
				      DMA_FROM_DEVICE);

	rx_buffer->pagecnt_bias--;

	return rx_buffer;
}

static void igb_put_rx_buffer(struct igb_ring *rx_ring,
			      struct igb_rx_buffer *rx_buffer, int rx_buf_pgcnt)
{
	if (igb_can_reuse_rx_page(rx_buffer, rx_buf_pgcnt)) {
		/* hand second half of page back to the ring */
		igb_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		/* We are not reusing the buffer so unmap it and free
		 * any references we are holding to it
		 */
		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
				     igb_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
				     IGB_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buffer->page,
					rx_buffer->pagecnt_bias);
	}

	/* clear contents of rx_buffer */
	rx_buffer->page = NULL;
}

static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
{
	unsigned int total_bytes = 0, total_packets = 0;
	struct igb_adapter *adapter = q_vector->adapter;
	struct igb_ring *rx_ring = q_vector->rx.ring;
	u16 cleaned_count = igb_desc_unused(rx_ring);
	struct sk_buff *skb = rx_ring->skb;
	int cpu = smp_processor_id();
	unsigned int xdp_xmit = 0;
	struct netdev_queue *nq;
	struct xdp_buff xdp;
	u32 frame_sz = 0;
	int rx_buf_pgcnt;

	/* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
	frame_sz = igb_rx_frame_truesize(rx_ring, 0);
#endif
	xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);

	while (likely(total_packets < budget)) {
		union e1000_adv_rx_desc *rx_desc;
		struct igb_rx_buffer *rx_buffer;
		ktime_t timestamp = 0;
		int pkt_offset = 0;
		unsigned int size;
		void *pktbuf;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
			igb_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
		size = le16_to_cpu(rx_desc->wb.upper.length);
		if (!size)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		rx_buffer = igb_get_rx_buffer(rx_ring, size, &rx_buf_pgcnt);
		pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset;

		/* pull rx packet timestamp if available and valid */
		if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
			int ts_hdr_len;

			ts_hdr_len = igb_ptp_rx_pktstamp(rx_ring->q_vector,
							 pktbuf, &timestamp);

			pkt_offset += ts_hdr_len;
			size -= ts_hdr_len;
		}

		/* retrieve a buffer from the ring */
		if (!skb) {
			unsigned char *hard_start = pktbuf - igb_rx_offset(rx_ring);
			unsigned int offset = pkt_offset + igb_rx_offset(rx_ring);

			xdp_prepare_buff(&xdp, hard_start, offset, size, true);
			xdp_buff_clear_frags_flag(&xdp);
#if (PAGE_SIZE > 4096)
			/* At larger PAGE_SIZE, frame_sz depends on frame length */
			xdp.frame_sz = igb_rx_frame_truesize(rx_ring, size);
#endif
			skb = igb_run_xdp(adapter, rx_ring, &xdp);
		}

		if (IS_ERR(skb)) {
			unsigned int xdp_res = -PTR_ERR(skb);

			if (xdp_res & (IGB_XDP_TX | IGB_XDP_REDIR)) {
				xdp_xmit |= xdp_res;
				igb_rx_buffer_flip(rx_ring, rx_buffer, size);
			} else {
				rx_buffer->pagecnt_bias++;
			}
			total_packets++;
			total_bytes += size;
		} else if (skb)
			igb_add_rx_frag(rx_ring, rx_buffer, skb, size);
		else if (ring_uses_build_skb(rx_ring))
			skb = igb_build_skb(rx_ring, rx_buffer, &xdp,
					    timestamp);
		else
			skb = igb_construct_skb(rx_ring, rx_buffer,
						&xdp, timestamp);

		/* exit if we failed to retrieve a buffer */
		if (!skb) {
			rx_ring->rx_stats.alloc_failed++;
			rx_buffer->pagecnt_bias++;
			break;
		}

		igb_put_rx_buffer(rx_ring, rx_buffer, rx_buf_pgcnt);
		cleaned_count++;

		/* fetch next buffer in frame if non-eop */
		if (igb_is_non_eop(rx_ring, rx_desc))
			continue;

		/* verify the packet layout is correct */
		if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_bytes += skb->len;

		/* populate checksum, timestamp, VLAN, and protocol */
		igb_process_skb_fields(rx_ring, rx_desc, skb);

		napi_gro_receive(&q_vector->napi, skb);

		/* reset skb pointer */
		skb = NULL;

		/* update budget accounting */
		total_packets++;
	}

	/* place incomplete frames back on ring for completion */
	rx_ring->skb = skb;

	if (xdp_xmit & IGB_XDP_REDIR)
		xdp_do_flush();

	if (xdp_xmit & IGB_XDP_TX) {
		struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);

		nq = txring_txq(tx_ring);
		__netif_tx_lock(nq, cpu);
		igb_xdp_ring_update_tail(tx_ring);
		__netif_tx_unlock(nq);
	}

	u64_stats_update_begin(&rx_ring->rx_syncp);
	rx_ring->rx_stats.packets += total_packets;
	rx_ring->rx_stats.bytes += total_bytes;
	u64_stats_update_end(&rx_ring->rx_syncp);
	q_vector->rx.total_packets += total_packets;
	q_vector->rx.total_bytes += total_bytes;

	if (cleaned_count)
		igb_alloc_rx_buffers(rx_ring, cleaned_count);

	return total_packets;
}

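/* Ensure @bi owns a DMA-mapped page.  Pages are normally recycled via
 * igb_reuse_rx_page(), so the allocation below is the slow path; a
 * fresh page is mapped with IGB_RX_DMA_ATTR and immediately given the
 * large USHRT_MAX reference bias that makes per-frame recycling cheap.
 */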
static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
				  struct igb_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(igb_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
				 igb_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE,
				 IGB_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, igb_rx_pg_order(rx_ring));

		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = igb_rx_offset(rx_ring);
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;

	return true;
}

/**
 * igb_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: rx descriptor ring to allocate new receive buffers
 * @cleaned_count: count of buffers to allocate
 **/
void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
{
	union e1000_adv_rx_desc *rx_desc;
	struct igb_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;
	u16 bufsz;

	/* nothing to do */
	if (!cleaned_count)
		return;

	rx_desc = IGB_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	bufsz = igb_rx_bufsz(rx_ring);

	do {
		if (!igb_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset, bufsz,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IGB_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the length for the next_to_use descriptor */
		rx_desc->wb.upper.length = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		/* record the next descriptor to use */
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		dma_wmb();
		writel(i, rx_ring->tail);
	}
}

/**
 * igb_mii_ioctl - Handle MII register ioctls
 * @netdev: pointer to netdev struct
 * @ifr: interface structure
 * @cmd: ioctl command to execute
 **/
static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
				     &data->val_out))
			return -EIO;
		break;
	case SIOCSMIIREG:
		if (igb_write_phy_reg(&adapter->hw, data->reg_num & 0x1F,
				      data->val_in))
			return -EIO;
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

/**
 * igb_ioctl - Dispatch device ioctls
 * @netdev: pointer to netdev struct
 * @ifr: interface structure
 * @cmd: ioctl command to execute
 **/
static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return igb_mii_ioctl(netdev, ifr, cmd);
	case SIOCGHWTSTAMP:
		return igb_ptp_get_ts_config(netdev, ifr);
	case SIOCSHWTSTAMP:
		return igb_ptp_set_ts_config(netdev, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;

	pci_read_config_word(adapter->pdev, reg, value);
}

void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;

	pci_write_config_word(adapter->pdev, reg, *value);
}

s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;

	if (pcie_capability_read_word(adapter->pdev, reg, value))
		return -E1000_ERR_CONFIG;

	return 0;
}

s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;

	if (pcie_capability_write_word(adapter->pdev, reg, *value))
		return -E1000_ERR_CONFIG;

	return 0;
}

static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl;
	bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);

	if (enable) {
		/* enable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);

		/* Disable CFI check */
		rctl = rd32(E1000_RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		wr32(E1000_RCTL, rctl);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl &= ~E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);
	}

	igb_set_vf_vlan_strip(adapter, adapter->vfs_allocated_count, enable);
}

static int igb_vlan_rx_add_vid(struct net_device *netdev,
			       __be16 proto, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;

	/* add the filter since PF can receive vlans w/o entry in vlvf */
	if (!vid || !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
		igb_vfta_set(hw, vid, pf_id, true, !!vid);

	set_bit(vid, adapter->active_vlans);

	return 0;
}

static int igb_vlan_rx_kill_vid(struct net_device *netdev,
				__be16 proto, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	int pf_id = adapter->vfs_allocated_count;
	struct e1000_hw *hw = &adapter->hw;

	/* remove VID from filter table */
	if (vid && !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
		igb_vfta_set(hw, vid, pf_id, false, true);

	clear_bit(vid, adapter->active_vlans);

	return 0;
}

static void igb_restore_vlan(struct igb_adapter *adapter)
{
	u16 vid = 1;

	igb_vlan_mode(adapter->netdev, adapter->netdev->features);
	igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);

	for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
		igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}

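/* Force link speed and duplex.  Reached through the ethtool
 * set_link_ksettings path when autonegotiation is turned off, e.g.
 * (illustrative invocation, the interface name is a placeholder):
 *
 *	ethtool -s eth0 speed 100 duplex full autoneg off
 */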
int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_mac_info *mac = &adapter->hw.mac;

	mac->autoneg = 0;

	/* Make sure dplx is at most 1 bit and lsb of speed is not set
	 * for the switch() below to work
	 */
	if ((spd & 1) || (dplx & ~1))
		goto err_inval;

	/* Fiber NICs only allow 1000 Mbps full duplex, plus 100 Mbps
	 * full duplex for 100BaseFX SFP modules.
	 */
9317 if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
9318 switch (spd + dplx) {
9319 case SPEED_10 + DUPLEX_HALF:
9320 case SPEED_10 + DUPLEX_FULL:
9321 case SPEED_100 + DUPLEX_HALF:
9322 goto err_inval;
9323 default:
9324 break;
9325 }
9326 }
9327
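	/* Because the SPEED_* values are even multiples of 10 and
	 * DUPLEX_HALF/DUPLEX_FULL are 0/1, each spd + dplx sum is unique:
	 * e.g. SPEED_100 + DUPLEX_FULL = 101 while SPEED_1000 + DUPLEX_HALF
	 * = 1000, so one switch can decode both parameters at once.
	 */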
	switch (spd + dplx) {
	case SPEED_10 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	case SPEED_10 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_10_FULL;
		break;
	case SPEED_100 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case SPEED_100 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_100_FULL;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		mac->autoneg = 1;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		goto err_inval;
	}

	/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
	adapter->hw.phy.mdix = AUTO_ALL_MODES;

	return 0;

err_inval:
	dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
	return -EINVAL;
}

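/* Common suspend path for system sleep, runtime suspend and shutdown:
 * detach and close the netdev, then either arm the Wake Up Filter Control
 * (WUFC) register for the requested wake events or clear it entirely.
 * Runtime suspend only arms link-change wake (E1000_WUFC_LNKC).
 */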
static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
			  bool runtime)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl, status;
	u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
	bool wake;

	rtnl_lock();
	netif_device_detach(netdev);

	if (netif_running(netdev))
		__igb_close(netdev, true);

	igb_ptp_suspend(adapter);

	igb_clear_interrupt_scheme(adapter);
	rtnl_unlock();

	status = rd32(E1000_STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		igb_setup_rctl(adapter);
		igb_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = rd32(E1000_RCTL);
			rctl |= E1000_RCTL_MPE;
			wr32(E1000_RCTL, rctl);
		}

		ctrl = rd32(E1000_CTRL);
		ctrl |= E1000_CTRL_ADVD3WUC;
		wr32(E1000_CTRL, ctrl);

		/* Allow time for pending master requests to run */
		igb_disable_pcie_master(hw);

		wr32(E1000_WUC, E1000_WUC_PME_EN);
		wr32(E1000_WUFC, wufc);
	} else {
		wr32(E1000_WUC, 0);
		wr32(E1000_WUFC, 0);
	}

	wake = wufc || adapter->en_mng_pt;
	if (!wake)
		igb_power_down_link(adapter);
	else
		igb_power_up_link(adapter);

	if (enable_wake)
		*enable_wake = wake;

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	igb_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}

static void igb_deliver_wake_packet(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sk_buff *skb;
	u32 wupl;

	wupl = rd32(E1000_WUPL) & E1000_WUPL_MASK;

	/* WUPM stores only the first 128 bytes of the wake packet.
	 * Read the packet only if we have the whole thing.
	 */
	if ((wupl == 0) || (wupl > E1000_WUPM_BYTES))
		return;

	skb = netdev_alloc_skb_ip_align(netdev, E1000_WUPM_BYTES);
	if (!skb)
		return;

	skb_put(skb, wupl);

	/* Ensure reads are 32-bit aligned */
	wupl = roundup(wupl, 4);

	memcpy_fromio(skb->data, hw->hw_addr + E1000_WUPM_REG(0), wupl);

	skb->protocol = eth_type_trans(skb, netdev);
	netif_rx(skb);
}

static int igb_suspend(struct device *dev)
{
	return __igb_shutdown(to_pci_dev(dev), NULL, 0);
}

static int __igb_resume(struct device *dev, bool rpm)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 err, val;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!pci_device_is_present(pdev))
		return -ENODEV;
	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"igb: Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (igb_init_interrupt_scheme(adapter, true)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igb_get_hw_control(adapter);

	val = rd32(E1000_WUS);
	if (val & WAKE_PKT_WUS)
		igb_deliver_wake_packet(netdev);

	wr32(E1000_WUS, ~0);

	if (!rpm)
		rtnl_lock();
	if (!err && netif_running(netdev))
		err = __igb_open(netdev, true);

	if (!err)
		netif_device_attach(netdev);
	if (!rpm)
		rtnl_unlock();

	return err;
}

static int igb_resume(struct device *dev)
{
	return __igb_resume(dev, false);
}

static int igb_runtime_idle(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (!igb_has_link(adapter))
		pm_schedule_suspend(dev, MSEC_PER_SEC * 5);

	return -EBUSY;
}

static int igb_runtime_suspend(struct device *dev)
{
	return __igb_shutdown(to_pci_dev(dev), NULL, 1);
}

static int igb_runtime_resume(struct device *dev)
{
	return __igb_resume(dev, true);
}

static void igb_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igb_shutdown(pdev, &wake, 0);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

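/* Backs the PCI sysfs SR-IOV interface: writing N to
 * /sys/bus/pci/devices/<bdf>/sriov_numvfs lands here with num_vfs == N,
 * and writing 0 tears the VFs back down.
 */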
static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	int err;

	if (num_vfs == 0) {
		return igb_disable_sriov(dev, true);
	} else {
		err = igb_enable_sriov(dev, num_vfs, true);
		return err ? err : num_vfs;
	}
#endif
	return 0;
}

/**
 * igb_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (state == pci_channel_io_normal) {
		dev_warn(&pdev->dev, "Non-correctable non-fatal error reported.\n");
		return PCI_ERS_RESULT_CAN_RECOVER;
	}

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igb_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first half of the __igb_resume routine.
 **/
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	pci_ers_result_t result;

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		/* In case of PCI error, the adapter loses its HW address
		 * so we should re-assign it here.
		 */
		hw->hw_addr = adapter->io_addr;

		igb_reset(adapter);
		wr32(E1000_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	return result;
}

/**
 * igb_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second half of the __igb_resume routine.
 */
static void igb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (!test_bit(__IGB_DOWN, &adapter->state)) {
			dev_dbg(&pdev->dev, "Resuming from non-fatal error, do nothing.\n");
			return;
		}
		if (igb_up(adapter)) {
			dev_err(&pdev->dev, "igb_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igb_get_hw_control(adapter);
}

/**
 * igb_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table
 * @adapter: Pointer to adapter structure
 * @index: Index of the RAR entry which needs to be synced with MAC table
 **/
static void igb_rar_set_index(struct igb_adapter *adapter, u32 index)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rar_low, rar_high;
	u8 *addr = adapter->mac_table[index].addr;

	/* HW expects these to be in network order when they are plugged
	 * into the registers, which are little endian. In order to guarantee
	 * that ordering we need to do a leXX_to_cpup here to be ready for
	 * the byteswap that occurs with writel.
	 */
	rar_low = le32_to_cpup((__le32 *)(addr));
	rar_high = le16_to_cpup((__le16 *)(addr + 4));

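	/* For example, MAC 00:11:22:33:44:55 stored byte-wise in addr[]
	 * yields rar_low = 0x33221100 and rar_high = 0x5544 in its low
	 * 16 bits; the remaining RAH bits carry the valid/pool/steering
	 * flags set below.
	 */
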
	/* Indicate to hardware the Address is Valid. */
	if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE) {
		if (is_valid_ether_addr(addr))
			rar_high |= E1000_RAH_AV;

		if (adapter->mac_table[index].state & IGB_MAC_STATE_SRC_ADDR)
			rar_high |= E1000_RAH_ASEL_SRC_ADDR;

		switch (hw->mac.type) {
		case e1000_82575:
		case e1000_i210:
			if (adapter->mac_table[index].state &
			    IGB_MAC_STATE_QUEUE_STEERING)
				rar_high |= E1000_RAH_QSEL_ENABLE;

			rar_high |= E1000_RAH_POOL_1 *
				    adapter->mac_table[index].queue;
			break;
		default:
			rar_high |= E1000_RAH_POOL_1 <<
				    adapter->mac_table[index].queue;
			break;
		}
	}

	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}

static int igb_set_vf_mac(struct igb_adapter *adapter,
			  int vf, unsigned char *mac_addr)
{
	struct e1000_hw *hw = &adapter->hw;
	/* VF MAC addresses start at the end of the receive address
	 * registers and move towards the first; as a result a collision
	 * should not be possible.
	 */
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
	unsigned char *vf_mac_addr = adapter->vf_data[vf].vf_mac_addresses;

	ether_addr_copy(vf_mac_addr, mac_addr);
	ether_addr_copy(adapter->mac_table[rar_entry].addr, mac_addr);
	adapter->mac_table[rar_entry].queue = vf;
	adapter->mac_table[rar_entry].state |= IGB_MAC_STATE_IN_USE;
	igb_rar_set_index(adapter, rar_entry);

	return 0;
}

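/* ndo_set_vf_mac handler; reached via the rtnetlink interface, e.g.
 * "ip link set <pf-ifname> vf <n> mac <addr>".
 */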
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (vf >= adapter->vfs_allocated_count)
		return -EINVAL;

	/* Setting the VF MAC to 0 reverts the IGB_VF_FLAG_PF_SET_MAC
	 * flag and allows the MAC to be overwritten via the VF netdev.
	 * This is necessary to give libvirt a way to restore the original
	 * MAC after unbinding vfio-pci and reloading igbvf after shutting
	 * down a VM.
	 */
	if (is_zero_ether_addr(mac)) {
		adapter->vf_data[vf].flags &= ~IGB_VF_FLAG_PF_SET_MAC;
		dev_info(&adapter->pdev->dev,
			 "remove administratively set MAC on VF %d\n",
			 vf);
	} else if (is_valid_ether_addr(mac)) {
		adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
		dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n",
			 mac, vf);
		dev_info(&adapter->pdev->dev,
			 "Reload the VF driver to make this change effective.\n");
		/* Generate additional warning if PF is down */
		if (test_bit(__IGB_DOWN, &adapter->state)) {
			dev_warn(&adapter->pdev->dev,
				 "The VF MAC address has been set, but the PF device is not up.\n");
			dev_warn(&adapter->pdev->dev,
				 "Bring the PF device up before attempting to use the VF device.\n");
		}
	} else {
		return -EINVAL;
	}
	return igb_set_vf_mac(adapter, vf, mac);
}

static int igb_link_mbps(int internal_link_speed)
{
	switch (internal_link_speed) {
	case SPEED_100:
		return 100;
	case SPEED_1000:
		return 1000;
	default:
		return 0;
	}
}

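/* Program the 82576 per-queue Tx rate limiter. The hardware divides the
 * link speed by a fixed-point factor RF_INT.RF_DEC; e.g. capping a VF at
 * 300 Mbps on a 1000 Mbps link gives rf_int = 1000 / 300 = 3 and
 * rf_dec = (1000 - 3 * 300) * 2^E1000_RTTBCNRC_RF_INT_SHIFT / 300,
 * encoding the remaining 1/3 as the binary fraction of the divisor.
 */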
static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
				  int link_speed)
{
	int rf_dec, rf_int;
	u32 bcnrc_val;

	if (tx_rate != 0) {
		/* Calculate the rate factor values to set */
		rf_int = link_speed / tx_rate;
		rf_dec = (link_speed - (rf_int * tx_rate));
		rf_dec = (rf_dec * BIT(E1000_RTTBCNRC_RF_INT_SHIFT)) /
			 tx_rate;

		bcnrc_val = E1000_RTTBCNRC_RS_ENA;
		bcnrc_val |= FIELD_PREP(E1000_RTTBCNRC_RF_INT_MASK, rf_int);
		bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
	} else {
		bcnrc_val = 0;
	}

	wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
	/* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
	 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
	 */
	wr32(E1000_RTTBCNRM, 0x14);
	wr32(E1000_RTTBCNRC, bcnrc_val);
}

static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
{
	int actual_link_speed, i;
	bool reset_rate = false;

	/* VF TX rate limit was not set or not supported */
	if ((adapter->vf_rate_link_speed == 0) ||
	    (adapter->hw.mac.type != e1000_82576))
		return;

	actual_link_speed = igb_link_mbps(adapter->link_speed);
	if (actual_link_speed != adapter->vf_rate_link_speed) {
		reset_rate = true;
		adapter->vf_rate_link_speed = 0;
		dev_info(&adapter->pdev->dev,
			 "Link speed has been changed. VF Transmit rate is disabled\n");
	}

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		if (reset_rate)
			adapter->vf_data[i].tx_rate = 0;

		igb_set_vf_rate_limit(&adapter->hw, i,
				      adapter->vf_data[i].tx_rate,
				      actual_link_speed);
	}
}

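/* ndo_set_vf_rate handler, e.g. "ip link set <pf-ifname> vf <n>
 * max_tx_rate <Mbps>". Only the 82576 supports per-VF rate limiting
 * here, and only a maximum rate (min_tx_rate must be 0).
 */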
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int actual_link_speed;

	if (hw->mac.type != e1000_82576)
		return -EOPNOTSUPP;

	if (min_tx_rate)
		return -EINVAL;

	actual_link_speed = igb_link_mbps(adapter->link_speed);
	if ((vf >= adapter->vfs_allocated_count) ||
	    (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
	    (max_tx_rate < 0) ||
	    (max_tx_rate > actual_link_speed))
		return -EINVAL;

	adapter->vf_rate_link_speed = actual_link_speed;
	adapter->vf_data[vf].tx_rate = (u16)max_tx_rate;
	igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed);

	return 0;
}

static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
				   bool setting)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 reg_val, reg_offset;

	if (!adapter->vfs_allocated_count)
		return -EOPNOTSUPP;

	if (vf >= adapter->vfs_allocated_count)
		return -EINVAL;

	reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC;
	reg_val = rd32(reg_offset);
	if (setting)
		reg_val |= (BIT(vf) |
			    BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
	else
		reg_val &= ~(BIT(vf) |
			     BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
	wr32(reg_offset, reg_val);

	adapter->vf_data[vf].spoofchk_enabled = setting;
	return 0;
}

static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (vf >= adapter->vfs_allocated_count)
		return -EINVAL;
	if (adapter->vf_data[vf].trusted == setting)
		return 0;

	adapter->vf_data[vf].trusted = setting;

	dev_info(&adapter->pdev->dev, "VF %u is %strusted\n",
		 vf, setting ? "" : "not ");
	return 0;
}

static int igb_ndo_get_vf_config(struct net_device *netdev,
				 int vf, struct ifla_vf_info *ivi)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (vf >= adapter->vfs_allocated_count)
		return -EINVAL;
	ivi->vf = vf;
	memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
	ivi->max_tx_rate = adapter->vf_data[vf].tx_rate;
	ivi->min_tx_rate = 0;
	ivi->vlan = adapter->vf_data[vf].pf_vlan;
	ivi->qos = adapter->vf_data[vf].pf_qos;
	ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled;
	ivi->trusted = adapter->vf_data[vf].trusted;
	return 0;
}

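/* Enable VMDq loopback, replication and anti-spoofing whenever VFs are
 * allocated. Register support varies by MAC generation, so the switch
 * below deliberately falls through: 82576 additionally needs DTXCTL,
 * 82576/82580 need RPLOLR, and i350 supports neither register.
 */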
static void igb_vmm_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg;

	switch (hw->mac.type) {
	case e1000_82575:
	case e1000_i210:
	case e1000_i211:
	case e1000_i354:
	default:
		/* replication is not supported on these devices */
		return;
	case e1000_82576:
		/* notify HW that the MAC is adding vlan tags */
		reg = rd32(E1000_DTXCTL);
		reg |= E1000_DTXCTL_VLAN_ADDED;
		wr32(E1000_DTXCTL, reg);
		fallthrough;
	case e1000_82580:
		/* enable replication vlan tag stripping */
		reg = rd32(E1000_RPLOLR);
		reg |= E1000_RPLOLR_STRVLAN;
		wr32(E1000_RPLOLR, reg);
		fallthrough;
	case e1000_i350:
		/* none of the above registers are supported by i350 */
		break;
	}

	if (adapter->vfs_allocated_count) {
		igb_vmdq_set_loopback_pf(hw, true);
		igb_vmdq_set_replication_pf(hw, true);
		igb_vmdq_set_anti_spoofing_pf(hw, true,
					      adapter->vfs_allocated_count);
	} else {
		igb_vmdq_set_loopback_pf(hw, false);
		igb_vmdq_set_replication_pf(hw, false);
	}
}

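/* DMA coalescing setup. Note on units: pba is in KB while the FCRTC high
 * water mark field is in 16-byte units, so 64 * (pba - 6) expresses
 * "PBA minus 6 KB" in 16-byte units (1 KB = 64 * 16 B). dmac_thr = pba - 10
 * caps the Rx threshold at PBA minus 10 KB, which suggests the DMACR
 * threshold field shares the KB granularity.
 */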
static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 dmac_thr;
	u16 hwm;
	u32 reg;

	if (hw->mac.type > e1000_82580) {
		if (adapter->flags & IGB_FLAG_DMAC) {
			/* force threshold to 0. */
			wr32(E1000_DMCTXTH, 0);

			/* DMA Coalescing high water mark needs to be greater
			 * than the Rx threshold. Set hwm to PBA - max frame
			 * size in 16B units, capping it at PBA - 6KB.
			 */
			hwm = 64 * (pba - 6);
			reg = rd32(E1000_FCRTC);
			reg &= ~E1000_FCRTC_RTH_COAL_MASK;
			reg |= FIELD_PREP(E1000_FCRTC_RTH_COAL_MASK, hwm);
			wr32(E1000_FCRTC, reg);

			/* Set the DMA Coalescing Rx threshold to PBA - 2 * max
			 * frame size, capping it at PBA - 10KB.
			 */
			dmac_thr = pba - 10;
			reg = rd32(E1000_DMACR);
			reg &= ~E1000_DMACR_DMACTHR_MASK;
			reg |= FIELD_PREP(E1000_DMACR_DMACTHR_MASK, dmac_thr);

			/* transition to L0s or L1 if available */
			reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);

			/* watchdog timer = approx. 1000 usec in 32 usec intervals */
			reg |= (1000 >> 5);

			/* Disable BMC-to-OS Watchdog Enable */
			if (hw->mac.type != e1000_i354)
				reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
			wr32(E1000_DMACR, reg);

			/* no lower threshold to disable
			 * coalescing (smart FIFO); UTRESH=0
			 */
			wr32(E1000_DMCRTRH, 0);

			reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);

			wr32(E1000_DMCTLX, reg);

			/* free space in Tx packet buffer to wake from
			 * DMA coalescing
			 */
			wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
			     (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
		}

		if (hw->mac.type >= e1000_i210 ||
		    (adapter->flags & IGB_FLAG_DMAC)) {
			reg = rd32(E1000_PCIEMISC);
			reg |= E1000_PCIEMISC_LX_DECISION;
			wr32(E1000_PCIEMISC, reg);
		} /* endif adapter->dmac is not disabled */
	} else if (hw->mac.type == e1000_82580) {
		u32 reg = rd32(E1000_PCIEMISC);

		wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
		wr32(E1000_DMACR, 0);
	}
}


/**
 * igb_read_i2c_byte - Reads an 8-bit value over I2C
 * @hw: pointer to hardware structure
 * @byte_offset: byte offset to read
 * @dev_addr: device address
 * @data: value read
 *
 * Performs a byte read operation over the I2C interface at
 * a specified device address.
 **/
s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
		      u8 dev_addr, u8 *data)
{
	struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
	struct i2c_client *this_client = adapter->i2c_client;
	s32 status;
	u16 swfw_mask = 0;

	if (!this_client)
		return E1000_ERR_I2C;

	swfw_mask = E1000_SWFW_PHY0_SM;

	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
		return E1000_ERR_SWFW_SYNC;

	status = i2c_smbus_read_byte_data(this_client, byte_offset);
	hw->mac.ops.release_swfw_sync(hw, swfw_mask);

	if (status < 0)
		return E1000_ERR_I2C;

	*data = status;
	return 0;
}

/**
 * igb_write_i2c_byte - Writes an 8-bit value over I2C
 * @hw: pointer to hardware structure
 * @byte_offset: byte offset to write
 * @dev_addr: device address
 * @data: value to write
 *
 * Performs a byte write operation over the I2C interface at
 * a specified device address.
 **/
s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
		       u8 dev_addr, u8 data)
{
	struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
	struct i2c_client *this_client = adapter->i2c_client;
	s32 status;
	u16 swfw_mask = E1000_SWFW_PHY0_SM;

	if (!this_client)
		return E1000_ERR_I2C;

	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
		return E1000_ERR_SWFW_SYNC;
	status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
	hw->mac.ops.release_swfw_sync(hw, swfw_mask);

	if (status)
		return E1000_ERR_I2C;

	return 0;
}

int igb_reinit_queues(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err = 0;

	if (netif_running(netdev))
		igb_close(netdev);

	igb_reset_interrupt_capability(adapter);

	if (igb_init_interrupt_scheme(adapter, true)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	if (netif_running(netdev))
		err = igb_open(netdev);

	return err;
}

static void igb_nfc_filter_exit(struct igb_adapter *adapter)
{
	struct igb_nfc_filter *rule;

	spin_lock(&adapter->nfc_lock);

	hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
		igb_erase_filter(adapter, rule);

	hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node)
		igb_erase_filter(adapter, rule);

	spin_unlock(&adapter->nfc_lock);
}

static void igb_nfc_filter_restore(struct igb_adapter *adapter)
{
	struct igb_nfc_filter *rule;

	spin_lock(&adapter->nfc_lock);

	hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
		igb_add_filter(adapter, rule);

	spin_unlock(&adapter->nfc_lock);
}

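/* _DEFINE_DEV_PM_OPS() wires igb_suspend/igb_resume in as the system
 * sleep callbacks and the igb_runtime_* handlers as the runtime PM
 * callbacks; pm_ptr() below lets the table compile away entirely when
 * CONFIG_PM is disabled.
 */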
static _DEFINE_DEV_PM_OPS(igb_pm_ops, igb_suspend, igb_resume,
			  igb_runtime_suspend, igb_runtime_resume,
			  igb_runtime_idle);

static struct pci_driver igb_driver = {
	.name = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe = igb_probe,
	.remove = igb_remove,
	.driver.pm = pm_ptr(&igb_pm_ops),
	.shutdown = igb_shutdown,
	.sriov_configure = igb_pci_sriov_configure,
	.err_handler = &igb_err_handler
};

/* igb_main.c */
1/* Intel(R) Gigabit Ethernet Linux driver
2 * Copyright(c) 2007-2014 Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, see <http://www.gnu.org/licenses/>.
15 *
16 * The full GNU General Public License is included in this distribution in
17 * the file called "COPYING".
18 *
19 * Contact Information:
20 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22 */
23
24#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
25
26#include <linux/module.h>
27#include <linux/types.h>
28#include <linux/init.h>
29#include <linux/bitops.h>
30#include <linux/vmalloc.h>
31#include <linux/pagemap.h>
32#include <linux/netdevice.h>
33#include <linux/ipv6.h>
34#include <linux/slab.h>
35#include <net/checksum.h>
36#include <net/ip6_checksum.h>
37#include <linux/net_tstamp.h>
38#include <linux/mii.h>
39#include <linux/ethtool.h>
40#include <linux/if.h>
41#include <linux/if_vlan.h>
42#include <linux/pci.h>
43#include <linux/pci-aspm.h>
44#include <linux/delay.h>
45#include <linux/interrupt.h>
46#include <linux/ip.h>
47#include <linux/tcp.h>
48#include <linux/sctp.h>
49#include <linux/if_ether.h>
50#include <linux/aer.h>
51#include <linux/prefetch.h>
52#include <linux/pm_runtime.h>
53#include <linux/etherdevice.h>
54#ifdef CONFIG_IGB_DCA
55#include <linux/dca.h>
56#endif
57#include <linux/i2c.h>
58#include "igb.h"
59
60#define MAJ 5
61#define MIN 4
62#define BUILD 0
63#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
64__stringify(BUILD) "-k"
65char igb_driver_name[] = "igb";
66char igb_driver_version[] = DRV_VERSION;
67static const char igb_driver_string[] =
68 "Intel(R) Gigabit Ethernet Network Driver";
69static const char igb_copyright[] =
70 "Copyright (c) 2007-2014 Intel Corporation.";
71
72static const struct e1000_info *igb_info_tbl[] = {
73 [board_82575] = &e1000_82575_info,
74};
75
76static const struct pci_device_id igb_pci_tbl[] = {
77 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
78 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) },
79 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
80 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
81 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
82 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
83 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
84 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
85 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 },
86 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 },
87 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
88 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
89 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
90 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
91 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
92 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
93 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
94 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
95 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
96 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
97 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
98 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
99 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
100 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
101 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
102 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
103 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
104 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
105 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
106 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
107 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
108 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
109 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
110 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
111 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
112 /* required last entry */
113 {0, }
114};
115
116MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
117
118static int igb_setup_all_tx_resources(struct igb_adapter *);
119static int igb_setup_all_rx_resources(struct igb_adapter *);
120static void igb_free_all_tx_resources(struct igb_adapter *);
121static void igb_free_all_rx_resources(struct igb_adapter *);
122static void igb_setup_mrqc(struct igb_adapter *);
123static int igb_probe(struct pci_dev *, const struct pci_device_id *);
124static void igb_remove(struct pci_dev *pdev);
125static int igb_sw_init(struct igb_adapter *);
126int igb_open(struct net_device *);
127int igb_close(struct net_device *);
128static void igb_configure(struct igb_adapter *);
129static void igb_configure_tx(struct igb_adapter *);
130static void igb_configure_rx(struct igb_adapter *);
131static void igb_clean_all_tx_rings(struct igb_adapter *);
132static void igb_clean_all_rx_rings(struct igb_adapter *);
133static void igb_clean_tx_ring(struct igb_ring *);
134static void igb_clean_rx_ring(struct igb_ring *);
135static void igb_set_rx_mode(struct net_device *);
136static void igb_update_phy_info(unsigned long);
137static void igb_watchdog(unsigned long);
138static void igb_watchdog_task(struct work_struct *);
139static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
140static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
141 struct rtnl_link_stats64 *stats);
142static int igb_change_mtu(struct net_device *, int);
143static int igb_set_mac(struct net_device *, void *);
144static void igb_set_uta(struct igb_adapter *adapter, bool set);
145static irqreturn_t igb_intr(int irq, void *);
146static irqreturn_t igb_intr_msi(int irq, void *);
147static irqreturn_t igb_msix_other(int irq, void *);
148static irqreturn_t igb_msix_ring(int irq, void *);
149#ifdef CONFIG_IGB_DCA
150static void igb_update_dca(struct igb_q_vector *);
151static void igb_setup_dca(struct igb_adapter *);
152#endif /* CONFIG_IGB_DCA */
153static int igb_poll(struct napi_struct *, int);
154static bool igb_clean_tx_irq(struct igb_q_vector *, int);
155static int igb_clean_rx_irq(struct igb_q_vector *, int);
156static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
157static void igb_tx_timeout(struct net_device *);
158static void igb_reset_task(struct work_struct *);
159static void igb_vlan_mode(struct net_device *netdev,
160 netdev_features_t features);
161static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
162static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
163static void igb_restore_vlan(struct igb_adapter *);
164static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8);
165static void igb_ping_all_vfs(struct igb_adapter *);
166static void igb_msg_task(struct igb_adapter *);
167static void igb_vmm_control(struct igb_adapter *);
168static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
169static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
170static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
171static int igb_ndo_set_vf_vlan(struct net_device *netdev,
172 int vf, u16 vlan, u8 qos, __be16 vlan_proto);
173static int igb_ndo_set_vf_bw(struct net_device *, int, int, int);
174static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
175 bool setting);
176static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
177 struct ifla_vf_info *ivi);
178static void igb_check_vf_rate_limit(struct igb_adapter *);
179static void igb_nfc_filter_exit(struct igb_adapter *adapter);
180static void igb_nfc_filter_restore(struct igb_adapter *adapter);
181
182#ifdef CONFIG_PCI_IOV
183static int igb_vf_configure(struct igb_adapter *adapter, int vf);
184static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
185static int igb_disable_sriov(struct pci_dev *dev);
186static int igb_pci_disable_sriov(struct pci_dev *dev);
187#endif
188
189#ifdef CONFIG_PM
190#ifdef CONFIG_PM_SLEEP
191static int igb_suspend(struct device *);
192#endif
193static int igb_resume(struct device *);
194static int igb_runtime_suspend(struct device *dev);
195static int igb_runtime_resume(struct device *dev);
196static int igb_runtime_idle(struct device *dev);
197static const struct dev_pm_ops igb_pm_ops = {
198 SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
199 SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
200 igb_runtime_idle)
201};
202#endif
203static void igb_shutdown(struct pci_dev *);
204static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
205#ifdef CONFIG_IGB_DCA
206static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
207static struct notifier_block dca_notifier = {
208 .notifier_call = igb_notify_dca,
209 .next = NULL,
210 .priority = 0
211};
212#endif
213#ifdef CONFIG_NET_POLL_CONTROLLER
214/* for netdump / net console */
215static void igb_netpoll(struct net_device *);
216#endif
217#ifdef CONFIG_PCI_IOV
218static unsigned int max_vfs;
219module_param(max_vfs, uint, 0);
220MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function");
221#endif /* CONFIG_PCI_IOV */
222
223static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
224 pci_channel_state_t);
225static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
226static void igb_io_resume(struct pci_dev *);
227
228static const struct pci_error_handlers igb_err_handler = {
229 .error_detected = igb_io_error_detected,
230 .slot_reset = igb_io_slot_reset,
231 .resume = igb_io_resume,
232};
233
234static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);
235
236static struct pci_driver igb_driver = {
237 .name = igb_driver_name,
238 .id_table = igb_pci_tbl,
239 .probe = igb_probe,
240 .remove = igb_remove,
241#ifdef CONFIG_PM
242 .driver.pm = &igb_pm_ops,
243#endif
244 .shutdown = igb_shutdown,
245 .sriov_configure = igb_pci_sriov_configure,
246 .err_handler = &igb_err_handler
247};
248
249MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
250MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
251MODULE_LICENSE("GPL");
252MODULE_VERSION(DRV_VERSION);
253
254#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
255static int debug = -1;
256module_param(debug, int, 0);
257MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
258
259struct igb_reg_info {
260 u32 ofs;
261 char *name;
262};
263
264static const struct igb_reg_info igb_reg_info_tbl[] = {
265
266 /* General Registers */
267 {E1000_CTRL, "CTRL"},
268 {E1000_STATUS, "STATUS"},
269 {E1000_CTRL_EXT, "CTRL_EXT"},
270
271 /* Interrupt Registers */
272 {E1000_ICR, "ICR"},
273
274 /* RX Registers */
275 {E1000_RCTL, "RCTL"},
276 {E1000_RDLEN(0), "RDLEN"},
277 {E1000_RDH(0), "RDH"},
278 {E1000_RDT(0), "RDT"},
279 {E1000_RXDCTL(0), "RXDCTL"},
280 {E1000_RDBAL(0), "RDBAL"},
281 {E1000_RDBAH(0), "RDBAH"},
282
283 /* TX Registers */
284 {E1000_TCTL, "TCTL"},
285 {E1000_TDBAL(0), "TDBAL"},
286 {E1000_TDBAH(0), "TDBAH"},
287 {E1000_TDLEN(0), "TDLEN"},
288 {E1000_TDH(0), "TDH"},
289 {E1000_TDT(0), "TDT"},
290 {E1000_TXDCTL(0), "TXDCTL"},
291 {E1000_TDFH, "TDFH"},
292 {E1000_TDFT, "TDFT"},
293 {E1000_TDFHS, "TDFHS"},
294 {E1000_TDFPC, "TDFPC"},
295
296 /* List Terminator */
297 {}
298};
299
300/* igb_regdump - register printout routine */
301static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
302{
303 int n = 0;
304 char rname[16];
305 u32 regs[8];
306
307 switch (reginfo->ofs) {
308 case E1000_RDLEN(0):
309 for (n = 0; n < 4; n++)
310 regs[n] = rd32(E1000_RDLEN(n));
311 break;
312 case E1000_RDH(0):
313 for (n = 0; n < 4; n++)
314 regs[n] = rd32(E1000_RDH(n));
315 break;
316 case E1000_RDT(0):
317 for (n = 0; n < 4; n++)
318 regs[n] = rd32(E1000_RDT(n));
319 break;
320 case E1000_RXDCTL(0):
321 for (n = 0; n < 4; n++)
322 regs[n] = rd32(E1000_RXDCTL(n));
323 break;
324 case E1000_RDBAL(0):
325 for (n = 0; n < 4; n++)
326 regs[n] = rd32(E1000_RDBAL(n));
327 break;
328 case E1000_RDBAH(0):
329 for (n = 0; n < 4; n++)
330 regs[n] = rd32(E1000_RDBAH(n));
331 break;
332 case E1000_TDBAL(0):
333 for (n = 0; n < 4; n++)
334 regs[n] = rd32(E1000_RDBAL(n));
335 break;
336 case E1000_TDBAH(0):
337 for (n = 0; n < 4; n++)
338 regs[n] = rd32(E1000_TDBAH(n));
339 break;
340 case E1000_TDLEN(0):
341 for (n = 0; n < 4; n++)
342 regs[n] = rd32(E1000_TDLEN(n));
343 break;
344 case E1000_TDH(0):
345 for (n = 0; n < 4; n++)
346 regs[n] = rd32(E1000_TDH(n));
347 break;
348 case E1000_TDT(0):
349 for (n = 0; n < 4; n++)
350 regs[n] = rd32(E1000_TDT(n));
351 break;
352 case E1000_TXDCTL(0):
353 for (n = 0; n < 4; n++)
354 regs[n] = rd32(E1000_TXDCTL(n));
355 break;
356 default:
357 pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
358 return;
359 }
360
361 snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
362 pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
363 regs[2], regs[3]);
364}
365
366/* igb_dump - Print registers, Tx-rings and Rx-rings */
367static void igb_dump(struct igb_adapter *adapter)
368{
369 struct net_device *netdev = adapter->netdev;
370 struct e1000_hw *hw = &adapter->hw;
371 struct igb_reg_info *reginfo;
372 struct igb_ring *tx_ring;
373 union e1000_adv_tx_desc *tx_desc;
374 struct my_u0 { u64 a; u64 b; } *u0;
375 struct igb_ring *rx_ring;
376 union e1000_adv_rx_desc *rx_desc;
377 u32 staterr;
378 u16 i, n;
379
380 if (!netif_msg_hw(adapter))
381 return;
382
383 /* Print netdevice Info */
384 if (netdev) {
385 dev_info(&adapter->pdev->dev, "Net device Info\n");
386 pr_info("Device Name state trans_start last_rx\n");
387 pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
388 netdev->state, dev_trans_start(netdev), netdev->last_rx);
389 }
390
391 /* Print Registers */
392 dev_info(&adapter->pdev->dev, "Register Dump\n");
393 pr_info(" Register Name Value\n");
394 for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
395 reginfo->name; reginfo++) {
396 igb_regdump(hw, reginfo);
397 }
398
399 /* Print TX Ring Summary */
400 if (!netdev || !netif_running(netdev))
401 goto exit;
402
403 dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
404 pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
405 for (n = 0; n < adapter->num_tx_queues; n++) {
406 struct igb_tx_buffer *buffer_info;
407 tx_ring = adapter->tx_ring[n];
408 buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
409 pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
410 n, tx_ring->next_to_use, tx_ring->next_to_clean,
411 (u64)dma_unmap_addr(buffer_info, dma),
412 dma_unmap_len(buffer_info, len),
413 buffer_info->next_to_watch,
414 (u64)buffer_info->time_stamp);
415 }
416
417 /* Print TX Rings */
418 if (!netif_msg_tx_done(adapter))
419 goto rx_ring_summary;
420
421 dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
422
423 /* Transmit Descriptor Formats
424 *
425 * Advanced Transmit Descriptor
426 * +--------------------------------------------------------------+
427 * 0 | Buffer Address [63:0] |
428 * +--------------------------------------------------------------+
429 * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN |
430 * +--------------------------------------------------------------+
431 * 63 46 45 40 39 38 36 35 32 31 24 15 0
432 */
433
434 for (n = 0; n < adapter->num_tx_queues; n++) {
435 tx_ring = adapter->tx_ring[n];
436 pr_info("------------------------------------\n");
437 pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
438 pr_info("------------------------------------\n");
439 pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n");
440
441 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
442 const char *next_desc;
443 struct igb_tx_buffer *buffer_info;
444 tx_desc = IGB_TX_DESC(tx_ring, i);
445 buffer_info = &tx_ring->tx_buffer_info[i];
446 u0 = (struct my_u0 *)tx_desc;
447 if (i == tx_ring->next_to_use &&
448 i == tx_ring->next_to_clean)
449 next_desc = " NTC/U";
450 else if (i == tx_ring->next_to_use)
451 next_desc = " NTU";
452 else if (i == tx_ring->next_to_clean)
453 next_desc = " NTC";
454 else
455 next_desc = "";
456
457 pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n",
458 i, le64_to_cpu(u0->a),
459 le64_to_cpu(u0->b),
460 (u64)dma_unmap_addr(buffer_info, dma),
461 dma_unmap_len(buffer_info, len),
462 buffer_info->next_to_watch,
463 (u64)buffer_info->time_stamp,
464 buffer_info->skb, next_desc);
465
466 if (netif_msg_pktdata(adapter) && buffer_info->skb)
467 print_hex_dump(KERN_INFO, "",
468 DUMP_PREFIX_ADDRESS,
469 16, 1, buffer_info->skb->data,
470 dma_unmap_len(buffer_info, len),
471 true);
472 }
473 }
474
475 /* Print RX Rings Summary */
476rx_ring_summary:
477 dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
478 pr_info("Queue [NTU] [NTC]\n");
479 for (n = 0; n < adapter->num_rx_queues; n++) {
480 rx_ring = adapter->rx_ring[n];
481 pr_info(" %5d %5X %5X\n",
482 n, rx_ring->next_to_use, rx_ring->next_to_clean);
483 }
484
485 /* Print RX Rings */
486 if (!netif_msg_rx_status(adapter))
487 goto exit;
488
489 dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
490
491 /* Advanced Receive Descriptor (Read) Format
492 * 63 1 0
493 * +-----------------------------------------------------+
494 * 0 | Packet Buffer Address [63:1] |A0/NSE|
495 * +----------------------------------------------+------+
496 * 8 | Header Buffer Address [63:1] | DD |
497 * +-----------------------------------------------------+
498 *
499 *
500 * Advanced Receive Descriptor (Write-Back) Format
501 *
502 * 63 48 47 32 31 30 21 20 17 16 4 3 0
503 * +------------------------------------------------------+
504 * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS |
505 * | Checksum Ident | | | | Type | Type |
506 * +------------------------------------------------------+
507 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
508 * +------------------------------------------------------+
509 * 63 48 47 32 31 20 19 0
510 */
511
512 for (n = 0; n < adapter->num_rx_queues; n++) {
513 rx_ring = adapter->rx_ring[n];
514 pr_info("------------------------------------\n");
515 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
516 pr_info("------------------------------------\n");
517 pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
518 pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");
519
520 for (i = 0; i < rx_ring->count; i++) {
521 const char *next_desc;
522 struct igb_rx_buffer *buffer_info;
523 buffer_info = &rx_ring->rx_buffer_info[i];
524 rx_desc = IGB_RX_DESC(rx_ring, i);
525 u0 = (struct my_u0 *)rx_desc;
526 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
527
528 if (i == rx_ring->next_to_use)
529 next_desc = " NTU";
530 else if (i == rx_ring->next_to_clean)
531 next_desc = " NTC";
532 else
533 next_desc = "";
534
535 if (staterr & E1000_RXD_STAT_DD) {
536 /* Descriptor Done */
537 pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n",
538 "RWB", i,
539 le64_to_cpu(u0->a),
540 le64_to_cpu(u0->b),
541 next_desc);
542 } else {
543 pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n",
544 "R ", i,
545 le64_to_cpu(u0->a),
546 le64_to_cpu(u0->b),
547 (u64)buffer_info->dma,
548 next_desc);
549
550 if (netif_msg_pktdata(adapter) &&
551 buffer_info->dma && buffer_info->page) {
552 print_hex_dump(KERN_INFO, "",
553 DUMP_PREFIX_ADDRESS,
554 16, 1,
555 page_address(buffer_info->page) +
556 buffer_info->page_offset,
557 IGB_RX_BUFSZ, true);
558 }
559 }
560 }
561 }
562
563exit:
564 return;
565}
566
567/**
568 * igb_get_i2c_data - Reads the I2C SDA data bit
569 * @hw: pointer to hardware structure
570 * @i2cctl: Current value of I2CCTL register
571 *
572 * Returns the I2C data bit value
573 **/
574static int igb_get_i2c_data(void *data)
575{
576 struct igb_adapter *adapter = (struct igb_adapter *)data;
577 struct e1000_hw *hw = &adapter->hw;
578 s32 i2cctl = rd32(E1000_I2CPARAMS);
579
580 return !!(i2cctl & E1000_I2C_DATA_IN);
581}
582
583/**
584 * igb_set_i2c_data - Sets the I2C data bit
585 * @data: pointer to hardware structure
586 * @state: I2C data value (0 or 1) to set
587 *
588 * Sets the I2C data bit
589 **/
590static void igb_set_i2c_data(void *data, int state)
591{
592 struct igb_adapter *adapter = (struct igb_adapter *)data;
593 struct e1000_hw *hw = &adapter->hw;
594 s32 i2cctl = rd32(E1000_I2CPARAMS);
595
596 if (state)
597 i2cctl |= E1000_I2C_DATA_OUT;
598 else
599 i2cctl &= ~E1000_I2C_DATA_OUT;
600
601 i2cctl &= ~E1000_I2C_DATA_OE_N;
602 i2cctl |= E1000_I2C_CLK_OE_N;
603 wr32(E1000_I2CPARAMS, i2cctl);
604 wrfl();
605
606}
607
608/**
609 * igb_set_i2c_clk - Sets the I2C SCL clock
610 * @data: pointer to hardware structure
611 * @state: state to set clock
612 *
613 * Sets the I2C clock line to state
614 **/
615static void igb_set_i2c_clk(void *data, int state)
616{
617 struct igb_adapter *adapter = (struct igb_adapter *)data;
618 struct e1000_hw *hw = &adapter->hw;
619 s32 i2cctl = rd32(E1000_I2CPARAMS);
620
621 if (state) {
622 i2cctl |= E1000_I2C_CLK_OUT;
623 i2cctl &= ~E1000_I2C_CLK_OE_N;
624 } else {
625 i2cctl &= ~E1000_I2C_CLK_OUT;
626 i2cctl &= ~E1000_I2C_CLK_OE_N;
627 }
628 wr32(E1000_I2CPARAMS, i2cctl);
629 wrfl();
630}
631
632/**
633 * igb_get_i2c_clk - Gets the I2C SCL clock state
634 * @data: pointer to hardware structure
635 *
636 * Gets the I2C clock state
637 **/
638static int igb_get_i2c_clk(void *data)
639{
640 struct igb_adapter *adapter = (struct igb_adapter *)data;
641 struct e1000_hw *hw = &adapter->hw;
642 s32 i2cctl = rd32(E1000_I2CPARAMS);
643
644 return !!(i2cctl & E1000_I2C_CLK_IN);
645}
646
647static const struct i2c_algo_bit_data igb_i2c_algo = {
648 .setsda = igb_set_i2c_data,
649 .setscl = igb_set_i2c_clk,
650 .getsda = igb_get_i2c_data,
651 .getscl = igb_get_i2c_clk,
652 .udelay = 5,
653 .timeout = 20,
654};
655
656/**
657 * igb_get_hw_dev - return device
658 * @hw: pointer to hardware structure
659 *
660 * used by hardware layer to print debugging information
661 **/
662struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
663{
664 struct igb_adapter *adapter = hw->back;
665 return adapter->netdev;
666}
667
668/**
669 * igb_init_module - Driver Registration Routine
670 *
671 * igb_init_module is the first routine called when the driver is
672 * loaded. All it does is register with the PCI subsystem.
673 **/
674static int __init igb_init_module(void)
675{
676 int ret;
677
678 pr_info("%s - version %s\n",
679 igb_driver_string, igb_driver_version);
680 pr_info("%s\n", igb_copyright);
681
682#ifdef CONFIG_IGB_DCA
683 dca_register_notify(&dca_notifier);
684#endif
685 ret = pci_register_driver(&igb_driver);
686 return ret;
687}
688
689module_init(igb_init_module);
690
691/**
692 * igb_exit_module - Driver Exit Cleanup Routine
693 *
694 * igb_exit_module is called just before the driver is removed
695 * from memory.
696 **/
697static void __exit igb_exit_module(void)
698{
699#ifdef CONFIG_IGB_DCA
700 dca_unregister_notify(&dca_notifier);
701#endif
702 pci_unregister_driver(&igb_driver);
703}
704
705module_exit(igb_exit_module);
706
707#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
708/**
709 * igb_cache_ring_register - Descriptor ring to register mapping
710 * @adapter: board private structure to initialize
711 *
712 * Once we know the feature-set enabled for the device, we'll cache
713 * the register offset the descriptor ring is assigned to.
714 **/
715static void igb_cache_ring_register(struct igb_adapter *adapter)
716{
717 int i = 0, j = 0;
718 u32 rbase_offset = adapter->vfs_allocated_count;
719
720 switch (adapter->hw.mac.type) {
721 case e1000_82576:
722 /* The queues are allocated for virtualization such that VF 0
723 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
724 * In order to avoid collision we start at the first free queue
725 * and continue consuming queues in the same sequence
726 */
727 if (adapter->vfs_allocated_count) {
728 for (; i < adapter->rss_queues; i++)
729 adapter->rx_ring[i]->reg_idx = rbase_offset +
730 Q_IDX_82576(i);
731 }
732 /* Fall through */
733 case e1000_82575:
734 case e1000_82580:
735 case e1000_i350:
736 case e1000_i354:
737 case e1000_i210:
738 case e1000_i211:
739 /* Fall through */
740 default:
741 for (; i < adapter->num_rx_queues; i++)
742 adapter->rx_ring[i]->reg_idx = rbase_offset + i;
743 for (; j < adapter->num_tx_queues; j++)
744 adapter->tx_ring[j]->reg_idx = rbase_offset + j;
745 break;
746 }
747}
748
749u32 igb_rd32(struct e1000_hw *hw, u32 reg)
750{
751 struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
752 u8 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr);
753 u32 value = 0;
754
755 if (E1000_REMOVED(hw_addr))
756 return ~value;
757
758 value = readl(&hw_addr[reg]);
759
760 /* reads should not return all F's */
761 if (!(~value) && (!reg || !(~readl(hw_addr)))) {
762 struct net_device *netdev = igb->netdev;
763 hw->hw_addr = NULL;
764 netif_device_detach(netdev);
765 netdev_err(netdev, "PCIe link lost, device now detached\n");
766 }
767
768 return value;
769}
770
771/**
772 * igb_write_ivar - configure ivar for given MSI-X vector
773 * @hw: pointer to the HW structure
774 * @msix_vector: vector number we are allocating to a given ring
775 * @index: row index of IVAR register to write within IVAR table
776 * @offset: column offset of in IVAR, should be multiple of 8
777 *
778 * This function is intended to handle the writing of the IVAR register
779 * for adapters 82576 and newer. The IVAR table consists of 2 columns,
780 * each containing an cause allocation for an Rx and Tx ring, and a
781 * variable number of rows depending on the number of queues supported.
782 **/
783static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
784 int index, int offset)
785{
786 u32 ivar = array_rd32(E1000_IVAR0, index);
787
788 /* clear any bits that are currently set */
789 ivar &= ~((u32)0xFF << offset);
790
791 /* write vector and valid bit */
792 ivar |= (msix_vector | E1000_IVAR_VALID) << offset;
793
794 array_wr32(E1000_IVAR0, index, ivar);
795}
796
797#define IGB_N0_QUEUE -1
798static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
799{
800 struct igb_adapter *adapter = q_vector->adapter;
801 struct e1000_hw *hw = &adapter->hw;
802 int rx_queue = IGB_N0_QUEUE;
803 int tx_queue = IGB_N0_QUEUE;
804 u32 msixbm = 0;
805
806 if (q_vector->rx.ring)
807 rx_queue = q_vector->rx.ring->reg_idx;
808 if (q_vector->tx.ring)
809 tx_queue = q_vector->tx.ring->reg_idx;
810
811 switch (hw->mac.type) {
812 case e1000_82575:
813 /* The 82575 assigns vectors using a bitmask, which matches the
814 * bitmask for the EICR/EIMS/EIMC registers. To assign one
815 * or more queues to a vector, we write the appropriate bits
816 * into the MSIXBM register for that vector.
817 */
818 if (rx_queue > IGB_N0_QUEUE)
819 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
820 if (tx_queue > IGB_N0_QUEUE)
821 msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
822 if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0)
823 msixbm |= E1000_EIMS_OTHER;
824 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
825 q_vector->eims_value = msixbm;
826 break;
827 case e1000_82576:
828 /* 82576 uses a table that essentially consists of 2 columns
829 * with 8 rows. The ordering is column-major so we use the
830 * lower 3 bits as the row index, and the 4th bit as the
831 * column offset.
832 */
833 if (rx_queue > IGB_N0_QUEUE)
834 igb_write_ivar(hw, msix_vector,
835 rx_queue & 0x7,
836 (rx_queue & 0x8) << 1);
837 if (tx_queue > IGB_N0_QUEUE)
838 igb_write_ivar(hw, msix_vector,
839 tx_queue & 0x7,
840 ((tx_queue & 0x8) << 1) + 8);
841 q_vector->eims_value = BIT(msix_vector);
842 break;
843 case e1000_82580:
844 case e1000_i350:
845 case e1000_i354:
846 case e1000_i210:
847 case e1000_i211:
848 /* On 82580 and newer adapters the scheme is similar to 82576
849 * however instead of ordering column-major we have things
850 * ordered row-major. So we traverse the table by using
851 * bit 0 as the column offset, and the remaining bits as the
852 * row index.
853 */
854 if (rx_queue > IGB_N0_QUEUE)
855 igb_write_ivar(hw, msix_vector,
856 rx_queue >> 1,
857 (rx_queue & 0x1) << 4);
858 if (tx_queue > IGB_N0_QUEUE)
859 igb_write_ivar(hw, msix_vector,
860 tx_queue >> 1,
861 ((tx_queue & 0x1) << 4) + 8);
862 q_vector->eims_value = BIT(msix_vector);
863 break;
864 default:
865 BUG();
866 break;
867 }
868
869 /* add q_vector eims value to global eims_enable_mask */
870 adapter->eims_enable_mask |= q_vector->eims_value;
871
872 /* configure q_vector to set itr on first interrupt */
873 q_vector->set_itr = 1;
874}
875
876/**
877 * igb_configure_msix - Configure MSI-X hardware
878 * @adapter: board private structure to initialize
879 *
880 * igb_configure_msix sets up the hardware to properly
881 * generate MSI-X interrupts.
882 **/
883static void igb_configure_msix(struct igb_adapter *adapter)
884{
885 u32 tmp;
886 int i, vector = 0;
887 struct e1000_hw *hw = &adapter->hw;
888
889 adapter->eims_enable_mask = 0;
890
891 /* set vector for other causes, i.e. link changes */
892 switch (hw->mac.type) {
893 case e1000_82575:
894 tmp = rd32(E1000_CTRL_EXT);
895 /* enable MSI-X PBA support*/
896 tmp |= E1000_CTRL_EXT_PBA_CLR;
897
898 /* Auto-Mask interrupts upon ICR read. */
899 tmp |= E1000_CTRL_EXT_EIAME;
900 tmp |= E1000_CTRL_EXT_IRCA;
901
902 wr32(E1000_CTRL_EXT, tmp);
903
904 /* enable msix_other interrupt */
905 array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER);
906 adapter->eims_other = E1000_EIMS_OTHER;
907
908 break;
909
910 case e1000_82576:
911 case e1000_82580:
912 case e1000_i350:
913 case e1000_i354:
914 case e1000_i210:
915 case e1000_i211:
916 /* Turn on MSI-X capability first, or our settings
917 * won't stick. And it will take days to debug.
918 */
919 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
920 E1000_GPIE_PBA | E1000_GPIE_EIAME |
921 E1000_GPIE_NSICR);
922
923 /* enable msix_other interrupt */
924 adapter->eims_other = BIT(vector);
925 tmp = (vector++ | E1000_IVAR_VALID) << 8;
926
927 wr32(E1000_IVAR_MISC, tmp);
928 break;
929 default:
930 /* do nothing, since nothing else supports MSI-X */
931 break;
932 } /* switch (hw->mac.type) */
933
934 adapter->eims_enable_mask |= adapter->eims_other;
935
936 for (i = 0; i < adapter->num_q_vectors; i++)
937 igb_assign_vector(adapter->q_vector[i], vector++);
938
939 wrfl();
940}
941
942/**
943 * igb_request_msix - Initialize MSI-X interrupts
944 * @adapter: board private structure to initialize
945 *
946 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
947 * kernel.
948 **/
949static int igb_request_msix(struct igb_adapter *adapter)
950{
951 struct net_device *netdev = adapter->netdev;
952 int i, err = 0, vector = 0, free_vector = 0;
953
954 err = request_irq(adapter->msix_entries[vector].vector,
955 igb_msix_other, 0, netdev->name, adapter);
956 if (err)
957 goto err_out;
958
959 for (i = 0; i < adapter->num_q_vectors; i++) {
960 struct igb_q_vector *q_vector = adapter->q_vector[i];
961
962 vector++;
963
964 q_vector->itr_register = adapter->io_addr + E1000_EITR(vector);
965
966 if (q_vector->rx.ring && q_vector->tx.ring)
967 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
968 q_vector->rx.ring->queue_index);
969 else if (q_vector->tx.ring)
970 sprintf(q_vector->name, "%s-tx-%u", netdev->name,
971 q_vector->tx.ring->queue_index);
972 else if (q_vector->rx.ring)
973 sprintf(q_vector->name, "%s-rx-%u", netdev->name,
974 q_vector->rx.ring->queue_index);
975 else
976 sprintf(q_vector->name, "%s-unused", netdev->name);
977
978 err = request_irq(adapter->msix_entries[vector].vector,
979 igb_msix_ring, 0, q_vector->name,
980 q_vector);
981 if (err)
982 goto err_free;
983 }
984
985 igb_configure_msix(adapter);
986 return 0;
987
988err_free:
989 /* free already assigned IRQs */
990 free_irq(adapter->msix_entries[free_vector++].vector, adapter);
991
992 vector--;
993 for (i = 0; i < vector; i++) {
994 free_irq(adapter->msix_entries[free_vector++].vector,
995 adapter->q_vector[i]);
996 }
997err_out:
998 return err;
999}
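
/* Editor's worked example (illustration only): suppose request_irq() fails
 * for the third queue vector above (i == 2, so vector == 3). The unwind
 * first frees msix_entries[0] (the msix_other IRQ), then decrements vector
 * to 2 and frees exactly the two queue IRQs that were acquired, entries 1
 * and 2, leaving the never-acquired entry 3 alone.
 */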
1000
1001/**
1002 * igb_free_q_vector - Free memory allocated for specific interrupt vector
1003 * @adapter: board private structure to initialize
1004 * @v_idx: Index of vector to be freed
1005 *
1006 * This function frees the memory allocated to the q_vector.
1007 **/
1008static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
1009{
1010 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
1011
1012 adapter->q_vector[v_idx] = NULL;
1013
1014	/* igb_get_stats64() might access the rings on this vector, so
1015	 * we must wait a grace period before freeing it.
1016	 */
1017 if (q_vector)
1018 kfree_rcu(q_vector, rcu);
1019}
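
/* Editor's sketch (generic kfree_rcu() pattern, not driver code): unpublish
 * the pointer first, then defer the free past an RCU grace period so that
 * lockless readers such as igb_get_stats64() can finish. Assumes the object
 * embeds a struct rcu_head, as struct igb_q_vector does.
 */
struct example_obj {
	int payload;
	struct rcu_head rcu;	/* queues the deferred free */
};

static void example_retire(struct example_obj __rcu **slot)
{
	struct example_obj *obj = rcu_dereference_protected(*slot, 1);

	RCU_INIT_POINTER(*slot, NULL);	/* readers stop finding it ... */
	if (obj)
		kfree_rcu(obj, rcu);	/* ... freed after a grace period */
}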
1020
1021/**
1022 * igb_reset_q_vector - Reset config for interrupt vector
1023 * @adapter: board private structure to initialize
1024 * @v_idx: Index of vector to be reset
1025 *
1026 * If NAPI is enabled it will delete any references to the
1027 * NAPI struct. This is preparation for igb_free_q_vector.
1028 **/
1029static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
1030{
1031 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
1032
1033	/* Coming from igb_set_interrupt_capability, the vectors are not yet
1034	 * allocated, so q_vector may still be NULL; if so, stop here.
1035	 */
1036 if (!q_vector)
1037 return;
1038
1039 if (q_vector->tx.ring)
1040 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
1041
1042 if (q_vector->rx.ring)
1043 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
1044
1045 netif_napi_del(&q_vector->napi);
1046
1047}
1048
1049static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
1050{
1051 int v_idx = adapter->num_q_vectors;
1052
1053 if (adapter->flags & IGB_FLAG_HAS_MSIX)
1054 pci_disable_msix(adapter->pdev);
1055 else if (adapter->flags & IGB_FLAG_HAS_MSI)
1056 pci_disable_msi(adapter->pdev);
1057
1058 while (v_idx--)
1059 igb_reset_q_vector(adapter, v_idx);
1060}
1061
1062/**
1063 * igb_free_q_vectors - Free memory allocated for interrupt vectors
1064 * @adapter: board private structure to initialize
1065 *
1066 * This function frees the memory allocated to the q_vectors. In addition if
1067 * NAPI is enabled it will delete any references to the NAPI struct prior
1068 * to freeing the q_vector.
1069 **/
1070static void igb_free_q_vectors(struct igb_adapter *adapter)
1071{
1072 int v_idx = adapter->num_q_vectors;
1073
1074 adapter->num_tx_queues = 0;
1075 adapter->num_rx_queues = 0;
1076 adapter->num_q_vectors = 0;
1077
1078 while (v_idx--) {
1079 igb_reset_q_vector(adapter, v_idx);
1080 igb_free_q_vector(adapter, v_idx);
1081 }
1082}
1083
1084/**
1085 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
1086 * @adapter: board private structure to initialize
1087 *
1088 * This function resets the device so that it has no Rx queues, Tx queues,
1089 * or MSI-X interrupts allocated.
1090 */
1091static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
1092{
1093 igb_free_q_vectors(adapter);
1094 igb_reset_interrupt_capability(adapter);
1095}
1096
1097/**
1098 * igb_set_interrupt_capability - set MSI or MSI-X if supported
1099 * @adapter: board private structure to initialize
1100 * @msix: boolean value of MSIX capability
1101 *
1102 * Attempt to configure interrupts using the best available
1103 * capabilities of the hardware and kernel.
1104 **/
1105static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
1106{
1107 int err;
1108 int numvecs, i;
1109
1110 if (!msix)
1111 goto msi_only;
1112 adapter->flags |= IGB_FLAG_HAS_MSIX;
1113
1114 /* Number of supported queues. */
1115 adapter->num_rx_queues = adapter->rss_queues;
1116 if (adapter->vfs_allocated_count)
1117 adapter->num_tx_queues = 1;
1118 else
1119 adapter->num_tx_queues = adapter->rss_queues;
1120
1121 /* start with one vector for every Rx queue */
1122 numvecs = adapter->num_rx_queues;
1123
1124 /* if Tx handler is separate add 1 for every Tx queue */
1125 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
1126 numvecs += adapter->num_tx_queues;
1127
1128 /* store the number of vectors reserved for queues */
1129 adapter->num_q_vectors = numvecs;
1130
1131 /* add 1 vector for link status interrupts */
1132 numvecs++;
1133 for (i = 0; i < numvecs; i++)
1134 adapter->msix_entries[i].entry = i;
1135
1136 err = pci_enable_msix_range(adapter->pdev,
1137 adapter->msix_entries,
1138 numvecs,
1139 numvecs);
1140 if (err > 0)
1141 return;
1142
1143 igb_reset_interrupt_capability(adapter);
1144
1145 /* If we can't do MSI-X, try MSI */
1146msi_only:
1147 adapter->flags &= ~IGB_FLAG_HAS_MSIX;
1148#ifdef CONFIG_PCI_IOV
1149 /* disable SR-IOV for non MSI-X configurations */
1150 if (adapter->vf_data) {
1151 struct e1000_hw *hw = &adapter->hw;
1152 /* disable iov and allow time for transactions to clear */
1153 pci_disable_sriov(adapter->pdev);
1154 msleep(500);
1155
1156 kfree(adapter->vf_data);
1157 adapter->vf_data = NULL;
1158 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
1159 wrfl();
1160 msleep(100);
1161 dev_info(&adapter->pdev->dev, "IOV Disabled\n");
1162 }
1163#endif
1164 adapter->vfs_allocated_count = 0;
1165 adapter->rss_queues = 1;
1166 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
1167 adapter->num_rx_queues = 1;
1168 adapter->num_tx_queues = 1;
1169 adapter->num_q_vectors = 1;
1170 if (!pci_enable_msi(adapter->pdev))
1171 adapter->flags |= IGB_FLAG_HAS_MSI;
1172}
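
/* Editor's worked example (illustration only): with rss_queues = 4, no VFs
 * and queue pairing disabled, the function above asks for
 *	numvecs = 4 (Rx) + 4 (Tx) + 1 (link/other) = 9
 * MSI-X vectors; with IGB_FLAG_QUEUE_PAIRS set the Tx term drops out and it
 * asks for 4 + 1 = 5. The request is exact-or-fail (min == max), so any
 * shortfall sends everything down the single-vector MSI/legacy path.
 */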
1173
1174static void igb_add_ring(struct igb_ring *ring,
1175 struct igb_ring_container *head)
1176{
1177 head->ring = ring;
1178 head->count++;
1179}
1180
1181/**
1182 * igb_alloc_q_vector - Allocate memory for a single interrupt vector
1183 * @adapter: board private structure to initialize
1184 * @v_count: q_vectors allocated on adapter, used for ring interleaving
1185 * @v_idx: index of vector in adapter struct
1186 * @txr_count: total number of Tx rings to allocate
1187 * @txr_idx: index of first Tx ring to allocate
1188 * @rxr_count: total number of Rx rings to allocate
1189 * @rxr_idx: index of first Rx ring to allocate
1190 *
1191 * We allocate one q_vector. If allocation fails we return -ENOMEM.
1192 **/
1193static int igb_alloc_q_vector(struct igb_adapter *adapter,
1194 int v_count, int v_idx,
1195 int txr_count, int txr_idx,
1196 int rxr_count, int rxr_idx)
1197{
1198 struct igb_q_vector *q_vector;
1199 struct igb_ring *ring;
1200 int ring_count, size;
1201
1202 /* igb only supports 1 Tx and/or 1 Rx queue per vector */
1203 if (txr_count > 1 || rxr_count > 1)
1204 return -ENOMEM;
1205
1206 ring_count = txr_count + rxr_count;
1207 size = sizeof(struct igb_q_vector) +
1208 (sizeof(struct igb_ring) * ring_count);
1209
1210 /* allocate q_vector and rings */
1211 q_vector = adapter->q_vector[v_idx];
1212 if (!q_vector) {
1213 q_vector = kzalloc(size, GFP_KERNEL);
1214 } else if (size > ksize(q_vector)) {
1215 kfree_rcu(q_vector, rcu);
1216 q_vector = kzalloc(size, GFP_KERNEL);
1217 } else {
1218 memset(q_vector, 0, size);
1219 }
1220 if (!q_vector)
1221 return -ENOMEM;
1222
1223 /* initialize NAPI */
1224 netif_napi_add(adapter->netdev, &q_vector->napi,
1225 igb_poll, 64);
1226
1227 /* tie q_vector and adapter together */
1228 adapter->q_vector[v_idx] = q_vector;
1229 q_vector->adapter = adapter;
1230
1231 /* initialize work limits */
1232 q_vector->tx.work_limit = adapter->tx_work_limit;
1233
1234 /* initialize ITR configuration */
1235 q_vector->itr_register = adapter->io_addr + E1000_EITR(0);
1236 q_vector->itr_val = IGB_START_ITR;
1237
1238 /* initialize pointer to rings */
1239 ring = q_vector->ring;
1240
1241	/* initialize ITR */
1242 if (rxr_count) {
1243 /* rx or rx/tx vector */
1244 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
1245 q_vector->itr_val = adapter->rx_itr_setting;
1246 } else {
1247 /* tx only vector */
1248 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
1249 q_vector->itr_val = adapter->tx_itr_setting;
1250 }
1251
1252 if (txr_count) {
1253 /* assign generic ring traits */
1254 ring->dev = &adapter->pdev->dev;
1255 ring->netdev = adapter->netdev;
1256
1257 /* configure backlink on ring */
1258 ring->q_vector = q_vector;
1259
1260 /* update q_vector Tx values */
1261 igb_add_ring(ring, &q_vector->tx);
1262
1263 /* For 82575, context index must be unique per ring. */
1264 if (adapter->hw.mac.type == e1000_82575)
1265 set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
1266
1267 /* apply Tx specific ring traits */
1268 ring->count = adapter->tx_ring_count;
1269 ring->queue_index = txr_idx;
1270
1271 u64_stats_init(&ring->tx_syncp);
1272 u64_stats_init(&ring->tx_syncp2);
1273
1274 /* assign ring to adapter */
1275 adapter->tx_ring[txr_idx] = ring;
1276
1277 /* push pointer to next ring */
1278 ring++;
1279 }
1280
1281 if (rxr_count) {
1282 /* assign generic ring traits */
1283 ring->dev = &adapter->pdev->dev;
1284 ring->netdev = adapter->netdev;
1285
1286 /* configure backlink on ring */
1287 ring->q_vector = q_vector;
1288
1289 /* update q_vector Rx values */
1290 igb_add_ring(ring, &q_vector->rx);
1291
1292 /* set flag indicating ring supports SCTP checksum offload */
1293 if (adapter->hw.mac.type >= e1000_82576)
1294 set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
1295
1296 /* On i350, i354, i210, and i211, loopback VLAN packets
1297 * have the tag byte-swapped.
1298 */
1299 if (adapter->hw.mac.type >= e1000_i350)
1300 set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
1301
1302 /* apply Rx specific ring traits */
1303 ring->count = adapter->rx_ring_count;
1304 ring->queue_index = rxr_idx;
1305
1306 u64_stats_init(&ring->rx_syncp);
1307
1308 /* assign ring to adapter */
1309 adapter->rx_ring[rxr_idx] = ring;
1310 }
1311
1312 return 0;
1313}
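
/* Editor's sketch (illustration only, hypothetical names): the single
 * kzalloc() above sizes one block as header-plus-rings, so the vector and
 * its trailing ring structures live and die together -- in miniature:
 */
struct example_vec {
	int n_rings;
	struct { int count; } ring[];	/* rings trail the header */
};

static struct example_vec *example_vec_alloc(int n_rings)
{
	struct example_vec *v;

	v = kzalloc(sizeof(*v) + n_rings * sizeof(v->ring[0]), GFP_KERNEL);
	if (v)
		v->n_rings = n_rings;
	return v;	/* a single free releases header and rings */
}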
1314
1316/**
1317 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
1318 * @adapter: board private structure to initialize
1319 *
1320 * We allocate one q_vector per queue interrupt. If allocation fails we
1321 * return -ENOMEM.
1322 **/
1323static int igb_alloc_q_vectors(struct igb_adapter *adapter)
1324{
1325 int q_vectors = adapter->num_q_vectors;
1326 int rxr_remaining = adapter->num_rx_queues;
1327 int txr_remaining = adapter->num_tx_queues;
1328 int rxr_idx = 0, txr_idx = 0, v_idx = 0;
1329 int err;
1330
1331 if (q_vectors >= (rxr_remaining + txr_remaining)) {
1332 for (; rxr_remaining; v_idx++) {
1333 err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
1334 0, 0, 1, rxr_idx);
1335
1336 if (err)
1337 goto err_out;
1338
1339 /* update counts and index */
1340 rxr_remaining--;
1341 rxr_idx++;
1342 }
1343 }
1344
1345 for (; v_idx < q_vectors; v_idx++) {
1346 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
1347 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
1348
1349 err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
1350 tqpv, txr_idx, rqpv, rxr_idx);
1351
1352 if (err)
1353 goto err_out;
1354
1355 /* update counts and index */
1356 rxr_remaining -= rqpv;
1357 txr_remaining -= tqpv;
1358 rxr_idx++;
1359 txr_idx++;
1360 }
1361
1362 return 0;
1363
1364err_out:
1365 adapter->num_tx_queues = 0;
1366 adapter->num_rx_queues = 0;
1367 adapter->num_q_vectors = 0;
1368
1369 while (v_idx--)
1370 igb_free_q_vector(adapter, v_idx);
1371
1372 return -ENOMEM;
1373}
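
/* Editor's worked example (illustration only): with num_q_vectors = 4 and
 * 4 Rx + 4 Tx rings (queue pairs), the first branch is skipped (4 < 8) and
 * every loop pass computes rqpv = tqpv = 1, pairing one Rx and one Tx ring
 * per vector. With 8 vectors for the same rings, the first branch gives
 * each Rx ring a private vector and the main loop then spreads the four Tx
 * rings across the remaining four vectors.
 */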
1374
1375/**
1376 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
1377 * @adapter: board private structure to initialize
1378 * @msix: boolean value of MSIX capability
1379 *
1380 * This function initializes the interrupts and allocates all of the queues.
1381 **/
1382static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
1383{
1384 struct pci_dev *pdev = adapter->pdev;
1385 int err;
1386
1387 igb_set_interrupt_capability(adapter, msix);
1388
1389 err = igb_alloc_q_vectors(adapter);
1390 if (err) {
1391 dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
1392 goto err_alloc_q_vectors;
1393 }
1394
1395 igb_cache_ring_register(adapter);
1396
1397 return 0;
1398
1399err_alloc_q_vectors:
1400 igb_reset_interrupt_capability(adapter);
1401 return err;
1402}
1403
1404/**
1405 * igb_request_irq - initialize interrupts
1406 * @adapter: board private structure to initialize
1407 *
1408 * Attempts to configure interrupts using the best available
1409 * capabilities of the hardware and kernel.
1410 **/
1411static int igb_request_irq(struct igb_adapter *adapter)
1412{
1413 struct net_device *netdev = adapter->netdev;
1414 struct pci_dev *pdev = adapter->pdev;
1415 int err = 0;
1416
1417 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1418 err = igb_request_msix(adapter);
1419 if (!err)
1420 goto request_done;
1421 /* fall back to MSI */
1422 igb_free_all_tx_resources(adapter);
1423 igb_free_all_rx_resources(adapter);
1424
1425 igb_clear_interrupt_scheme(adapter);
1426 err = igb_init_interrupt_scheme(adapter, false);
1427 if (err)
1428 goto request_done;
1429
1430 igb_setup_all_tx_resources(adapter);
1431 igb_setup_all_rx_resources(adapter);
1432 igb_configure(adapter);
1433 }
1434
1435 igb_assign_vector(adapter->q_vector[0], 0);
1436
1437 if (adapter->flags & IGB_FLAG_HAS_MSI) {
1438 err = request_irq(pdev->irq, igb_intr_msi, 0,
1439 netdev->name, adapter);
1440 if (!err)
1441 goto request_done;
1442
1443 /* fall back to legacy interrupts */
1444 igb_reset_interrupt_capability(adapter);
1445 adapter->flags &= ~IGB_FLAG_HAS_MSI;
1446 }
1447
1448 err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
1449 netdev->name, adapter);
1450
1451 if (err)
1452 dev_err(&pdev->dev, "Error %d getting interrupt\n",
1453 err);
1454
1455request_done:
1456 return err;
1457}
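
/* Editor's note (summary of the ladder above, illustration only):
 *	MSI-X (one vector per q_vector plus one for link/other)
 *	-> MSI (single exclusive vector)
 *	-> legacy INTx (single vector, IRQF_SHARED).
 * Dropping out of MSI-X rebuilds the interrupt scheme and the queue
 * resources because the ring layout was sized for the original vector
 * count.
 */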
1458
1459static void igb_free_irq(struct igb_adapter *adapter)
1460{
1461 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1462 int vector = 0, i;
1463
1464 free_irq(adapter->msix_entries[vector++].vector, adapter);
1465
1466 for (i = 0; i < adapter->num_q_vectors; i++)
1467 free_irq(adapter->msix_entries[vector++].vector,
1468 adapter->q_vector[i]);
1469 } else {
1470 free_irq(adapter->pdev->irq, adapter);
1471 }
1472}
1473
1474/**
1475 * igb_irq_disable - Mask off interrupt generation on the NIC
1476 * @adapter: board private structure
1477 **/
1478static void igb_irq_disable(struct igb_adapter *adapter)
1479{
1480 struct e1000_hw *hw = &adapter->hw;
1481
1482	/* we need to be careful when disabling interrupts. The VFs are also
1483	 * mapped into these registers, and clearing the bits can cause
1484	 * issues for the VF drivers, so we only clear what we set
1485	 */
1486 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1487 u32 regval = rd32(E1000_EIAM);
1488
1489 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
1490 wr32(E1000_EIMC, adapter->eims_enable_mask);
1491 regval = rd32(E1000_EIAC);
1492 wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
1493 }
1494
1495 wr32(E1000_IAM, 0);
1496 wr32(E1000_IMC, ~0);
1497 wrfl();
1498 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1499 int i;
1500
1501 for (i = 0; i < adapter->num_q_vectors; i++)
1502 synchronize_irq(adapter->msix_entries[i].vector);
1503 } else {
1504 synchronize_irq(adapter->pdev->irq);
1505 }
1506}
1507
1508/**
1509 * igb_irq_enable - Enable default interrupt generation settings
1510 * @adapter: board private structure
1511 **/
1512static void igb_irq_enable(struct igb_adapter *adapter)
1513{
1514 struct e1000_hw *hw = &adapter->hw;
1515
1516 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
1517 u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
1518 u32 regval = rd32(E1000_EIAC);
1519
1520 wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
1521 regval = rd32(E1000_EIAM);
1522 wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
1523 wr32(E1000_EIMS, adapter->eims_enable_mask);
1524 if (adapter->vfs_allocated_count) {
1525 wr32(E1000_MBVFIMR, 0xFF);
1526 ims |= E1000_IMS_VMMB;
1527 }
1528 wr32(E1000_IMS, ims);
1529 } else {
1530 wr32(E1000_IMS, IMS_ENABLE_MASK |
1531 E1000_IMS_DRSTA);
1532 wr32(E1000_IAM, IMS_ENABLE_MASK |
1533 E1000_IMS_DRSTA);
1534 }
1535}
1536
1537static void igb_update_mng_vlan(struct igb_adapter *adapter)
1538{
1539 struct e1000_hw *hw = &adapter->hw;
1540 u16 pf_id = adapter->vfs_allocated_count;
1541 u16 vid = adapter->hw.mng_cookie.vlan_id;
1542 u16 old_vid = adapter->mng_vlan_id;
1543
1544 if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
1545 /* add VID to filter table */
1546 igb_vfta_set(hw, vid, pf_id, true, true);
1547 adapter->mng_vlan_id = vid;
1548 } else {
1549 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
1550 }
1551
1552 if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
1553 (vid != old_vid) &&
1554 !test_bit(old_vid, adapter->active_vlans)) {
1555 /* remove VID from filter table */
1556 igb_vfta_set(hw, vid, pf_id, false, true);
1557 }
1558}
1559
1560/**
1561 * igb_release_hw_control - release control of the h/w to f/w
1562 * @adapter: address of board private structure
1563 *
1564 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
1565 * For ASF and Pass Through versions of f/w this means that the
1566 * driver is no longer loaded.
1567 **/
1568static void igb_release_hw_control(struct igb_adapter *adapter)
1569{
1570 struct e1000_hw *hw = &adapter->hw;
1571 u32 ctrl_ext;
1572
1573 /* Let firmware take over control of h/w */
1574 ctrl_ext = rd32(E1000_CTRL_EXT);
1575 wr32(E1000_CTRL_EXT,
1576 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
1577}
1578
1579/**
1580 * igb_get_hw_control - get control of the h/w from f/w
1581 * @adapter: address of board private structure
1582 *
1583 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
1584 * For ASF and Pass Through versions of f/w this means that
1585 * the driver is loaded.
1586 **/
1587static void igb_get_hw_control(struct igb_adapter *adapter)
1588{
1589 struct e1000_hw *hw = &adapter->hw;
1590 u32 ctrl_ext;
1591
1592 /* Let firmware know the driver has taken over */
1593 ctrl_ext = rd32(E1000_CTRL_EXT);
1594 wr32(E1000_CTRL_EXT,
1595 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
1596}
1597
1598/**
1599 * igb_configure - configure the hardware for RX and TX
1600 * @adapter: private board structure
1601 **/
1602static void igb_configure(struct igb_adapter *adapter)
1603{
1604 struct net_device *netdev = adapter->netdev;
1605 int i;
1606
1607 igb_get_hw_control(adapter);
1608 igb_set_rx_mode(netdev);
1609
1610 igb_restore_vlan(adapter);
1611
1612 igb_setup_tctl(adapter);
1613 igb_setup_mrqc(adapter);
1614 igb_setup_rctl(adapter);
1615
1616 igb_nfc_filter_restore(adapter);
1617 igb_configure_tx(adapter);
1618 igb_configure_rx(adapter);
1619
1620 igb_rx_fifo_flush_82575(&adapter->hw);
1621
1622 /* call igb_desc_unused which always leaves
1623 * at least 1 descriptor unused to make sure
1624 * next_to_use != next_to_clean
1625 */
1626 for (i = 0; i < adapter->num_rx_queues; i++) {
1627 struct igb_ring *ring = adapter->rx_ring[i];
1628 igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
1629 }
1630}
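
/* Editor's sketch (a standalone restatement of the driver's
 * igb_desc_unused() logic): keeping one descriptor permanently unused makes
 * next_to_use == next_to_clean unambiguous -- it can only mean "empty",
 * never "full".
 */
static inline int example_desc_unused(int count, int next_to_use,
				      int next_to_clean)
{
	if (next_to_clean > next_to_use)
		return next_to_clean - next_to_use - 1;

	return count + next_to_clean - next_to_use - 1;
}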
1631
1632/**
1633 * igb_power_up_link - Power up the phy/serdes link
1634 * @adapter: address of board private structure
1635 **/
1636void igb_power_up_link(struct igb_adapter *adapter)
1637{
1638 igb_reset_phy(&adapter->hw);
1639
1640 if (adapter->hw.phy.media_type == e1000_media_type_copper)
1641 igb_power_up_phy_copper(&adapter->hw);
1642 else
1643 igb_power_up_serdes_link_82575(&adapter->hw);
1644
1645 igb_setup_link(&adapter->hw);
1646}
1647
1648/**
1649 * igb_power_down_link - Power down the phy/serdes link
1650 * @adapter: address of board private structure
1651 */
1652static void igb_power_down_link(struct igb_adapter *adapter)
1653{
1654 if (adapter->hw.phy.media_type == e1000_media_type_copper)
1655 igb_power_down_phy_copper_82575(&adapter->hw);
1656 else
1657 igb_shutdown_serdes_link_82575(&adapter->hw);
1658}
1659
1660/**
1661 * igb_check_swap_media - Detect and switch function for Media Auto Sense
1662 * @adapter: address of the board private structure
1663 **/
1664static void igb_check_swap_media(struct igb_adapter *adapter)
1665{
1666 struct e1000_hw *hw = &adapter->hw;
1667 u32 ctrl_ext, connsw;
1668 bool swap_now = false;
1669
1670 ctrl_ext = rd32(E1000_CTRL_EXT);
1671 connsw = rd32(E1000_CONNSW);
1672
1673 /* need to live swap if current media is copper and we have fiber/serdes
1674 * to go to.
1675 */
1676
1677 if ((hw->phy.media_type == e1000_media_type_copper) &&
1678 (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
1679 swap_now = true;
1680 } else if (!(connsw & E1000_CONNSW_SERDESD)) {
1681 /* copper signal takes time to appear */
1682 if (adapter->copper_tries < 4) {
1683 adapter->copper_tries++;
1684 connsw |= E1000_CONNSW_AUTOSENSE_CONF;
1685 wr32(E1000_CONNSW, connsw);
1686 return;
1687 } else {
1688 adapter->copper_tries = 0;
1689 if ((connsw & E1000_CONNSW_PHYSD) &&
1690 (!(connsw & E1000_CONNSW_PHY_PDN))) {
1691 swap_now = true;
1692 connsw &= ~E1000_CONNSW_AUTOSENSE_CONF;
1693 wr32(E1000_CONNSW, connsw);
1694 }
1695 }
1696 }
1697
1698 if (!swap_now)
1699 return;
1700
1701 switch (hw->phy.media_type) {
1702 case e1000_media_type_copper:
1703 netdev_info(adapter->netdev,
1704 "MAS: changing media to fiber/serdes\n");
1705 ctrl_ext |=
1706 E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
1707 adapter->flags |= IGB_FLAG_MEDIA_RESET;
1708 adapter->copper_tries = 0;
1709 break;
1710 case e1000_media_type_internal_serdes:
1711 case e1000_media_type_fiber:
1712 netdev_info(adapter->netdev,
1713 "MAS: changing media to copper\n");
1714 ctrl_ext &=
1715 ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
1716 adapter->flags |= IGB_FLAG_MEDIA_RESET;
1717 break;
1718 default:
1719 /* shouldn't get here during regular operation */
1720 netdev_err(adapter->netdev,
1721			   "MAS: Invalid media type found, returning\n");
1722 break;
1723 }
1724 wr32(E1000_CTRL_EXT, ctrl_ext);
1725}
1726
1727/**
1728 * igb_up - Open the interface and prepare it to handle traffic
1729 * @adapter: board private structure
1730 **/
1731int igb_up(struct igb_adapter *adapter)
1732{
1733 struct e1000_hw *hw = &adapter->hw;
1734 int i;
1735
1736 /* hardware has been reset, we need to reload some things */
1737 igb_configure(adapter);
1738
1739 clear_bit(__IGB_DOWN, &adapter->state);
1740
1741 for (i = 0; i < adapter->num_q_vectors; i++)
1742 napi_enable(&(adapter->q_vector[i]->napi));
1743
1744 if (adapter->flags & IGB_FLAG_HAS_MSIX)
1745 igb_configure_msix(adapter);
1746 else
1747 igb_assign_vector(adapter->q_vector[0], 0);
1748
1749 /* Clear any pending interrupts. */
1750 rd32(E1000_ICR);
1751 igb_irq_enable(adapter);
1752
1753 /* notify VFs that reset has been completed */
1754 if (adapter->vfs_allocated_count) {
1755 u32 reg_data = rd32(E1000_CTRL_EXT);
1756
1757 reg_data |= E1000_CTRL_EXT_PFRSTD;
1758 wr32(E1000_CTRL_EXT, reg_data);
1759 }
1760
1761 netif_tx_start_all_queues(adapter->netdev);
1762
1763 /* start the watchdog. */
1764 hw->mac.get_link_status = 1;
1765 schedule_work(&adapter->watchdog_task);
1766
1767 if ((adapter->flags & IGB_FLAG_EEE) &&
1768 (!hw->dev_spec._82575.eee_disable))
1769 adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
1770
1771 return 0;
1772}
1773
1774void igb_down(struct igb_adapter *adapter)
1775{
1776 struct net_device *netdev = adapter->netdev;
1777 struct e1000_hw *hw = &adapter->hw;
1778 u32 tctl, rctl;
1779 int i;
1780
1781 /* signal that we're down so the interrupt handler does not
1782 * reschedule our watchdog timer
1783 */
1784 set_bit(__IGB_DOWN, &adapter->state);
1785
1786 /* disable receives in the hardware */
1787 rctl = rd32(E1000_RCTL);
1788 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
1789 /* flush and sleep below */
1790
1791 netif_carrier_off(netdev);
1792 netif_tx_stop_all_queues(netdev);
1793
1794 /* disable transmits in the hardware */
1795 tctl = rd32(E1000_TCTL);
1796 tctl &= ~E1000_TCTL_EN;
1797 wr32(E1000_TCTL, tctl);
1798 /* flush both disables and wait for them to finish */
1799 wrfl();
1800 usleep_range(10000, 11000);
1801
1802 igb_irq_disable(adapter);
1803
1804 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
1805
1806 for (i = 0; i < adapter->num_q_vectors; i++) {
1807 if (adapter->q_vector[i]) {
1808 napi_synchronize(&adapter->q_vector[i]->napi);
1809 napi_disable(&adapter->q_vector[i]->napi);
1810 }
1811 }
1812
1813 del_timer_sync(&adapter->watchdog_timer);
1814 del_timer_sync(&adapter->phy_info_timer);
1815
1816	/* record the stats before reset */
1817 spin_lock(&adapter->stats64_lock);
1818 igb_update_stats(adapter, &adapter->stats64);
1819 spin_unlock(&adapter->stats64_lock);
1820
1821 adapter->link_speed = 0;
1822 adapter->link_duplex = 0;
1823
1824 if (!pci_channel_offline(adapter->pdev))
1825 igb_reset(adapter);
1826
1827 /* clear VLAN promisc flag so VFTA will be updated if necessary */
1828 adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;
1829
1830 igb_clean_all_tx_rings(adapter);
1831 igb_clean_all_rx_rings(adapter);
1832#ifdef CONFIG_IGB_DCA
1833
1834 /* since we reset the hardware DCA settings were cleared */
1835 igb_setup_dca(adapter);
1836#endif
1837}
1838
1839void igb_reinit_locked(struct igb_adapter *adapter)
1840{
1841 WARN_ON(in_interrupt());
1842 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
1843 usleep_range(1000, 2000);
1844 igb_down(adapter);
1845 igb_up(adapter);
1846 clear_bit(__IGB_RESETTING, &adapter->state);
1847}
1848
1849/**
1850 * igb_enable_mas - Media Autosense re-enable after swap
1851 * @adapter: adapter struct
1852 **/
1853static void igb_enable_mas(struct igb_adapter *adapter)
1854{
1855 struct e1000_hw *hw = &adapter->hw;
1856 u32 connsw = rd32(E1000_CONNSW);
1857
1858 /* configure for SerDes media detect */
1859 if ((hw->phy.media_type == e1000_media_type_copper) &&
1860 (!(connsw & E1000_CONNSW_SERDESD))) {
1861 connsw |= E1000_CONNSW_ENRGSRC;
1862 connsw |= E1000_CONNSW_AUTOSENSE_EN;
1863 wr32(E1000_CONNSW, connsw);
1864 wrfl();
1865 }
1866}
1867
1868void igb_reset(struct igb_adapter *adapter)
1869{
1870 struct pci_dev *pdev = adapter->pdev;
1871 struct e1000_hw *hw = &adapter->hw;
1872 struct e1000_mac_info *mac = &hw->mac;
1873 struct e1000_fc_info *fc = &hw->fc;
1874 u32 pba, hwm;
1875
1876	/* Repartition the PBA for MTUs greater than 9K.
1877	 * CTRL.RST is required for the change to take effect.
1878	 */
1879 switch (mac->type) {
1880 case e1000_i350:
1881 case e1000_i354:
1882 case e1000_82580:
1883 pba = rd32(E1000_RXPBS);
1884 pba = igb_rxpbs_adjust_82580(pba);
1885 break;
1886 case e1000_82576:
1887 pba = rd32(E1000_RXPBS);
1888 pba &= E1000_RXPBS_SIZE_MASK_82576;
1889 break;
1890 case e1000_82575:
1891 case e1000_i210:
1892 case e1000_i211:
1893 default:
1894 pba = E1000_PBA_34K;
1895 break;
1896 }
1897
1898 if (mac->type == e1000_82575) {
1899 u32 min_rx_space, min_tx_space, needed_tx_space;
1900
1901 /* write Rx PBA so that hardware can report correct Tx PBA */
1902 wr32(E1000_PBA, pba);
1903
1904 /* To maintain wire speed transmits, the Tx FIFO should be
1905 * large enough to accommodate two full transmit packets,
1906 * rounded up to the next 1KB and expressed in KB. Likewise,
1907 * the Rx FIFO should be large enough to accommodate at least
1908 * one full receive packet and is similarly rounded up and
1909 * expressed in KB.
1910 */
1911 min_rx_space = DIV_ROUND_UP(MAX_JUMBO_FRAME_SIZE, 1024);
1912
1913		/* The Tx FIFO also stores 16 bytes of information about the Tx
1914		 * packet, but doesn't include the Ethernet FCS because hardware
1915		 * appends it. We only need to round up to the nearest 512 byte
1916		 * block count since the value we care about is 2 frames, not 1.
1917		 */
1918 min_tx_space = adapter->max_frame_size;
1919 min_tx_space += sizeof(union e1000_adv_tx_desc) - ETH_FCS_LEN;
1920 min_tx_space = DIV_ROUND_UP(min_tx_space, 512);
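
		/* Editor's worked example (illustration only, assuming a
		 * 1522-byte max frame and 16-byte advanced descriptors):
		 * min_tx_space = DIV_ROUND_UP(1522 + 16 - 4, 512) = 3 blocks,
		 * while min_rx_space above is the jumbo frame size rounded up
		 * to whole KB.
		 */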
1921
1922 /* upper 16 bits has Tx packet buffer allocation size in KB */
1923 needed_tx_space = min_tx_space - (rd32(E1000_PBA) >> 16);
1924
1925 /* If current Tx allocation is less than the min Tx FIFO size,
1926 * and the min Tx FIFO size is less than the current Rx FIFO
1927 * allocation, take space away from current Rx allocation.
1928 */
1929 if (needed_tx_space < pba) {
1930 pba -= needed_tx_space;
1931
1932 /* if short on Rx space, Rx wins and must trump Tx
1933 * adjustment
1934 */
1935 if (pba < min_rx_space)
1936 pba = min_rx_space;
1937 }
1938
1939 /* adjust PBA for jumbo frames */
1940 wr32(E1000_PBA, pba);
1941 }
1942
1943 /* flow control settings
1944 * The high water mark must be low enough to fit one full frame
1945 * after transmitting the pause frame. As such we must have enough
1946 * space to allow for us to complete our current transmit and then
1947 * receive the frame that is in progress from the link partner.
1948 * Set it to:
1949 * - the full Rx FIFO size minus one full Tx plus one full Rx frame
1950 */
1951 hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);
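
	/* Editor's worked example (illustration only, assuming pba = 34 KB,
	 * a 1522-byte max frame and a 9728-byte MAX_JUMBO_FRAME_SIZE):
	 * hwm = (34 << 10) - (1522 + 9728) = 23566, masked below to 23552.
	 */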
1952
1953 fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */
1954 fc->low_water = fc->high_water - 16;
1955 fc->pause_time = 0xFFFF;
1956 fc->send_xon = 1;
1957 fc->current_mode = fc->requested_mode;
1958
1959 /* disable receive for all VFs and wait one second */
1960 if (adapter->vfs_allocated_count) {
1961 int i;
1962
1963 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
1964 adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
1965
1966 /* ping all the active vfs to let them know we are going down */
1967 igb_ping_all_vfs(adapter);
1968
1969 /* disable transmits and receives */
1970 wr32(E1000_VFRE, 0);
1971 wr32(E1000_VFTE, 0);
1972 }
1973
1974 /* Allow time for pending master requests to run */
1975 hw->mac.ops.reset_hw(hw);
1976 wr32(E1000_WUC, 0);
1977
1978 if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
1979		/* need to redo the setup here after a media swap */
1980 adapter->ei.get_invariants(hw);
1981 adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
1982 }
1983 if ((mac->type == e1000_82575) &&
1984 (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
1985 igb_enable_mas(adapter);
1986 }
1987 if (hw->mac.ops.init_hw(hw))
1988 dev_err(&pdev->dev, "Hardware Error\n");
1989
1990 /* Flow control settings reset on hardware reset, so guarantee flow
1991 * control is off when forcing speed.
1992 */
1993 if (!hw->mac.autoneg)
1994 igb_force_mac_fc(hw);
1995
1996 igb_init_dmac(adapter, pba);
1997#ifdef CONFIG_IGB_HWMON
1998 /* Re-initialize the thermal sensor on i350 devices. */
1999 if (!test_bit(__IGB_DOWN, &adapter->state)) {
2000 if (mac->type == e1000_i350 && hw->bus.func == 0) {
2001 /* If present, re-initialize the external thermal sensor
2002 * interface.
2003 */
2004 if (adapter->ets)
2005 mac->ops.init_thermal_sensor_thresh(hw);
2006 }
2007 }
2008#endif
2009 /* Re-establish EEE setting */
2010 if (hw->phy.media_type == e1000_media_type_copper) {
2011 switch (mac->type) {
2012 case e1000_i350:
2013 case e1000_i210:
2014 case e1000_i211:
2015 igb_set_eee_i350(hw, true, true);
2016 break;
2017 case e1000_i354:
2018 igb_set_eee_i354(hw, true, true);
2019 break;
2020 default:
2021 break;
2022 }
2023 }
2024 if (!netif_running(adapter->netdev))
2025 igb_power_down_link(adapter);
2026
2027 igb_update_mng_vlan(adapter);
2028
2029 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
2030 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
2031
2032 /* Re-enable PTP, where applicable. */
2033 if (adapter->ptp_flags & IGB_PTP_ENABLED)
2034 igb_ptp_reset(adapter);
2035
2036 igb_get_phy_info(hw);
2037}
2038
2039static netdev_features_t igb_fix_features(struct net_device *netdev,
2040 netdev_features_t features)
2041{
2042	/* Since there is no support for separate Rx/Tx vlan accel
2043	 * enable/disable, make sure the Tx flag is always in the same
2044	 * state as the Rx flag.
2045	 */
2045 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2046 features |= NETIF_F_HW_VLAN_CTAG_TX;
2047 else
2048 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2049
2050 return features;
2051}
2052
2053static int igb_set_features(struct net_device *netdev,
2054 netdev_features_t features)
2055{
2056 netdev_features_t changed = netdev->features ^ features;
2057 struct igb_adapter *adapter = netdev_priv(netdev);
2058
2059 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2060 igb_vlan_mode(netdev, features);
2061
2062 if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
2063 return 0;
2064
2065 if (!(features & NETIF_F_NTUPLE)) {
2066 struct hlist_node *node2;
2067 struct igb_nfc_filter *rule;
2068
2069 spin_lock(&adapter->nfc_lock);
2070 hlist_for_each_entry_safe(rule, node2,
2071 &adapter->nfc_filter_list, nfc_node) {
2072 igb_erase_filter(adapter, rule);
2073 hlist_del(&rule->nfc_node);
2074 kfree(rule);
2075 }
2076 spin_unlock(&adapter->nfc_lock);
2077 adapter->nfc_filter_count = 0;
2078 }
2079
2080 netdev->features = features;
2081
2082 if (netif_running(netdev))
2083 igb_reinit_locked(adapter);
2084 else
2085 igb_reset(adapter);
2086
2087 return 0;
2088}
2089
2090static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
2091 struct net_device *dev,
2092 const unsigned char *addr, u16 vid,
2093 u16 flags)
2094{
2095 /* guarantee we can provide a unique filter for the unicast address */
2096 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
2097 struct igb_adapter *adapter = netdev_priv(dev);
2098 struct e1000_hw *hw = &adapter->hw;
2099 int vfn = adapter->vfs_allocated_count;
2100 int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
2101
2102 if (netdev_uc_count(dev) >= rar_entries)
2103 return -ENOMEM;
2104 }
2105
2106 return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
2107}
2108
2109#define IGB_MAX_MAC_HDR_LEN 127
2110#define IGB_MAX_NETWORK_HDR_LEN 511
2111
2112static netdev_features_t
2113igb_features_check(struct sk_buff *skb, struct net_device *dev,
2114 netdev_features_t features)
2115{
2116 unsigned int network_hdr_len, mac_hdr_len;
2117
2118 /* Make certain the headers can be described by a context descriptor */
2119 mac_hdr_len = skb_network_header(skb) - skb->data;
2120 if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN))
2121 return features & ~(NETIF_F_HW_CSUM |
2122 NETIF_F_SCTP_CRC |
2123 NETIF_F_HW_VLAN_CTAG_TX |
2124 NETIF_F_TSO |
2125 NETIF_F_TSO6);
2126
2127 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
2128 if (unlikely(network_hdr_len > IGB_MAX_NETWORK_HDR_LEN))
2129 return features & ~(NETIF_F_HW_CSUM |
2130 NETIF_F_SCTP_CRC |
2131 NETIF_F_TSO |
2132 NETIF_F_TSO6);
2133
2134 /* We can only support IPV4 TSO in tunnels if we can mangle the
2135 * inner IP ID field, so strip TSO if MANGLEID is not supported.
2136 */
2137 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
2138 features &= ~NETIF_F_TSO;
2139
2140 return features;
2141}
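
/* Editor's worked example (illustration only): for a plain untagged
 * IPv4/TCP frame, mac_hdr_len = 14 (Ethernet header) and network_hdr_len =
 * 20 (IPv4 header up to the checksum start), both well inside the 127- and
 * 511-byte limits a single context descriptor can describe.
 */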
2142
2143static const struct net_device_ops igb_netdev_ops = {
2144 .ndo_open = igb_open,
2145 .ndo_stop = igb_close,
2146 .ndo_start_xmit = igb_xmit_frame,
2147 .ndo_get_stats64 = igb_get_stats64,
2148 .ndo_set_rx_mode = igb_set_rx_mode,
2149 .ndo_set_mac_address = igb_set_mac,
2150 .ndo_change_mtu = igb_change_mtu,
2151 .ndo_do_ioctl = igb_ioctl,
2152 .ndo_tx_timeout = igb_tx_timeout,
2153 .ndo_validate_addr = eth_validate_addr,
2154 .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
2155 .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
2156 .ndo_set_vf_mac = igb_ndo_set_vf_mac,
2157 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
2158 .ndo_set_vf_rate = igb_ndo_set_vf_bw,
2159 .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk,
2160 .ndo_get_vf_config = igb_ndo_get_vf_config,
2161#ifdef CONFIG_NET_POLL_CONTROLLER
2162 .ndo_poll_controller = igb_netpoll,
2163#endif
2164 .ndo_fix_features = igb_fix_features,
2165 .ndo_set_features = igb_set_features,
2166 .ndo_fdb_add = igb_ndo_fdb_add,
2167 .ndo_features_check = igb_features_check,
2168};
2169
2170/**
2171 * igb_set_fw_version - Configure version string for ethtool
2172 * @adapter: adapter struct
2173 **/
2174void igb_set_fw_version(struct igb_adapter *adapter)
2175{
2176 struct e1000_hw *hw = &adapter->hw;
2177 struct e1000_fw_version fw;
2178
2179 igb_get_fw_version(hw, &fw);
2180
2181 switch (hw->mac.type) {
2182 case e1000_i210:
2183 case e1000_i211:
2184 if (!(igb_get_flash_presence_i210(hw))) {
2185 snprintf(adapter->fw_version,
2186 sizeof(adapter->fw_version),
2187 "%2d.%2d-%d",
2188 fw.invm_major, fw.invm_minor,
2189 fw.invm_img_type);
2190 break;
2191 }
2192 /* fall through */
2193 default:
2194		/* if option rom is valid, display its version too */
2195 if (fw.or_valid) {
2196 snprintf(adapter->fw_version,
2197 sizeof(adapter->fw_version),
2198 "%d.%d, 0x%08x, %d.%d.%d",
2199 fw.eep_major, fw.eep_minor, fw.etrack_id,
2200 fw.or_major, fw.or_build, fw.or_patch);
2201 /* no option rom */
2202		} else if (fw.etrack_id != 0x0000) {
2203 snprintf(adapter->fw_version,
2204 sizeof(adapter->fw_version),
2205 "%d.%d, 0x%08x",
2206 fw.eep_major, fw.eep_minor, fw.etrack_id);
2207 } else {
2208 snprintf(adapter->fw_version,
2209 sizeof(adapter->fw_version),
2210 "%d.%d.%d",
2211 fw.eep_major, fw.eep_minor, fw.eep_build);
2212 }
2213 break;
2214 }
2215}
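
/* Editor's note (made-up values, illustration only): the branches above
 * yield strings shaped like "1.63, 0x80000d10, 1.3.0" (option ROM valid),
 * "1.63, 0x80000d10" (eTrack ID only), "1.63.35" (EEPROM build only), and
 * the "%2d.%2d-%d" iNVM form on flashless i210/i211 parts.
 */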
2216
2217/**
2218 * igb_init_mas - init Media Autosense feature if enabled in the NVM
2219 *
2220 * @adapter: adapter struct
2221 **/
2222static void igb_init_mas(struct igb_adapter *adapter)
2223{
2224 struct e1000_hw *hw = &adapter->hw;
2225 u16 eeprom_data;
2226
2227 hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data);
2228 switch (hw->bus.func) {
2229 case E1000_FUNC_0:
2230 if (eeprom_data & IGB_MAS_ENABLE_0) {
2231 adapter->flags |= IGB_FLAG_MAS_ENABLE;
2232 netdev_info(adapter->netdev,
2233 "MAS: Enabling Media Autosense for port %d\n",
2234 hw->bus.func);
2235 }
2236 break;
2237 case E1000_FUNC_1:
2238 if (eeprom_data & IGB_MAS_ENABLE_1) {
2239 adapter->flags |= IGB_FLAG_MAS_ENABLE;
2240 netdev_info(adapter->netdev,
2241 "MAS: Enabling Media Autosense for port %d\n",
2242 hw->bus.func);
2243 }
2244 break;
2245 case E1000_FUNC_2:
2246 if (eeprom_data & IGB_MAS_ENABLE_2) {
2247 adapter->flags |= IGB_FLAG_MAS_ENABLE;
2248 netdev_info(adapter->netdev,
2249 "MAS: Enabling Media Autosense for port %d\n",
2250 hw->bus.func);
2251 }
2252 break;
2253 case E1000_FUNC_3:
2254 if (eeprom_data & IGB_MAS_ENABLE_3) {
2255 adapter->flags |= IGB_FLAG_MAS_ENABLE;
2256 netdev_info(adapter->netdev,
2257 "MAS: Enabling Media Autosense for port %d\n",
2258 hw->bus.func);
2259 }
2260 break;
2261 default:
2262 /* Shouldn't get here */
2263 netdev_err(adapter->netdev,
2264 "MAS: Invalid port configuration, returning\n");
2265 break;
2266 }
2267}
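
/* Editor's sketch (alternative formulation, not driver code): the per-port
 * switch above collapses to a table lookup; this assumes IGB_MAS_ENABLE_0..3
 * are independent single-bit flags, which the sketch makes explicit.
 */
static bool example_mas_enabled(u16 eeprom_data, unsigned int func)
{
	static const u16 mas_enable[] = {
		IGB_MAS_ENABLE_0, IGB_MAS_ENABLE_1,
		IGB_MAS_ENABLE_2, IGB_MAS_ENABLE_3,
	};

	return func < ARRAY_SIZE(mas_enable) &&
	       (eeprom_data & mas_enable[func]);
}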
2268
2269/**
2270 * igb_init_i2c - Init I2C interface
2271 * @adapter: pointer to adapter structure
2272 **/
2273static s32 igb_init_i2c(struct igb_adapter *adapter)
2274{
2275 s32 status = 0;
2276
2277 /* I2C interface supported on i350 devices */
2278 if (adapter->hw.mac.type != e1000_i350)
2279 return 0;
2280
2281 /* Initialize the i2c bus which is controlled by the registers.
2282	 * This bus will use the i2c_algo_bit structure that implements
2283 * the protocol through toggling of the 4 bits in the register.
2284 */
2285 adapter->i2c_adap.owner = THIS_MODULE;
2286 adapter->i2c_algo = igb_i2c_algo;
2287 adapter->i2c_algo.data = adapter;
2288 adapter->i2c_adap.algo_data = &adapter->i2c_algo;
2289 adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
2290 strlcpy(adapter->i2c_adap.name, "igb BB",
2291 sizeof(adapter->i2c_adap.name));
2292 status = i2c_bit_add_bus(&adapter->i2c_adap);
2293 return status;
2294}
2295
2296/**
2297 * igb_probe - Device Initialization Routine
2298 * @pdev: PCI device information struct
2299 * @ent: entry in igb_pci_tbl
2300 *
2301 * Returns 0 on success, negative on failure
2302 *
2303 * igb_probe initializes an adapter identified by a pci_dev structure.
2304 * The OS initialization, configuring of the adapter private structure,
2305 * and a hardware reset occur.
2306 **/
2307static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2308{
2309 struct net_device *netdev;
2310 struct igb_adapter *adapter;
2311 struct e1000_hw *hw;
2312 u16 eeprom_data = 0;
2313 s32 ret_val;
2314 static int global_quad_port_a; /* global quad port a indication */
2315 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
2316 int err, pci_using_dac;
2317 u8 part_str[E1000_PBANUM_LENGTH];
2318
2319 /* Catch broken hardware that put the wrong VF device ID in
2320 * the PCIe SR-IOV capability.
2321 */
2322 if (pdev->is_virtfn) {
2323 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
2324 pci_name(pdev), pdev->vendor, pdev->device);
2325 return -EINVAL;
2326 }
2327
2328 err = pci_enable_device_mem(pdev);
2329 if (err)
2330 return err;
2331
2332 pci_using_dac = 0;
2333 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2334 if (!err) {
2335 pci_using_dac = 1;
2336 } else {
2337 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2338 if (err) {
2339 dev_err(&pdev->dev,
2340 "No usable DMA configuration, aborting\n");
2341 goto err_dma;
2342 }
2343 }
2344
2345 err = pci_request_mem_regions(pdev, igb_driver_name);
2346 if (err)
2347 goto err_pci_reg;
2348
2349 pci_enable_pcie_error_reporting(pdev);
2350
2351 pci_set_master(pdev);
2352 pci_save_state(pdev);
2353
2354 err = -ENOMEM;
2355 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
2356 IGB_MAX_TX_QUEUES);
2357 if (!netdev)
2358 goto err_alloc_etherdev;
2359
2360 SET_NETDEV_DEV(netdev, &pdev->dev);
2361
2362 pci_set_drvdata(pdev, netdev);
2363 adapter = netdev_priv(netdev);
2364 adapter->netdev = netdev;
2365 adapter->pdev = pdev;
2366 hw = &adapter->hw;
2367 hw->back = adapter;
2368 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
2369
2370 err = -EIO;
2371 adapter->io_addr = pci_iomap(pdev, 0, 0);
2372 if (!adapter->io_addr)
2373 goto err_ioremap;
2374 /* hw->hw_addr can be altered, we'll use adapter->io_addr for unmap */
2375 hw->hw_addr = adapter->io_addr;
2376
2377 netdev->netdev_ops = &igb_netdev_ops;
2378 igb_set_ethtool_ops(netdev);
2379 netdev->watchdog_timeo = 5 * HZ;
2380
2381 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
2382
2383 netdev->mem_start = pci_resource_start(pdev, 0);
2384 netdev->mem_end = pci_resource_end(pdev, 0);
2385
2386 /* PCI config space info */
2387 hw->vendor_id = pdev->vendor;
2388 hw->device_id = pdev->device;
2389 hw->revision_id = pdev->revision;
2390 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2391 hw->subsystem_device_id = pdev->subsystem_device;
2392
2393 /* Copy the default MAC, PHY and NVM function pointers */
2394 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
2395 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
2396 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
2397 /* Initialize skew-specific constants */
2398 err = ei->get_invariants(hw);
2399 if (err)
2400 goto err_sw_init;
2401
2402 /* setup the private structure */
2403 err = igb_sw_init(adapter);
2404 if (err)
2405 goto err_sw_init;
2406
2407 igb_get_bus_info_pcie(hw);
2408
2409 hw->phy.autoneg_wait_to_complete = false;
2410
2411 /* Copper options */
2412 if (hw->phy.media_type == e1000_media_type_copper) {
2413 hw->phy.mdix = AUTO_ALL_MODES;
2414 hw->phy.disable_polarity_correction = false;
2415 hw->phy.ms_type = e1000_ms_hw_default;
2416 }
2417
2418 if (igb_check_reset_block(hw))
2419 dev_info(&pdev->dev,
2420 "PHY reset is blocked due to SOL/IDER session.\n");
2421
2422	/* features is initialized to 0 in allocation; it might have bits
2423	 * set by igb_sw_init, so we should use an or instead of an
2424	 * assignment.
2425	 */
2426 netdev->features |= NETIF_F_SG |
2427 NETIF_F_TSO |
2428 NETIF_F_TSO6 |
2429 NETIF_F_RXHASH |
2430 NETIF_F_RXCSUM |
2431 NETIF_F_HW_CSUM;
2432
2433 if (hw->mac.type >= e1000_82576)
2434 netdev->features |= NETIF_F_SCTP_CRC;
2435
2436#define IGB_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
2437 NETIF_F_GSO_GRE_CSUM | \
2438 NETIF_F_GSO_IPXIP4 | \
2439 NETIF_F_GSO_IPXIP6 | \
2440 NETIF_F_GSO_UDP_TUNNEL | \
2441 NETIF_F_GSO_UDP_TUNNEL_CSUM)
2442
2443 netdev->gso_partial_features = IGB_GSO_PARTIAL_FEATURES;
2444 netdev->features |= NETIF_F_GSO_PARTIAL | IGB_GSO_PARTIAL_FEATURES;
2445
2446 /* copy netdev features into list of user selectable features */
2447 netdev->hw_features |= netdev->features |
2448 NETIF_F_HW_VLAN_CTAG_RX |
2449 NETIF_F_HW_VLAN_CTAG_TX |
2450 NETIF_F_RXALL;
2451
2452 if (hw->mac.type >= e1000_i350)
2453 netdev->hw_features |= NETIF_F_NTUPLE;
2454
2455 if (pci_using_dac)
2456 netdev->features |= NETIF_F_HIGHDMA;
2457
2458 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
2459 netdev->mpls_features |= NETIF_F_HW_CSUM;
2460 netdev->hw_enc_features |= netdev->vlan_features;
2461
2462 /* set this bit last since it cannot be part of vlan_features */
2463 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
2464 NETIF_F_HW_VLAN_CTAG_RX |
2465 NETIF_F_HW_VLAN_CTAG_TX;
2466
2467 netdev->priv_flags |= IFF_SUPP_NOFCS;
2468
2469 netdev->priv_flags |= IFF_UNICAST_FLT;
2470
2471 /* MTU range: 68 - 9216 */
2472 netdev->min_mtu = ETH_MIN_MTU;
2473 netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
2474
2475 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
2476
2477 /* before reading the NVM, reset the controller to put the device in a
2478 * known good starting state
2479 */
2480 hw->mac.ops.reset_hw(hw);
2481
2482	/* make sure the NVM is good; i211/i210 parts can have special NVM
2483 * that doesn't contain a checksum
2484 */
2485 switch (hw->mac.type) {
2486 case e1000_i210:
2487 case e1000_i211:
2488 if (igb_get_flash_presence_i210(hw)) {
2489 if (hw->nvm.ops.validate(hw) < 0) {
2490 dev_err(&pdev->dev,
2491 "The NVM Checksum Is Not Valid\n");
2492 err = -EIO;
2493 goto err_eeprom;
2494 }
2495 }
2496 break;
2497 default:
2498 if (hw->nvm.ops.validate(hw) < 0) {
2499 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
2500 err = -EIO;
2501 goto err_eeprom;
2502 }
2503 break;
2504 }
2505
2506 if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
2507 /* copy the MAC address out of the NVM */
2508 if (hw->mac.ops.read_mac_addr(hw))
2509 dev_err(&pdev->dev, "NVM Read Error\n");
2510 }
2511
2512 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
2513
2514 if (!is_valid_ether_addr(netdev->dev_addr)) {
2515 dev_err(&pdev->dev, "Invalid MAC Address\n");
2516 err = -EIO;
2517 goto err_eeprom;
2518 }
2519
2520 /* get firmware version for ethtool -i */
2521 igb_set_fw_version(adapter);
2522
2523 /* configure RXPBSIZE and TXPBSIZE */
2524 if (hw->mac.type == e1000_i210) {
2525 wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
2526 wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
2527 }
2528
2529 setup_timer(&adapter->watchdog_timer, igb_watchdog,
2530 (unsigned long) adapter);
2531 setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
2532 (unsigned long) adapter);
2533
2534 INIT_WORK(&adapter->reset_task, igb_reset_task);
2535 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
2536
2537 /* Initialize link properties that are user-changeable */
2538 adapter->fc_autoneg = true;
2539 hw->mac.autoneg = true;
2540 hw->phy.autoneg_advertised = 0x2f;
2541
2542 hw->fc.requested_mode = e1000_fc_default;
2543 hw->fc.current_mode = e1000_fc_default;
2544
2545 igb_validate_mdi_setting(hw);
2546
2547 /* By default, support wake on port A */
2548 if (hw->bus.func == 0)
2549 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
2550
2551 /* Check the NVM for wake support on non-port A ports */
2552 if (hw->mac.type >= e1000_82580)
2553 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
2554 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
2555 &eeprom_data);
2556 else if (hw->bus.func == 1)
2557 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
2558
2559 if (eeprom_data & IGB_EEPROM_APME)
2560 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
2561
2562 /* now that we have the eeprom settings, apply the special cases where
2563 * the eeprom may be wrong or the board simply won't support wake on
2564 * lan on a particular port
2565 */
2566 switch (pdev->device) {
2567 case E1000_DEV_ID_82575GB_QUAD_COPPER:
2568 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
2569 break;
2570 case E1000_DEV_ID_82575EB_FIBER_SERDES:
2571 case E1000_DEV_ID_82576_FIBER:
2572 case E1000_DEV_ID_82576_SERDES:
2573 /* Wake events only supported on port A for dual fiber
2574 * regardless of eeprom setting
2575 */
2576 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
2577 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
2578 break;
2579 case E1000_DEV_ID_82576_QUAD_COPPER:
2580 case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
2581 /* if quad port adapter, disable WoL on all but port A */
2582 if (global_quad_port_a != 0)
2583 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
2584 else
2585 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
2586 /* Reset for multiple quad port adapters */
2587 if (++global_quad_port_a == 4)
2588 global_quad_port_a = 0;
2589 break;
2590 default:
2591 /* If the device can't wake, don't set software support */
2592 if (!device_can_wakeup(&adapter->pdev->dev))
2593 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
2594 }
2595
2596 /* initialize the wol settings based on the eeprom settings */
2597 if (adapter->flags & IGB_FLAG_WOL_SUPPORTED)
2598 adapter->wol |= E1000_WUFC_MAG;
2599
2600 /* Some vendors want WoL disabled by default, but still supported */
2601 if ((hw->mac.type == e1000_i350) &&
2602 (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
2603 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
2604 adapter->wol = 0;
2605 }
2606
2607	/* Some vendors want the ability to use the EEPROM setting as
2608 * enable/disable only, and not for capability
2609 */
2610 if (((hw->mac.type == e1000_i350) ||
2611 (hw->mac.type == e1000_i354)) &&
2612 (pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)) {
2613 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
2614 adapter->wol = 0;
2615 }
2616 if (hw->mac.type == e1000_i350) {
2617 if (((pdev->subsystem_device == 0x5001) ||
2618 (pdev->subsystem_device == 0x5002)) &&
2619 (hw->bus.func == 0)) {
2620 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
2621 adapter->wol = 0;
2622 }
2623 if (pdev->subsystem_device == 0x1F52)
2624 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
2625 }
2626
2627 device_set_wakeup_enable(&adapter->pdev->dev,
2628 adapter->flags & IGB_FLAG_WOL_SUPPORTED);
2629
2630 /* reset the hardware with the new settings */
2631 igb_reset(adapter);
2632
2633 /* Init the I2C interface */
2634 err = igb_init_i2c(adapter);
2635 if (err) {
2636 dev_err(&pdev->dev, "failed to init i2c interface\n");
2637 goto err_eeprom;
2638 }
2639
2640 /* let the f/w know that the h/w is now under the control of the
2641 * driver.
2642 */
2643 igb_get_hw_control(adapter);
2644
2645 strcpy(netdev->name, "eth%d");
2646 err = register_netdev(netdev);
2647 if (err)
2648 goto err_register;
2649
2650 /* carrier off reporting is important to ethtool even BEFORE open */
2651 netif_carrier_off(netdev);
2652
2653#ifdef CONFIG_IGB_DCA
2654 if (dca_add_requester(&pdev->dev) == 0) {
2655 adapter->flags |= IGB_FLAG_DCA_ENABLED;
2656 dev_info(&pdev->dev, "DCA enabled\n");
2657 igb_setup_dca(adapter);
2658 }
2659
2660#endif
2661#ifdef CONFIG_IGB_HWMON
2662 /* Initialize the thermal sensor on i350 devices. */
2663 if (hw->mac.type == e1000_i350 && hw->bus.func == 0) {
2664 u16 ets_word;
2665
2666 /* Read the NVM to determine if this i350 device supports an
2667 * external thermal sensor.
2668 */
2669 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word);
2670 if (ets_word != 0x0000 && ets_word != 0xFFFF)
2671 adapter->ets = true;
2672 else
2673 adapter->ets = false;
2674 if (igb_sysfs_init(adapter))
2675 dev_err(&pdev->dev,
2676 "failed to allocate sysfs resources\n");
2677 } else {
2678 adapter->ets = false;
2679 }
2680#endif
2681 /* Check if Media Autosense is enabled */
2682 adapter->ei = *ei;
2683 if (hw->dev_spec._82575.mas_capable)
2684 igb_init_mas(adapter);
2685
2686 /* do hw tstamp init after resetting */
2687 igb_ptp_init(adapter);
2688
2689 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
2690 /* print bus type/speed/width info, not applicable to i354 */
2691 if (hw->mac.type != e1000_i354) {
2692 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
2693 netdev->name,
2694 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
2695 (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
2696 "unknown"),
2697 ((hw->bus.width == e1000_bus_width_pcie_x4) ?
2698 "Width x4" :
2699 (hw->bus.width == e1000_bus_width_pcie_x2) ?
2700 "Width x2" :
2701 (hw->bus.width == e1000_bus_width_pcie_x1) ?
2702 "Width x1" : "unknown"), netdev->dev_addr);
2703 }
2704
2705 if ((hw->mac.type >= e1000_i210 ||
2706 igb_get_flash_presence_i210(hw))) {
2707 ret_val = igb_read_part_string(hw, part_str,
2708 E1000_PBANUM_LENGTH);
2709 } else {
2710 ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND;
2711 }
2712
2713 if (ret_val)
2714 strcpy(part_str, "Unknown");
2715 dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
2716 dev_info(&pdev->dev,
2717 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
2718 (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" :
2719 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
2720 adapter->num_rx_queues, adapter->num_tx_queues);
2721 if (hw->phy.media_type == e1000_media_type_copper) {
2722 switch (hw->mac.type) {
2723 case e1000_i350:
2724 case e1000_i210:
2725 case e1000_i211:
2726 /* Enable EEE for internal copper PHY devices */
2727 err = igb_set_eee_i350(hw, true, true);
2728 if ((!err) &&
2729 (!hw->dev_spec._82575.eee_disable)) {
2730 adapter->eee_advert =
2731 MDIO_EEE_100TX | MDIO_EEE_1000T;
2732 adapter->flags |= IGB_FLAG_EEE;
2733 }
2734 break;
2735 case e1000_i354:
2736 if ((rd32(E1000_CTRL_EXT) &
2737 E1000_CTRL_EXT_LINK_MODE_SGMII)) {
2738 err = igb_set_eee_i354(hw, true, true);
2739 if ((!err) &&
2740 (!hw->dev_spec._82575.eee_disable)) {
2741 adapter->eee_advert =
2742 MDIO_EEE_100TX | MDIO_EEE_1000T;
2743 adapter->flags |= IGB_FLAG_EEE;
2744 }
2745 }
2746 break;
2747 default:
2748 break;
2749 }
2750 }
2751 pm_runtime_put_noidle(&pdev->dev);
2752 return 0;
2753
2754err_register:
2755 igb_release_hw_control(adapter);
2756 memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap));
2757err_eeprom:
2758 if (!igb_check_reset_block(hw))
2759 igb_reset_phy(hw);
2760
2761 if (hw->flash_address)
2762 iounmap(hw->flash_address);
2763err_sw_init:
2764 kfree(adapter->shadow_vfta);
2765 igb_clear_interrupt_scheme(adapter);
2766#ifdef CONFIG_PCI_IOV
2767 igb_disable_sriov(pdev);
2768#endif
2769 pci_iounmap(pdev, adapter->io_addr);
2770err_ioremap:
2771 free_netdev(netdev);
2772err_alloc_etherdev:
2773 pci_release_mem_regions(pdev);
2774err_pci_reg:
2775err_dma:
2776 pci_disable_device(pdev);
2777 return err;
2778}
2779
2780#ifdef CONFIG_PCI_IOV
2781static int igb_disable_sriov(struct pci_dev *pdev)
2782{
2783 struct net_device *netdev = pci_get_drvdata(pdev);
2784 struct igb_adapter *adapter = netdev_priv(netdev);
2785 struct e1000_hw *hw = &adapter->hw;
2786
2787 /* reclaim resources allocated to VFs */
2788 if (adapter->vf_data) {
2789 /* disable iov and allow time for transactions to clear */
2790 if (pci_vfs_assigned(pdev)) {
2791 dev_warn(&pdev->dev,
2792 "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n");
2793 return -EPERM;
2794 } else {
2795 pci_disable_sriov(pdev);
2796 msleep(500);
2797 }
2798
2799 kfree(adapter->vf_data);
2800 adapter->vf_data = NULL;
2801 adapter->vfs_allocated_count = 0;
2802 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
2803 wrfl();
2804 msleep(100);
2805 dev_info(&pdev->dev, "IOV Disabled\n");
2806
2807 /* Re-enable DMA Coalescing flag since IOV is turned off */
2808 adapter->flags |= IGB_FLAG_DMAC;
2809 }
2810
2811 return 0;
2812}
2813
2814static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
2815{
2816 struct net_device *netdev = pci_get_drvdata(pdev);
2817 struct igb_adapter *adapter = netdev_priv(netdev);
2818 int old_vfs = pci_num_vf(pdev);
2819 int err = 0;
2820 int i;
2821
2822 if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) {
2823 err = -EPERM;
2824 goto out;
2825 }
2826 if (!num_vfs)
2827 goto out;
2828
2829 if (old_vfs) {
2830 dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n",
2831 old_vfs, max_vfs);
2832 adapter->vfs_allocated_count = old_vfs;
2833 } else
2834 adapter->vfs_allocated_count = num_vfs;
2835
2836 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
2837 sizeof(struct vf_data_storage), GFP_KERNEL);
2838
2839 /* if allocation failed then we do not support SR-IOV */
2840 if (!adapter->vf_data) {
2841 adapter->vfs_allocated_count = 0;
2842 dev_err(&pdev->dev,
2843 "Unable to allocate memory for VF Data Storage\n");
2844 err = -ENOMEM;
2845 goto out;
2846 }
2847
2848 /* only call pci_enable_sriov() if no VFs are allocated already */
2849 if (!old_vfs) {
2850 err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
2851 if (err)
2852 goto err_out;
2853 }
2854 dev_info(&pdev->dev, "%d VFs allocated\n",
2855 adapter->vfs_allocated_count);
2856 for (i = 0; i < adapter->vfs_allocated_count; i++)
2857 igb_vf_configure(adapter, i);
2858
2859 /* DMA Coalescing is not supported in IOV mode. */
2860 adapter->flags &= ~IGB_FLAG_DMAC;
2861 goto out;
2862
2863err_out:
2864 kfree(adapter->vf_data);
2865 adapter->vf_data = NULL;
2866 adapter->vfs_allocated_count = 0;
2867out:
2868 return err;
2869}
2870
2871#endif
2872/**
2873 * igb_remove_i2c - Cleanup I2C interface
2874 * @adapter: pointer to adapter structure
2875 **/
2876static void igb_remove_i2c(struct igb_adapter *adapter)
2877{
2878 /* free the adapter bus structure */
2879 i2c_del_adapter(&adapter->i2c_adap);
2880}
2881
2882/**
2883 * igb_remove - Device Removal Routine
2884 * @pdev: PCI device information struct
2885 *
 * igb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
2890 **/
2891static void igb_remove(struct pci_dev *pdev)
2892{
2893 struct net_device *netdev = pci_get_drvdata(pdev);
2894 struct igb_adapter *adapter = netdev_priv(netdev);
2895 struct e1000_hw *hw = &adapter->hw;
2896
2897 pm_runtime_get_noresume(&pdev->dev);
2898#ifdef CONFIG_IGB_HWMON
2899 igb_sysfs_exit(adapter);
2900#endif
2901 igb_remove_i2c(adapter);
2902 igb_ptp_stop(adapter);
2903 /* The watchdog timer may be rescheduled, so explicitly
2904 * disable watchdog from being rescheduled.
2905 */
2906 set_bit(__IGB_DOWN, &adapter->state);
2907 del_timer_sync(&adapter->watchdog_timer);
2908 del_timer_sync(&adapter->phy_info_timer);
2909
2910 cancel_work_sync(&adapter->reset_task);
2911 cancel_work_sync(&adapter->watchdog_task);
2912
2913#ifdef CONFIG_IGB_DCA
2914 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
2915 dev_info(&pdev->dev, "DCA disabled\n");
2916 dca_remove_requester(&pdev->dev);
2917 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
2918 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
2919 }
2920#endif
2921
2922 /* Release control of h/w to f/w. If f/w is AMT enabled, this
2923 * would have already happened in close and is redundant.
2924 */
2925 igb_release_hw_control(adapter);
2926
2927#ifdef CONFIG_PCI_IOV
2928 igb_disable_sriov(pdev);
2929#endif
2930
2931 unregister_netdev(netdev);
2932
2933 igb_clear_interrupt_scheme(adapter);
2934
2935 pci_iounmap(pdev, adapter->io_addr);
2936 if (hw->flash_address)
2937 iounmap(hw->flash_address);
2938 pci_release_mem_regions(pdev);
2939
2940 kfree(adapter->shadow_vfta);
2941 free_netdev(netdev);
2942
2943 pci_disable_pcie_error_reporting(pdev);
2944
2945 pci_disable_device(pdev);
2946}
2947
2948/**
2949 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
2950 * @adapter: board private structure to initialize
2951 *
 * This function initializes the vf specific data storage and then attempts to
 * allocate the VFs. It is ordered this way because it is much more
 * expensive time-wise to disable SR-IOV than it is to allocate and free
 * the memory for the VFs.
2956 **/
2957static void igb_probe_vfs(struct igb_adapter *adapter)
2958{
2959#ifdef CONFIG_PCI_IOV
2960 struct pci_dev *pdev = adapter->pdev;
2961 struct e1000_hw *hw = &adapter->hw;
2962
2963 /* Virtualization features not supported on i210 family. */
2964 if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
2965 return;
2966
2967 /* Of the below we really only want the effect of getting
2968 * IGB_FLAG_HAS_MSIX set (if available), without which
2969 * igb_enable_sriov() has no effect.
2970 */
2971 igb_set_interrupt_capability(adapter, true);
2972 igb_reset_interrupt_capability(adapter);
2973
2974 pci_sriov_set_totalvfs(pdev, 7);
2975 igb_enable_sriov(pdev, max_vfs);
2976
2977#endif /* CONFIG_PCI_IOV */
2978}
2979
2980static void igb_init_queue_configuration(struct igb_adapter *adapter)
2981{
2982 struct e1000_hw *hw = &adapter->hw;
2983 u32 max_rss_queues;
2984
2985 /* Determine the maximum number of RSS queues supported. */
2986 switch (hw->mac.type) {
2987 case e1000_i211:
2988 max_rss_queues = IGB_MAX_RX_QUEUES_I211;
2989 break;
2990 case e1000_82575:
2991 case e1000_i210:
2992 max_rss_queues = IGB_MAX_RX_QUEUES_82575;
2993 break;
2994 case e1000_i350:
2995 /* I350 cannot do RSS and SR-IOV at the same time */
2996 if (!!adapter->vfs_allocated_count) {
2997 max_rss_queues = 1;
2998 break;
2999 }
3000 /* fall through */
3001 case e1000_82576:
3002 if (!!adapter->vfs_allocated_count) {
3003 max_rss_queues = 2;
3004 break;
3005 }
3006 /* fall through */
3007 case e1000_82580:
3008 case e1000_i354:
3009 default:
3010 max_rss_queues = IGB_MAX_RX_QUEUES;
3011 break;
3012 }
3013
3014 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
3015
3016 igb_set_flag_queue_pairs(adapter, max_rss_queues);
3017}
3018
3019void igb_set_flag_queue_pairs(struct igb_adapter *adapter,
3020 const u32 max_rss_queues)
3021{
3022 struct e1000_hw *hw = &adapter->hw;
3023
3024 /* Determine if we need to pair queues. */
3025 switch (hw->mac.type) {
3026 case e1000_82575:
3027 case e1000_i211:
3028 /* Device supports enough interrupts without queue pairing. */
3029 break;
3030 case e1000_82576:
3031 case e1000_82580:
3032 case e1000_i350:
3033 case e1000_i354:
3034 case e1000_i210:
3035 default:
3036 /* If rss_queues > half of max_rss_queues, pair the queues in
3037 * order to conserve interrupts due to limited supply.
3038 */
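		/* For example, assuming a part with max_rss_queues = 8,
		 * requesting 5 or more RSS queues pairs each Tx queue
		 * with an Rx queue on a shared interrupt vector.
		 */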
3039 if (adapter->rss_queues > (max_rss_queues / 2))
3040 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
3041 else
3042 adapter->flags &= ~IGB_FLAG_QUEUE_PAIRS;
3043 break;
3044 }
3045}
3046
3047/**
3048 * igb_sw_init - Initialize general software structures (struct igb_adapter)
3049 * @adapter: board private structure to initialize
3050 *
3051 * igb_sw_init initializes the Adapter private data structure.
3052 * Fields are initialized based on PCI device information and
3053 * OS network device settings (MTU size).
3054 **/
3055static int igb_sw_init(struct igb_adapter *adapter)
3056{
3057 struct e1000_hw *hw = &adapter->hw;
3058 struct net_device *netdev = adapter->netdev;
3059 struct pci_dev *pdev = adapter->pdev;
3060
3061 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
3062
3063 /* set default ring sizes */
3064 adapter->tx_ring_count = IGB_DEFAULT_TXD;
3065 adapter->rx_ring_count = IGB_DEFAULT_RXD;
3066
3067 /* set default ITR values */
3068 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
3069 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
3070
3071 /* set default work limits */
3072 adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
3073
3074 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
3075 VLAN_HLEN;
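	/* e.g. the default 1500 byte MTU gives
	 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522
	 */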
3076 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
3077
3078 spin_lock_init(&adapter->nfc_lock);
3079 spin_lock_init(&adapter->stats64_lock);
3080#ifdef CONFIG_PCI_IOV
3081 switch (hw->mac.type) {
3082 case e1000_82576:
3083 case e1000_i350:
3084 if (max_vfs > 7) {
3085 dev_warn(&pdev->dev,
3086 "Maximum of 7 VFs per PF, using max\n");
3087 max_vfs = adapter->vfs_allocated_count = 7;
		} else {
			adapter->vfs_allocated_count = max_vfs;
		}
3090 if (adapter->vfs_allocated_count)
3091 dev_warn(&pdev->dev,
3092 "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n");
3093 break;
3094 default:
3095 break;
3096 }
3097#endif /* CONFIG_PCI_IOV */
3098
3099 /* Assume MSI-X interrupts, will be checked during IRQ allocation */
3100 adapter->flags |= IGB_FLAG_HAS_MSIX;
3101
3102 igb_probe_vfs(adapter);
3103
3104 igb_init_queue_configuration(adapter);
3105
3106 /* Setup and initialize a copy of the hw vlan table array */
3107 adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
3108 GFP_ATOMIC);
3109
3110 /* This call may decrease the number of queues */
3111 if (igb_init_interrupt_scheme(adapter, true)) {
3112 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
3113 return -ENOMEM;
3114 }
3115
3116 /* Explicitly disable IRQ since the NIC can be in any state. */
3117 igb_irq_disable(adapter);
3118
3119 if (hw->mac.type >= e1000_i350)
3120 adapter->flags &= ~IGB_FLAG_DMAC;
3121
3122 set_bit(__IGB_DOWN, &adapter->state);
3123 return 0;
3124}
3125
3126/**
3127 * igb_open - Called when a network interface is made active
3128 * @netdev: network interface device structure
3129 *
3130 * Returns 0 on success, negative value on failure
3131 *
3132 * The open entry point is called when a network interface is made
3133 * active by the system (IFF_UP). At this point all resources needed
3134 * for transmit and receive operations are allocated, the interrupt
3135 * handler is registered with the OS, the watchdog timer is started,
3136 * and the stack is notified that the interface is ready.
3137 **/
3138static int __igb_open(struct net_device *netdev, bool resuming)
3139{
3140 struct igb_adapter *adapter = netdev_priv(netdev);
3141 struct e1000_hw *hw = &adapter->hw;
3142 struct pci_dev *pdev = adapter->pdev;
3143 int err;
3144 int i;
3145
3146 /* disallow open during test */
3147 if (test_bit(__IGB_TESTING, &adapter->state)) {
3148 WARN_ON(resuming);
3149 return -EBUSY;
3150 }
3151
3152 if (!resuming)
3153 pm_runtime_get_sync(&pdev->dev);
3154
3155 netif_carrier_off(netdev);
3156
3157 /* allocate transmit descriptors */
3158 err = igb_setup_all_tx_resources(adapter);
3159 if (err)
3160 goto err_setup_tx;
3161
3162 /* allocate receive descriptors */
3163 err = igb_setup_all_rx_resources(adapter);
3164 if (err)
3165 goto err_setup_rx;
3166
3167 igb_power_up_link(adapter);
3168
	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to set up our
	 * clean_rx handler before we do so.
	 */
3174 igb_configure(adapter);
3175
3176 err = igb_request_irq(adapter);
3177 if (err)
3178 goto err_req_irq;
3179
3180 /* Notify the stack of the actual queue counts. */
3181 err = netif_set_real_num_tx_queues(adapter->netdev,
3182 adapter->num_tx_queues);
3183 if (err)
3184 goto err_set_queues;
3185
3186 err = netif_set_real_num_rx_queues(adapter->netdev,
3187 adapter->num_rx_queues);
3188 if (err)
3189 goto err_set_queues;
3190
3191 /* From here on the code is the same as igb_up() */
3192 clear_bit(__IGB_DOWN, &adapter->state);
3193
3194 for (i = 0; i < adapter->num_q_vectors; i++)
3195 napi_enable(&(adapter->q_vector[i]->napi));
3196
3197 /* Clear any pending interrupts. */
3198 rd32(E1000_ICR);
3199
3200 igb_irq_enable(adapter);
3201
3202 /* notify VFs that reset has been completed */
3203 if (adapter->vfs_allocated_count) {
3204 u32 reg_data = rd32(E1000_CTRL_EXT);
3205
3206 reg_data |= E1000_CTRL_EXT_PFRSTD;
3207 wr32(E1000_CTRL_EXT, reg_data);
3208 }
3209
3210 netif_tx_start_all_queues(netdev);
3211
3212 if (!resuming)
3213 pm_runtime_put(&pdev->dev);
3214
3215 /* start the watchdog. */
3216 hw->mac.get_link_status = 1;
3217 schedule_work(&adapter->watchdog_task);
3218
3219 return 0;
3220
3221err_set_queues:
3222 igb_free_irq(adapter);
3223err_req_irq:
3224 igb_release_hw_control(adapter);
3225 igb_power_down_link(adapter);
3226 igb_free_all_rx_resources(adapter);
3227err_setup_rx:
3228 igb_free_all_tx_resources(adapter);
3229err_setup_tx:
3230 igb_reset(adapter);
3231 if (!resuming)
3232 pm_runtime_put(&pdev->dev);
3233
3234 return err;
3235}
3236
3237int igb_open(struct net_device *netdev)
3238{
3239 return __igb_open(netdev, false);
3240}
3241
3242/**
3243 * igb_close - Disables a network interface
3244 * @netdev: network interface device structure
3245 *
3246 * Returns 0, this is not allowed to fail
3247 *
3248 * The close entry point is called when an interface is de-activated
3249 * by the OS. The hardware is still under the driver's control, but
3250 * needs to be disabled. A global MAC reset is issued to stop the
3251 * hardware, and all transmit and receive resources are freed.
3252 **/
3253static int __igb_close(struct net_device *netdev, bool suspending)
3254{
3255 struct igb_adapter *adapter = netdev_priv(netdev);
3256 struct pci_dev *pdev = adapter->pdev;
3257
3258 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
3259
3260 if (!suspending)
3261 pm_runtime_get_sync(&pdev->dev);
3262
3263 igb_down(adapter);
3264 igb_free_irq(adapter);
3265
3266 igb_nfc_filter_exit(adapter);
3267
3268 igb_free_all_tx_resources(adapter);
3269 igb_free_all_rx_resources(adapter);
3270
3271 if (!suspending)
3272 pm_runtime_put_sync(&pdev->dev);
3273 return 0;
3274}
3275
3276int igb_close(struct net_device *netdev)
3277{
3278 return __igb_close(netdev, false);
3279}
3280
3281/**
3282 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
3283 * @tx_ring: tx descriptor ring (for a specific queue) to setup
3284 *
3285 * Return 0 on success, negative on failure
3286 **/
3287int igb_setup_tx_resources(struct igb_ring *tx_ring)
3288{
3289 struct device *dev = tx_ring->dev;
3290 int size;
3291
3292 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
3293
3294 tx_ring->tx_buffer_info = vzalloc(size);
3295 if (!tx_ring->tx_buffer_info)
3296 goto err;
3297
3298 /* round up to nearest 4K */
3299 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
3300 tx_ring->size = ALIGN(tx_ring->size, 4096);
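	/* e.g. 256 descriptors at 16 bytes each is exactly 4096 bytes,
	 * while 320 descriptors (5120 bytes) would round up to 8192
	 */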
3301
3302 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
3303 &tx_ring->dma, GFP_KERNEL);
3304 if (!tx_ring->desc)
3305 goto err;
3306
3307 tx_ring->next_to_use = 0;
3308 tx_ring->next_to_clean = 0;
3309
3310 return 0;
3311
3312err:
3313 vfree(tx_ring->tx_buffer_info);
3314 tx_ring->tx_buffer_info = NULL;
3315 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
3316 return -ENOMEM;
3317}
3318
3319/**
3320 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
3321 * (Descriptors) for all queues
3322 * @adapter: board private structure
3323 *
3324 * Return 0 on success, negative on failure
3325 **/
3326static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
3327{
3328 struct pci_dev *pdev = adapter->pdev;
3329 int i, err = 0;
3330
3331 for (i = 0; i < adapter->num_tx_queues; i++) {
3332 err = igb_setup_tx_resources(adapter->tx_ring[i]);
3333 if (err) {
3334 dev_err(&pdev->dev,
3335 "Allocation for Tx Queue %u failed\n", i);
3336 for (i--; i >= 0; i--)
3337 igb_free_tx_resources(adapter->tx_ring[i]);
3338 break;
3339 }
3340 }
3341
3342 return err;
3343}
3344
3345/**
3346 * igb_setup_tctl - configure the transmit control registers
3347 * @adapter: Board private structure
3348 **/
3349void igb_setup_tctl(struct igb_adapter *adapter)
3350{
3351 struct e1000_hw *hw = &adapter->hw;
3352 u32 tctl;
3353
3354 /* disable queue 0 which is enabled by default on 82575 and 82576 */
3355 wr32(E1000_TXDCTL(0), 0);
3356
3357 /* Program the Transmit Control Register */
3358 tctl = rd32(E1000_TCTL);
3359 tctl &= ~E1000_TCTL_CT;
3360 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
3361 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
3362
3363 igb_config_collision_dist(hw);
3364
3365 /* Enable transmits */
3366 tctl |= E1000_TCTL_EN;
3367
3368 wr32(E1000_TCTL, tctl);
3369}
3370
3371/**
3372 * igb_configure_tx_ring - Configure transmit ring after Reset
3373 * @adapter: board private structure
3374 * @ring: tx ring to configure
3375 *
3376 * Configure a transmit ring after a reset.
3377 **/
3378void igb_configure_tx_ring(struct igb_adapter *adapter,
3379 struct igb_ring *ring)
3380{
3381 struct e1000_hw *hw = &adapter->hw;
3382 u32 txdctl = 0;
3383 u64 tdba = ring->dma;
3384 int reg_idx = ring->reg_idx;
3385
3386 /* disable the queue */
3387 wr32(E1000_TXDCTL(reg_idx), 0);
3388 wrfl();
3389 mdelay(10);
3390
3391 wr32(E1000_TDLEN(reg_idx),
3392 ring->count * sizeof(union e1000_adv_tx_desc));
3393 wr32(E1000_TDBAL(reg_idx),
3394 tdba & 0x00000000ffffffffULL);
3395 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
3396
3397 ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
3398 wr32(E1000_TDH(reg_idx), 0);
3399 writel(0, ring->tail);
3400
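	/* prefetch, host and write-back thresholds live in bits 5:0,
	 * 13:8 and 21:16 of TXDCTL respectively, hence the shifts
	 */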
3401 txdctl |= IGB_TX_PTHRESH;
3402 txdctl |= IGB_TX_HTHRESH << 8;
3403 txdctl |= IGB_TX_WTHRESH << 16;
3404
3405 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
3406 wr32(E1000_TXDCTL(reg_idx), txdctl);
3407}
3408
3409/**
3410 * igb_configure_tx - Configure transmit Unit after Reset
3411 * @adapter: board private structure
3412 *
3413 * Configure the Tx unit of the MAC after a reset.
3414 **/
3415static void igb_configure_tx(struct igb_adapter *adapter)
3416{
3417 int i;
3418
3419 for (i = 0; i < adapter->num_tx_queues; i++)
3420 igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
3421}
3422
3423/**
3424 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
3425 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
3426 *
3427 * Returns 0 on success, negative on failure
3428 **/
3429int igb_setup_rx_resources(struct igb_ring *rx_ring)
3430{
3431 struct device *dev = rx_ring->dev;
3432 int size;
3433
3434 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
3435
3436 rx_ring->rx_buffer_info = vzalloc(size);
3437 if (!rx_ring->rx_buffer_info)
3438 goto err;
3439
3440 /* Round up to nearest 4K */
3441 rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
3442 rx_ring->size = ALIGN(rx_ring->size, 4096);
3443
3444 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
3445 &rx_ring->dma, GFP_KERNEL);
3446 if (!rx_ring->desc)
3447 goto err;
3448
3449 rx_ring->next_to_alloc = 0;
3450 rx_ring->next_to_clean = 0;
3451 rx_ring->next_to_use = 0;
3452
3453 return 0;
3454
3455err:
3456 vfree(rx_ring->rx_buffer_info);
3457 rx_ring->rx_buffer_info = NULL;
3458 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
3459 return -ENOMEM;
3460}
3461
3462/**
3463 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
3464 * (Descriptors) for all queues
3465 * @adapter: board private structure
3466 *
3467 * Return 0 on success, negative on failure
3468 **/
3469static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
3470{
3471 struct pci_dev *pdev = adapter->pdev;
3472 int i, err = 0;
3473
3474 for (i = 0; i < adapter->num_rx_queues; i++) {
3475 err = igb_setup_rx_resources(adapter->rx_ring[i]);
3476 if (err) {
3477 dev_err(&pdev->dev,
3478 "Allocation for Rx Queue %u failed\n", i);
3479 for (i--; i >= 0; i--)
3480 igb_free_rx_resources(adapter->rx_ring[i]);
3481 break;
3482 }
3483 }
3484
3485 return err;
3486}
3487
3488/**
3489 * igb_setup_mrqc - configure the multiple receive queue control registers
3490 * @adapter: Board private structure
3491 **/
3492static void igb_setup_mrqc(struct igb_adapter *adapter)
3493{
3494 struct e1000_hw *hw = &adapter->hw;
3495 u32 mrqc, rxcsum;
3496 u32 j, num_rx_queues;
3497 u32 rss_key[10];
3498
3499 netdev_rss_key_fill(rss_key, sizeof(rss_key));
3500 for (j = 0; j < 10; j++)
3501 wr32(E1000_RSSRK(j), rss_key[j]);
3502
3503 num_rx_queues = adapter->rss_queues;
3504
3505 switch (hw->mac.type) {
3506 case e1000_82576:
3507 /* 82576 supports 2 RSS queues for SR-IOV */
3508 if (adapter->vfs_allocated_count)
3509 num_rx_queues = 2;
3510 break;
3511 default:
3512 break;
3513 }
3514
3515 if (adapter->rss_indir_tbl_init != num_rx_queues) {
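		/* spread entries evenly: entry j maps to queue
		 * (j * num_rx_queues) / IGB_RETA_SIZE, e.g. with 4 queues
		 * and 128 entries, 0-31 hit queue 0, 32-63 queue 1, etc.
		 */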
3516 for (j = 0; j < IGB_RETA_SIZE; j++)
3517 adapter->rss_indir_tbl[j] =
3518 (j * num_rx_queues) / IGB_RETA_SIZE;
3519 adapter->rss_indir_tbl_init = num_rx_queues;
3520 }
3521 igb_write_rss_indir_tbl(adapter);
3522
3523 /* Disable raw packet checksumming so that RSS hash is placed in
3524 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
3525 * offloads as they are enabled by default
3526 */
3527 rxcsum = rd32(E1000_RXCSUM);
3528 rxcsum |= E1000_RXCSUM_PCSD;
3529
3530 if (adapter->hw.mac.type >= e1000_82576)
3531 /* Enable Receive Checksum Offload for SCTP */
3532 rxcsum |= E1000_RXCSUM_CRCOFL;
3533
3534 /* Don't need to set TUOFL or IPOFL, they default to 1 */
3535 wr32(E1000_RXCSUM, rxcsum);
3536
3537 /* Generate RSS hash based on packet types, TCP/UDP
3538 * port numbers and/or IPv4/v6 src and dst addresses
3539 */
3540 mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
3541 E1000_MRQC_RSS_FIELD_IPV4_TCP |
3542 E1000_MRQC_RSS_FIELD_IPV6 |
3543 E1000_MRQC_RSS_FIELD_IPV6_TCP |
3544 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
3545
3546 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
3547 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
3548 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
3549 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
3550
3551 /* If VMDq is enabled then we set the appropriate mode for that, else
3552 * we default to RSS so that an RSS hash is calculated per packet even
3553 * if we are only using one queue
3554 */
3555 if (adapter->vfs_allocated_count) {
3556 if (hw->mac.type > e1000_82575) {
3557 /* Set the default pool for the PF's first queue */
3558 u32 vtctl = rd32(E1000_VT_CTL);
3559
3560 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
3561 E1000_VT_CTL_DISABLE_DEF_POOL);
3562 vtctl |= adapter->vfs_allocated_count <<
3563 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
3564 wr32(E1000_VT_CTL, vtctl);
3565 }
3566 if (adapter->rss_queues > 1)
3567 mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_MQ;
3568 else
3569 mrqc |= E1000_MRQC_ENABLE_VMDQ;
3570 } else {
3571 if (hw->mac.type != e1000_i211)
3572 mrqc |= E1000_MRQC_ENABLE_RSS_MQ;
3573 }
3574 igb_vmm_control(adapter);
3575
3576 wr32(E1000_MRQC, mrqc);
3577}
3578
3579/**
3580 * igb_setup_rctl - configure the receive control registers
3581 * @adapter: Board private structure
3582 **/
3583void igb_setup_rctl(struct igb_adapter *adapter)
3584{
3585 struct e1000_hw *hw = &adapter->hw;
3586 u32 rctl;
3587
3588 rctl = rd32(E1000_RCTL);
3589
3590 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
3591 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
3592
3593 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
3594 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
3595
3596 /* enable stripping of CRC. It's unlikely this will break BMC
3597 * redirection as it did with e1000. Newer features require
3598 * that the HW strips the CRC.
3599 */
3600 rctl |= E1000_RCTL_SECRC;
3601
3602 /* disable store bad packets and clear size bits. */
3603 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
3604
3605 /* enable LPE to allow for reception of jumbo frames */
3606 rctl |= E1000_RCTL_LPE;
3607
3608 /* disable queue 0 to prevent tail write w/o re-config */
3609 wr32(E1000_RXDCTL(0), 0);
3610
3611 /* Attention!!! For SR-IOV PF driver operations you must enable
3612 * queue drop for all VF and PF queues to prevent head of line blocking
3613 * if an un-trusted VF does not provide descriptors to hardware.
3614 */
3615 if (adapter->vfs_allocated_count) {
3616 /* set all queue drop enable bits */
3617 wr32(E1000_QDE, ALL_QUEUES);
3618 }
3619
3620 /* This is useful for sniffing bad packets. */
3621 if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in igb_set_rx_mode
		 */
3625 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
3626 E1000_RCTL_BAM | /* RX All Bcast Pkts */
3627 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
3628
3629 rctl &= ~(E1000_RCTL_DPF | /* Allow filtered pause */
3630 E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
3631 /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
3632 * and that breaks VLANs.
3633 */
3634 }
3635
3636 wr32(E1000_RCTL, rctl);
3637}
3638
3639static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
3640 int vfn)
3641{
3642 struct e1000_hw *hw = &adapter->hw;
3643 u32 vmolr;
3644
3645 if (size > MAX_JUMBO_FRAME_SIZE)
3646 size = MAX_JUMBO_FRAME_SIZE;
3647
3648 vmolr = rd32(E1000_VMOLR(vfn));
3649 vmolr &= ~E1000_VMOLR_RLPML_MASK;
3650 vmolr |= size | E1000_VMOLR_LPE;
3651 wr32(E1000_VMOLR(vfn), vmolr);
3652
3653 return 0;
3654}
3655
3656static inline void igb_set_vf_vlan_strip(struct igb_adapter *adapter,
3657 int vfn, bool enable)
3658{
3659 struct e1000_hw *hw = &adapter->hw;
3660 u32 val, reg;
3661
3662 if (hw->mac.type < e1000_82576)
3663 return;
3664
3665 if (hw->mac.type == e1000_i350)
3666 reg = E1000_DVMOLR(vfn);
3667 else
3668 reg = E1000_VMOLR(vfn);
3669
3670 val = rd32(reg);
3671 if (enable)
3672 val |= E1000_VMOLR_STRVLAN;
3673 else
3674 val &= ~(E1000_VMOLR_STRVLAN);
3675 wr32(reg, val);
3676}
3677
3678static inline void igb_set_vmolr(struct igb_adapter *adapter,
3679 int vfn, bool aupe)
3680{
3681 struct e1000_hw *hw = &adapter->hw;
3682 u32 vmolr;
3683
	/* This register exists only on 82576 and newer, so on older
	 * devices we should exit and do nothing
	 */
3687 if (hw->mac.type < e1000_82576)
3688 return;
3689
3690 vmolr = rd32(E1000_VMOLR(vfn));
3691 if (aupe)
3692 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
3693 else
3694 vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
3695
3696 /* clear all bits that might not be set */
3697 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
3698
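	/* pools are numbered with the VFs first, so
	 * vfn == adapter->vfs_allocated_count identifies the PF's own
	 * default pool
	 */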
3699 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
3700 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
3701 /* for VMDq only allow the VFs and pool 0 to accept broadcast and
3702 * multicast packets
3703 */
3704 if (vfn <= adapter->vfs_allocated_count)
3705 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
3706
3707 wr32(E1000_VMOLR(vfn), vmolr);
3708}
3709
3710/**
3711 * igb_configure_rx_ring - Configure a receive ring after Reset
3712 * @adapter: board private structure
3713 * @ring: receive ring to be configured
3714 *
3715 * Configure the Rx unit of the MAC after a reset.
3716 **/
3717void igb_configure_rx_ring(struct igb_adapter *adapter,
3718 struct igb_ring *ring)
3719{
3720 struct e1000_hw *hw = &adapter->hw;
3721 u64 rdba = ring->dma;
3722 int reg_idx = ring->reg_idx;
3723 u32 srrctl = 0, rxdctl = 0;
3724
3725 /* disable the queue */
3726 wr32(E1000_RXDCTL(reg_idx), 0);
3727
3728 /* Set DMA base address registers */
3729 wr32(E1000_RDBAL(reg_idx),
3730 rdba & 0x00000000ffffffffULL);
3731 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
3732 wr32(E1000_RDLEN(reg_idx),
3733 ring->count * sizeof(union e1000_adv_rx_desc));
3734
3735 /* initialize head and tail */
3736 ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
3737 wr32(E1000_RDH(reg_idx), 0);
3738 writel(0, ring->tail);
3739
3740 /* set descriptor configuration */
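	/* the header size field is programmed in 64 byte units and the
	 * packet buffer size field in 1 KB units; the shift constants
	 * fold that unit scaling into the field positions
	 */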
3741 srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
3742 srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT;
3743 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
3744 if (hw->mac.type >= e1000_82580)
3745 srrctl |= E1000_SRRCTL_TIMESTAMP;
3746 /* Only set Drop Enable if we are supporting multiple queues */
3747 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
3748 srrctl |= E1000_SRRCTL_DROP_EN;
3749
3750 wr32(E1000_SRRCTL(reg_idx), srrctl);
3751
3752 /* set filtering for VMDQ pools */
3753 igb_set_vmolr(adapter, reg_idx & 0x7, true);
3754
3755 rxdctl |= IGB_RX_PTHRESH;
3756 rxdctl |= IGB_RX_HTHRESH << 8;
3757 rxdctl |= IGB_RX_WTHRESH << 16;
3758
3759 /* enable receive descriptor fetching */
3760 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
3761 wr32(E1000_RXDCTL(reg_idx), rxdctl);
3762}
3763
3764/**
3765 * igb_configure_rx - Configure receive Unit after Reset
3766 * @adapter: board private structure
3767 *
3768 * Configure the Rx unit of the MAC after a reset.
3769 **/
3770static void igb_configure_rx(struct igb_adapter *adapter)
3771{
3772 int i;
3773
3774 /* set the correct pool for the PF default MAC address in entry 0 */
3775 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
3776 adapter->vfs_allocated_count);
3777
3778 /* Setup the HW Rx Head and Tail Descriptor Pointers and
3779 * the Base and Length of the Rx Descriptor Ring
3780 */
3781 for (i = 0; i < adapter->num_rx_queues; i++)
3782 igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
3783}
3784
3785/**
3786 * igb_free_tx_resources - Free Tx Resources per Queue
3787 * @tx_ring: Tx descriptor ring for a specific queue
3788 *
3789 * Free all transmit software resources
3790 **/
3791void igb_free_tx_resources(struct igb_ring *tx_ring)
3792{
3793 igb_clean_tx_ring(tx_ring);
3794
3795 vfree(tx_ring->tx_buffer_info);
3796 tx_ring->tx_buffer_info = NULL;
3797
3798 /* if not set, then don't free */
3799 if (!tx_ring->desc)
3800 return;
3801
3802 dma_free_coherent(tx_ring->dev, tx_ring->size,
3803 tx_ring->desc, tx_ring->dma);
3804
3805 tx_ring->desc = NULL;
3806}
3807
3808/**
3809 * igb_free_all_tx_resources - Free Tx Resources for All Queues
3810 * @adapter: board private structure
3811 *
3812 * Free all transmit software resources
3813 **/
3814static void igb_free_all_tx_resources(struct igb_adapter *adapter)
3815{
3816 int i;
3817
3818 for (i = 0; i < adapter->num_tx_queues; i++)
3819 if (adapter->tx_ring[i])
3820 igb_free_tx_resources(adapter->tx_ring[i]);
3821}
3822
3823void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
3824 struct igb_tx_buffer *tx_buffer)
3825{
3826 if (tx_buffer->skb) {
3827 dev_kfree_skb_any(tx_buffer->skb);
3828 if (dma_unmap_len(tx_buffer, len))
3829 dma_unmap_single(ring->dev,
3830 dma_unmap_addr(tx_buffer, dma),
3831 dma_unmap_len(tx_buffer, len),
3832 DMA_TO_DEVICE);
3833 } else if (dma_unmap_len(tx_buffer, len)) {
3834 dma_unmap_page(ring->dev,
3835 dma_unmap_addr(tx_buffer, dma),
3836 dma_unmap_len(tx_buffer, len),
3837 DMA_TO_DEVICE);
3838 }
3839 tx_buffer->next_to_watch = NULL;
3840 tx_buffer->skb = NULL;
3841 dma_unmap_len_set(tx_buffer, len, 0);
3842 /* buffer_info must be completely set up in the transmit path */
3843}
3844
3845/**
3846 * igb_clean_tx_ring - Free Tx Buffers
3847 * @tx_ring: ring to be cleaned
3848 **/
3849static void igb_clean_tx_ring(struct igb_ring *tx_ring)
3850{
3851 struct igb_tx_buffer *buffer_info;
3852 unsigned long size;
3853 u16 i;
3854
3855 if (!tx_ring->tx_buffer_info)
3856 return;

	/* Free all the Tx ring sk_buffs */
3859 for (i = 0; i < tx_ring->count; i++) {
3860 buffer_info = &tx_ring->tx_buffer_info[i];
3861 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
3862 }
3863
3864 netdev_tx_reset_queue(txring_txq(tx_ring));
3865
3866 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
3867 memset(tx_ring->tx_buffer_info, 0, size);
3868
3869 /* Zero out the descriptor ring */
3870 memset(tx_ring->desc, 0, tx_ring->size);
3871
3872 tx_ring->next_to_use = 0;
3873 tx_ring->next_to_clean = 0;
3874}
3875
3876/**
3877 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
3878 * @adapter: board private structure
3879 **/
3880static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
3881{
3882 int i;
3883
3884 for (i = 0; i < adapter->num_tx_queues; i++)
3885 if (adapter->tx_ring[i])
3886 igb_clean_tx_ring(adapter->tx_ring[i]);
3887}
3888
3889/**
3890 * igb_free_rx_resources - Free Rx Resources
3891 * @rx_ring: ring to clean the resources from
3892 *
3893 * Free all receive software resources
3894 **/
3895void igb_free_rx_resources(struct igb_ring *rx_ring)
3896{
3897 igb_clean_rx_ring(rx_ring);
3898
3899 vfree(rx_ring->rx_buffer_info);
3900 rx_ring->rx_buffer_info = NULL;
3901
3902 /* if not set, then don't free */
3903 if (!rx_ring->desc)
3904 return;
3905
3906 dma_free_coherent(rx_ring->dev, rx_ring->size,
3907 rx_ring->desc, rx_ring->dma);
3908
3909 rx_ring->desc = NULL;
3910}
3911
3912/**
3913 * igb_free_all_rx_resources - Free Rx Resources for All Queues
3914 * @adapter: board private structure
3915 *
3916 * Free all receive software resources
3917 **/
3918static void igb_free_all_rx_resources(struct igb_adapter *adapter)
3919{
3920 int i;
3921
3922 for (i = 0; i < adapter->num_rx_queues; i++)
3923 if (adapter->rx_ring[i])
3924 igb_free_rx_resources(adapter->rx_ring[i]);
3925}
3926
3927/**
3928 * igb_clean_rx_ring - Free Rx Buffers per Queue
3929 * @rx_ring: ring to free buffers from
3930 **/
3931static void igb_clean_rx_ring(struct igb_ring *rx_ring)
3932{
3933 unsigned long size;
3934 u16 i;
3935
3936 if (rx_ring->skb)
3937 dev_kfree_skb(rx_ring->skb);
3938 rx_ring->skb = NULL;
3939
3940 if (!rx_ring->rx_buffer_info)
3941 return;
3942
3943 /* Free all the Rx ring sk_buffs */
3944 for (i = 0; i < rx_ring->count; i++) {
3945 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
3946
3947 if (!buffer_info->page)
3948 continue;
3949
3950 /* Invalidate cache lines that may have been written to by
3951 * device so that we avoid corrupting memory.
3952 */
3953 dma_sync_single_range_for_cpu(rx_ring->dev,
3954 buffer_info->dma,
3955 buffer_info->page_offset,
3956 IGB_RX_BUFSZ,
3957 DMA_FROM_DEVICE);
3958
3959 /* free resources associated with mapping */
3960 dma_unmap_page_attrs(rx_ring->dev,
3961 buffer_info->dma,
3962 PAGE_SIZE,
3963 DMA_FROM_DEVICE,
3964 DMA_ATTR_SKIP_CPU_SYNC);
3965 __page_frag_cache_drain(buffer_info->page,
3966 buffer_info->pagecnt_bias);
3967
3968 buffer_info->page = NULL;
3969 }
3970
3971 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
3972 memset(rx_ring->rx_buffer_info, 0, size);
3973
3974 /* Zero out the descriptor ring */
3975 memset(rx_ring->desc, 0, rx_ring->size);
3976
3977 rx_ring->next_to_alloc = 0;
3978 rx_ring->next_to_clean = 0;
3979 rx_ring->next_to_use = 0;
3980}
3981
3982/**
3983 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
3984 * @adapter: board private structure
3985 **/
3986static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
3987{
3988 int i;
3989
3990 for (i = 0; i < adapter->num_rx_queues; i++)
3991 if (adapter->rx_ring[i])
3992 igb_clean_rx_ring(adapter->rx_ring[i]);
3993}
3994
3995/**
3996 * igb_set_mac - Change the Ethernet Address of the NIC
3997 * @netdev: network interface device structure
3998 * @p: pointer to an address structure
3999 *
4000 * Returns 0 on success, negative on failure
4001 **/
4002static int igb_set_mac(struct net_device *netdev, void *p)
4003{
4004 struct igb_adapter *adapter = netdev_priv(netdev);
4005 struct e1000_hw *hw = &adapter->hw;
4006 struct sockaddr *addr = p;
4007
4008 if (!is_valid_ether_addr(addr->sa_data))
4009 return -EADDRNOTAVAIL;
4010
4011 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
4012 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
4013
4014 /* set the correct pool for the new PF MAC address in entry 0 */
4015 igb_rar_set_qsel(adapter, hw->mac.addr, 0,
4016 adapter->vfs_allocated_count);
4017
4018 return 0;
4019}
4020
4021/**
4022 * igb_write_mc_addr_list - write multicast addresses to MTA
4023 * @netdev: network interface device structure
4024 *
4025 * Writes multicast address list to the MTA hash table.
4026 * Returns: -ENOMEM on failure
4027 * 0 on no addresses written
4028 * X on writing X addresses to MTA
4029 **/
4030static int igb_write_mc_addr_list(struct net_device *netdev)
4031{
4032 struct igb_adapter *adapter = netdev_priv(netdev);
4033 struct e1000_hw *hw = &adapter->hw;
4034 struct netdev_hw_addr *ha;
4035 u8 *mta_list;
4036 int i;
4037
4038 if (netdev_mc_empty(netdev)) {
4039 /* nothing to program, so clear mc list */
4040 igb_update_mc_addr_list(hw, NULL, 0);
4041 igb_restore_vf_multicasts(adapter);
4042 return 0;
4043 }
4044
	mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
4046 if (!mta_list)
4047 return -ENOMEM;
4048
4049 /* The shared function expects a packed array of only addresses. */
4050 i = 0;
4051 netdev_for_each_mc_addr(ha, netdev)
4052 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
4053
4054 igb_update_mc_addr_list(hw, mta_list, i);
4055 kfree(mta_list);
4056
4057 return netdev_mc_count(netdev);
4058}
4059
4060/**
4061 * igb_write_uc_addr_list - write unicast addresses to RAR table
4062 * @netdev: network interface device structure
4063 *
4064 * Writes unicast address list to the RAR table.
4065 * Returns: -ENOMEM on failure/insufficient address space
4066 * 0 on no addresses written
4067 * X on writing X addresses to the RAR table
4068 **/
4069static int igb_write_uc_addr_list(struct net_device *netdev)
4070{
4071 struct igb_adapter *adapter = netdev_priv(netdev);
4072 struct e1000_hw *hw = &adapter->hw;
4073 unsigned int vfn = adapter->vfs_allocated_count;
4074 unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
4075 int count = 0;
4076
4077 /* return ENOMEM indicating insufficient memory for addresses */
4078 if (netdev_uc_count(netdev) > rar_entries)
4079 return -ENOMEM;
4080
4081 if (!netdev_uc_empty(netdev) && rar_entries) {
4082 struct netdev_hw_addr *ha;
4083
4084 netdev_for_each_uc_addr(ha, netdev) {
4085 if (!rar_entries)
4086 break;
4087 igb_rar_set_qsel(adapter, ha->addr,
4088 rar_entries--,
4089 vfn);
4090 count++;
4091 }
4092 }
4093 /* write the addresses in reverse order to avoid write combining */
4094 for (; rar_entries > 0 ; rar_entries--) {
4095 wr32(E1000_RAH(rar_entries), 0);
4096 wr32(E1000_RAL(rar_entries), 0);
4097 }
4098 wrfl();
4099
4100 return count;
4101}
4102
4103static int igb_vlan_promisc_enable(struct igb_adapter *adapter)
4104{
4105 struct e1000_hw *hw = &adapter->hw;
4106 u32 i, pf_id;
4107
4108 switch (hw->mac.type) {
4109 case e1000_i210:
4110 case e1000_i211:
4111 case e1000_i350:
4112 /* VLAN filtering needed for VLAN prio filter */
4113 if (adapter->netdev->features & NETIF_F_NTUPLE)
4114 break;
4115 /* fall through */
4116 case e1000_82576:
4117 case e1000_82580:
4118 case e1000_i354:
4119 /* VLAN filtering needed for pool filtering */
4120 if (adapter->vfs_allocated_count)
4121 break;
4122 /* fall through */
4123 default:
4124 return 1;
4125 }
4126
4127 /* We are already in VLAN promisc, nothing to do */
4128 if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
4129 return 0;
4130
4131 if (!adapter->vfs_allocated_count)
4132 goto set_vfta;
4133
4134 /* Add PF to all active pools */
4135 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
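	/* the PF occupies the pool slot after the last VF, and the pool
	 * select bits start at E1000_VLVF_POOLSEL_SHIFT, so pf_id is the
	 * PF's absolute bit position within each VLVF register
	 */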
4136
4137 for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
4138 u32 vlvf = rd32(E1000_VLVF(i));
4139
4140 vlvf |= BIT(pf_id);
4141 wr32(E1000_VLVF(i), vlvf);
4142 }
4143
4144set_vfta:
4145 /* Set all bits in the VLAN filter table array */
4146 for (i = E1000_VLAN_FILTER_TBL_SIZE; i--;)
4147 hw->mac.ops.write_vfta(hw, i, ~0U);
4148
4149 /* Set flag so we don't redo unnecessary work */
4150 adapter->flags |= IGB_FLAG_VLAN_PROMISC;
4151
4152 return 0;
4153}
4154
4155#define VFTA_BLOCK_SIZE 8
4156static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset)
4157{
4158 struct e1000_hw *hw = &adapter->hw;
4159 u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
4160 u32 vid_start = vfta_offset * 32;
4161 u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
4162 u32 i, vid, word, bits, pf_id;
4163
4164 /* guarantee that we don't scrub out management VLAN */
4165 vid = adapter->mng_vlan_id;
4166 if (vid >= vid_start && vid < vid_end)
4167 vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
4168
4169 if (!adapter->vfs_allocated_count)
4170 goto set_vfta;
4171
4172 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
4173
4174 for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
4175 u32 vlvf = rd32(E1000_VLVF(i));
4176
4177 /* pull VLAN ID from VLVF */
4178 vid = vlvf & VLAN_VID_MASK;
4179
4180 /* only concern ourselves with a certain range */
4181 if (vid < vid_start || vid >= vid_end)
4182 continue;
4183
4184 if (vlvf & E1000_VLVF_VLANID_ENABLE) {
4185 /* record VLAN ID in VFTA */
4186 vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
4187
4188 /* if PF is part of this then continue */
4189 if (test_bit(vid, adapter->active_vlans))
4190 continue;
4191 }
4192
4193 /* remove PF from the pool */
4194 bits = ~BIT(pf_id);
4195 bits &= rd32(E1000_VLVF(i));
4196 wr32(E1000_VLVF(i), bits);
4197 }
4198
4199set_vfta:
4200 /* extract values from active_vlans and write back to VFTA */
4201 for (i = VFTA_BLOCK_SIZE; i--;) {
4202 vid = (vfta_offset + i) * 32;
4203 word = vid / BITS_PER_LONG;
4204 bits = vid % BITS_PER_LONG;
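		/* e.g. on a 64-bit kernel, VFTA word 3 covers VIDs 96-127,
		 * which live in bits 32-63 of active_vlans[1]: word = 1
		 * and bits = 32 here
		 */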
4205
4206 vfta[i] |= adapter->active_vlans[word] >> bits;
4207
4208 hw->mac.ops.write_vfta(hw, vfta_offset + i, vfta[i]);
4209 }
4210}
4211
4212static void igb_vlan_promisc_disable(struct igb_adapter *adapter)
4213{
4214 u32 i;
4215
4216 /* We are not in VLAN promisc, nothing to do */
4217 if (!(adapter->flags & IGB_FLAG_VLAN_PROMISC))
4218 return;
4219
4220 /* Set flag so we don't redo unnecessary work */
4221 adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;
4222
4223 for (i = 0; i < E1000_VLAN_FILTER_TBL_SIZE; i += VFTA_BLOCK_SIZE)
4224 igb_scrub_vfta(adapter, i);
4225}
4226
4227/**
4228 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
4229 * @netdev: network interface device structure
4230 *
4231 * The set_rx_mode entry point is called whenever the unicast or multicast
4232 * address lists or the network interface flags are updated. This routine is
4233 * responsible for configuring the hardware for proper unicast, multicast,
4234 * promiscuous mode, and all-multi behavior.
4235 **/
4236static void igb_set_rx_mode(struct net_device *netdev)
4237{
4238 struct igb_adapter *adapter = netdev_priv(netdev);
4239 struct e1000_hw *hw = &adapter->hw;
4240 unsigned int vfn = adapter->vfs_allocated_count;
4241 u32 rctl = 0, vmolr = 0;
4242 int count;
4243
4244 /* Check for Promiscuous and All Multicast modes */
4245 if (netdev->flags & IFF_PROMISC) {
4246 rctl |= E1000_RCTL_UPE | E1000_RCTL_MPE;
4247 vmolr |= E1000_VMOLR_MPME;
4248
4249 /* enable use of UTA filter to force packets to default pool */
4250 if (hw->mac.type == e1000_82576)
4251 vmolr |= E1000_VMOLR_ROPE;
4252 } else {
4253 if (netdev->flags & IFF_ALLMULTI) {
4254 rctl |= E1000_RCTL_MPE;
4255 vmolr |= E1000_VMOLR_MPME;
4256 } else {
4257 /* Write addresses to the MTA, if the attempt fails
4258 * then we should just turn on promiscuous mode so
4259 * that we can at least receive multicast traffic
4260 */
4261 count = igb_write_mc_addr_list(netdev);
4262 if (count < 0) {
4263 rctl |= E1000_RCTL_MPE;
4264 vmolr |= E1000_VMOLR_MPME;
4265 } else if (count) {
4266 vmolr |= E1000_VMOLR_ROMPE;
4267 }
4268 }
4269 }
4270
4271 /* Write addresses to available RAR registers, if there is not
4272 * sufficient space to store all the addresses then enable
4273 * unicast promiscuous mode
4274 */
4275 count = igb_write_uc_addr_list(netdev);
4276 if (count < 0) {
4277 rctl |= E1000_RCTL_UPE;
4278 vmolr |= E1000_VMOLR_ROPE;
4279 }
4280
4281 /* enable VLAN filtering by default */
4282 rctl |= E1000_RCTL_VFE;
4283
4284 /* disable VLAN filtering for modes that require it */
4285 if ((netdev->flags & IFF_PROMISC) ||
4286 (netdev->features & NETIF_F_RXALL)) {
4287 /* if we fail to set all rules then just clear VFE */
4288 if (igb_vlan_promisc_enable(adapter))
4289 rctl &= ~E1000_RCTL_VFE;
4290 } else {
4291 igb_vlan_promisc_disable(adapter);
4292 }
4293
4294 /* update state of unicast, multicast, and VLAN filtering modes */
4295 rctl |= rd32(E1000_RCTL) & ~(E1000_RCTL_UPE | E1000_RCTL_MPE |
4296 E1000_RCTL_VFE);
4297 wr32(E1000_RCTL, rctl);
4298
4299 /* In order to support SR-IOV and eventually VMDq it is necessary to set
4300 * the VMOLR to enable the appropriate modes. Without this workaround
4301 * we will have issues with VLAN tag stripping not being done for frames
4302 * that are only arriving because we are the default pool
4303 */
4304 if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
4305 return;
4306
4307 /* set UTA to appropriate mode */
4308 igb_set_uta(adapter, !!(vmolr & E1000_VMOLR_ROPE));
4309
4310 vmolr |= rd32(E1000_VMOLR(vfn)) &
4311 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
4312
4313 /* enable Rx jumbo frames, no need for restriction */
4314 vmolr &= ~E1000_VMOLR_RLPML_MASK;
4315 vmolr |= MAX_JUMBO_FRAME_SIZE | E1000_VMOLR_LPE;
4316
4317 wr32(E1000_VMOLR(vfn), vmolr);
4318 wr32(E1000_RLPML, MAX_JUMBO_FRAME_SIZE);
4319
4320 igb_restore_vf_multicasts(adapter);
4321}
4322
4323static void igb_check_wvbr(struct igb_adapter *adapter)
4324{
4325 struct e1000_hw *hw = &adapter->hw;
4326 u32 wvbr = 0;
4327
4328 switch (hw->mac.type) {
4329 case e1000_82576:
4330 case e1000_i350:
4331 wvbr = rd32(E1000_WVBR);
4332 if (!wvbr)
4333 return;
4334 break;
4335 default:
4336 break;
4337 }
4338
4339 adapter->wvbr |= wvbr;
4340}
4341
4342#define IGB_STAGGERED_QUEUE_OFFSET 8
4343
4344static void igb_spoof_check(struct igb_adapter *adapter)
4345{
4346 int j;
4347
4348 if (!adapter->wvbr)
4349 return;
4350
4351 for (j = 0; j < adapter->vfs_allocated_count; j++) {
4352 if (adapter->wvbr & BIT(j) ||
4353 adapter->wvbr & BIT(j + IGB_STAGGERED_QUEUE_OFFSET)) {
4354 dev_warn(&adapter->pdev->dev,
4355 "Spoof event(s) detected on VF %d\n", j);
4356 adapter->wvbr &=
4357 ~(BIT(j) |
4358 BIT(j + IGB_STAGGERED_QUEUE_OFFSET));
4359 }
4360 }
4361}
4362
4363/* Need to wait a few seconds after link up to get diagnostic information from
4364 * the phy
4365 */
4366static void igb_update_phy_info(unsigned long data)
4367{
	struct igb_adapter *adapter = (struct igb_adapter *) data;

	igb_get_phy_info(&adapter->hw);
4370}
4371
4372/**
4373 * igb_has_link - check shared code for link and determine up/down
4374 * @adapter: pointer to driver private info
4375 **/
4376bool igb_has_link(struct igb_adapter *adapter)
4377{
4378 struct e1000_hw *hw = &adapter->hw;
4379 bool link_active = false;
4380
	/* get_link_status is set on LSC (link status) interrupt or
	 * rx sequence error interrupt. It stays set (link reported
	 * down) until e1000_check_for_link establishes link; this
	 * applies to copper adapters ONLY
	 */
4386 switch (hw->phy.media_type) {
4387 case e1000_media_type_copper:
4388 if (!hw->mac.get_link_status)
4389 return true;
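		/* fall through */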
4390 case e1000_media_type_internal_serdes:
4391 hw->mac.ops.check_for_link(hw);
4392 link_active = !hw->mac.get_link_status;
4393 break;
4394 default:
4395 case e1000_media_type_unknown:
4396 break;
4397 }
4398
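	/* On i210/i211 internal PHYs a fresh link-up report is not
	 * trusted immediately; the flag and timestamp below make the
	 * watchdog hold the link down for roughly one second first.
	 */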
4399 if (((hw->mac.type == e1000_i210) ||
4400 (hw->mac.type == e1000_i211)) &&
4401 (hw->phy.id == I210_I_PHY_ID)) {
4402 if (!netif_carrier_ok(adapter->netdev)) {
4403 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
4404 } else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) {
4405 adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE;
4406 adapter->link_check_timeout = jiffies;
4407 }
4408 }
4409
4410 return link_active;
4411}
4412
4413static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
4414{
4415 bool ret = false;
4416 u32 ctrl_ext, thstat;
4417
4418 /* check for thermal sensor event on i350 copper only */
4419 if (hw->mac.type == e1000_i350) {
4420 thstat = rd32(E1000_THSTAT);
4421 ctrl_ext = rd32(E1000_CTRL_EXT);
4422
4423 if ((hw->phy.media_type == e1000_media_type_copper) &&
4424 !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII))
4425 ret = !!(thstat & event);
4426 }
4427
4428 return ret;
4429}
4430
4431/**
4432 * igb_check_lvmmc - check for malformed packets received
4433 * and indicated in LVMMC register
4434 * @adapter: pointer to adapter
4435 **/
4436static void igb_check_lvmmc(struct igb_adapter *adapter)
4437{
4438 struct e1000_hw *hw = &adapter->hw;
4439 u32 lvmmc;
4440
4441 lvmmc = rd32(E1000_LVMMC);
4442 if (lvmmc) {
4443 if (unlikely(net_ratelimit())) {
4444 netdev_warn(adapter->netdev,
4445 "malformed Tx packet detected and dropped, LVMMC:0x%08x\n",
4446 lvmmc);
4447 }
4448 }
4449}
4450
4451/**
4452 * igb_watchdog - Timer Call-back
4453 * @data: pointer to adapter cast into an unsigned long
4454 **/
4455static void igb_watchdog(unsigned long data)
4456{
4457 struct igb_adapter *adapter = (struct igb_adapter *)data;
4458 /* Do the rest outside of interrupt context */
4459 schedule_work(&adapter->watchdog_task);
4460}
4461
4462static void igb_watchdog_task(struct work_struct *work)
4463{
4464 struct igb_adapter *adapter = container_of(work,
4465 struct igb_adapter,
4466 watchdog_task);
4467 struct e1000_hw *hw = &adapter->hw;
4468 struct e1000_phy_info *phy = &hw->phy;
4469 struct net_device *netdev = adapter->netdev;
4470 u32 link;
4471 int i;
4472 u32 connsw;
4473 u16 phy_data, retry_count = 20;
4474
4475 link = igb_has_link(adapter);
4476
4477 if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) {
4478 if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
4479 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
4480 else
4481 link = false;
4482 }
4483
4484 /* Force link down if we have fiber to swap to */
4485 if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
4486 if (hw->phy.media_type == e1000_media_type_copper) {
4487 connsw = rd32(E1000_CONNSW);
4488 if (!(connsw & E1000_CONNSW_AUTOSENSE_EN))
4489 link = 0;
4490 }
4491 }
4492 if (link) {
4493 /* Perform a reset if the media type changed. */
4494 if (hw->dev_spec._82575.media_changed) {
4495 hw->dev_spec._82575.media_changed = false;
4496 adapter->flags |= IGB_FLAG_MEDIA_RESET;
4497 igb_reset(adapter);
4498 }
4499 /* Cancel scheduled suspend requests. */
4500 pm_runtime_resume(netdev->dev.parent);
4501
4502 if (!netif_carrier_ok(netdev)) {
4503 u32 ctrl;
4504
4505 hw->mac.ops.get_speed_and_duplex(hw,
4506 &adapter->link_speed,
4507 &adapter->link_duplex);
4508
4509 ctrl = rd32(E1000_CTRL);
			/* Link status message must follow this format */
4511 netdev_info(netdev,
4512 "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
4513 netdev->name,
4514 adapter->link_speed,
4515 adapter->link_duplex == FULL_DUPLEX ?
4516 "Full" : "Half",
4517 (ctrl & E1000_CTRL_TFCE) &&
4518 (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
4519 (ctrl & E1000_CTRL_RFCE) ? "RX" :
4520 (ctrl & E1000_CTRL_TFCE) ? "TX" : "None");
4521
4522 /* disable EEE if enabled */
4523 if ((adapter->flags & IGB_FLAG_EEE) &&
4524 (adapter->link_duplex == HALF_DUPLEX)) {
4525 dev_info(&adapter->pdev->dev,
4526 "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex.\n");
4527 adapter->hw.dev_spec._82575.eee_disable = true;
4528 adapter->flags &= ~IGB_FLAG_EEE;
4529 }
4530
4531 /* check if SmartSpeed worked */
4532 igb_check_downshift(hw);
4533 if (phy->speed_downgraded)
4534 netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");
4535
4536 /* check for thermal sensor event */
4537 if (igb_thermal_sensor_event(hw,
4538 E1000_THSTAT_LINK_THROTTLE))
4539 netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n");
4540
4541 /* adjust timeout factor according to speed/duplex */
4542 adapter->tx_timeout_factor = 1;
4543 switch (adapter->link_speed) {
4544 case SPEED_10:
4545 adapter->tx_timeout_factor = 14;
4546 break;
4547 case SPEED_100:
4548 /* maybe add some timeout factor ? */
4549 break;
4550 }
4551
4552 if (adapter->link_speed != SPEED_1000)
4553 goto no_wait;
4554
4555 /* wait for Remote receiver status OK */
4556retry_read_status:
4557 if (!igb_read_phy_reg(hw, PHY_1000T_STATUS,
4558 &phy_data)) {
4559 if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) &&
4560 retry_count) {
4561 msleep(100);
4562 retry_count--;
4563 goto retry_read_status;
4564 } else if (!retry_count) {
					dev_err(&adapter->pdev->dev, "exceeded max 2 second wait for remote receiver status\n");
4566 }
4567 } else {
				dev_err(&adapter->pdev->dev, "failed to read 1000Base-T Status Reg\n");
4569 }
4570no_wait:
4571 netif_carrier_on(netdev);
4572
4573 igb_ping_all_vfs(adapter);
4574 igb_check_vf_rate_limit(adapter);
4575
4576 /* link state has changed, schedule phy info update */
4577 if (!test_bit(__IGB_DOWN, &adapter->state))
4578 mod_timer(&adapter->phy_info_timer,
4579 round_jiffies(jiffies + 2 * HZ));
4580 }
4581 } else {
4582 if (netif_carrier_ok(netdev)) {
4583 adapter->link_speed = 0;
4584 adapter->link_duplex = 0;
4585
4586 /* check for thermal sensor event */
4587 if (igb_thermal_sensor_event(hw,
4588 E1000_THSTAT_PWR_DOWN)) {
4589 netdev_err(netdev, "The network adapter was stopped because it overheated\n");
4590 }
4591
			/* Link status message must follow this format */
4593 netdev_info(netdev, "igb: %s NIC Link is Down\n",
4594 netdev->name);
4595 netif_carrier_off(netdev);
4596
4597 igb_ping_all_vfs(adapter);
4598
4599 /* link state has changed, schedule phy info update */
4600 if (!test_bit(__IGB_DOWN, &adapter->state))
4601 mod_timer(&adapter->phy_info_timer,
4602 round_jiffies(jiffies + 2 * HZ));
4603
4604 /* link is down, time to check for alternate media */
4605 if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
4606 igb_check_swap_media(adapter);
4607 if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
4608 schedule_work(&adapter->reset_task);
4609 /* return immediately */
4610 return;
4611 }
4612 }
4613 pm_schedule_suspend(netdev->dev.parent,
4614 MSEC_PER_SEC * 5);
4615
4616 /* also check for alternate media here */
4617 } else if (!netif_carrier_ok(netdev) &&
4618 (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
4619 igb_check_swap_media(adapter);
4620 if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
4621 schedule_work(&adapter->reset_task);
4622 /* return immediately */
4623 return;
4624 }
4625 }
4626 }
4627
4628 spin_lock(&adapter->stats64_lock);
4629 igb_update_stats(adapter, &adapter->stats64);
4630 spin_unlock(&adapter->stats64_lock);
4631
4632 for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *tx_ring = adapter->tx_ring[i];

4634 if (!netif_carrier_ok(netdev)) {
4635 /* We've lost link, so the controller stops DMA,
4636 * but we've got queued Tx work that's never going
4637 * to get done, so reset controller to flush Tx.
4638 * (Do the reset outside of interrupt context).
4639 */
4640 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
4641 adapter->tx_timeout_count++;
4642 schedule_work(&adapter->reset_task);
4643 /* return immediately since reset is imminent */
4644 return;
4645 }
4646 }
4647
4648 /* Force detection of hung controller every watchdog period */
4649 set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
4650 }
4651
4652 /* Cause software interrupt to ensure Rx ring is cleaned */
4653 if (adapter->flags & IGB_FLAG_HAS_MSIX) {
4654 u32 eics = 0;
4655
4656 for (i = 0; i < adapter->num_q_vectors; i++)
4657 eics |= adapter->q_vector[i]->eims_value;
4658 wr32(E1000_EICS, eics);
4659 } else {
4660 wr32(E1000_ICS, E1000_ICS_RXDMT0);
4661 }
4662
4663 igb_spoof_check(adapter);
4664 igb_ptp_rx_hang(adapter);
4665
4666 /* Check LVMMC register on i350/i354 only */
4667 if ((adapter->hw.mac.type == e1000_i350) ||
4668 (adapter->hw.mac.type == e1000_i354))
4669 igb_check_lvmmc(adapter);
4670
4671 /* Reset the timer */
4672 if (!test_bit(__IGB_DOWN, &adapter->state)) {
4673 if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)
4674 mod_timer(&adapter->watchdog_timer,
4675 round_jiffies(jiffies + HZ));
4676 else
4677 mod_timer(&adapter->watchdog_timer,
4678 round_jiffies(jiffies + 2 * HZ));
4679 }
4680}
4681
4682enum latency_range {
4683 lowest_latency = 0,
4684 low_latency = 1,
4685 bulk_latency = 2,
4686 latency_invalid = 255
4687};
4688
4689/**
4690 * igb_update_ring_itr - update the dynamic ITR value based on packet size
4691 * @q_vector: pointer to q_vector
4692 *
 * Stores a new ITR value based strictly on packet size. This
4694 * algorithm is less sophisticated than that used in igb_update_itr,
4695 * due to the difficulty of synchronizing statistics across multiple
4696 * receive rings. The divisors and thresholds used by this function
4697 * were determined based on theoretical maximum wire speed and testing
4698 * data, in order to minimize response time while increasing bulk
4699 * throughput.
4700 * This functionality is controlled by ethtool's coalescing settings.
4701 * NOTE: This function is called only when operating in a multiqueue
4702 * receive environment.
4703 **/
4704static void igb_update_ring_itr(struct igb_q_vector *q_vector)
4705{
4706 int new_val = q_vector->itr_val;
4707 int avg_wire_size = 0;
4708 struct igb_adapter *adapter = q_vector->adapter;
4709 unsigned int packets;
4710
	/* For non-gigabit speeds, just fix the interrupt rate at 4000
	 * ints/sec using the IGB_4K_ITR value.
	 */
4714 if (adapter->link_speed != SPEED_1000) {
4715 new_val = IGB_4K_ITR;
4716 goto set_itr_val;
4717 }
4718
4719 packets = q_vector->rx.total_packets;
4720 if (packets)
4721 avg_wire_size = q_vector->rx.total_bytes / packets;
4722
4723 packets = q_vector->tx.total_packets;
4724 if (packets)
4725 avg_wire_size = max_t(u32, avg_wire_size,
4726 q_vector->tx.total_bytes / packets);
4727
4728 /* if avg_wire_size isn't set no work was done */
4729 if (!avg_wire_size)
4730 goto clear_counts;
4731
4732 /* Add 24 bytes to size to account for CRC, preamble, and gap */
4733 avg_wire_size += 24;
4734
4735 /* Don't starve jumbo frames */
4736 avg_wire_size = min(avg_wire_size, 3000);
4737
4738 /* Give a little boost to mid-size frames */
4739 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
4740 new_val = avg_wire_size / 3;
4741 else
4742 new_val = avg_wire_size / 2;
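	/* e.g. full-size 1514 byte frames give avg_wire_size = 1538 and
	 * new_val = 1538 / 2 = 769, while 600 byte frames (624 with
	 * overhead) land in the boosted bucket with new_val = 208
	 */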
4743
4744 /* conservative mode (itr 3) eliminates the lowest_latency setting */
4745 if (new_val < IGB_20K_ITR &&
4746 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
4747 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
4748 new_val = IGB_20K_ITR;
4749
4750set_itr_val:
4751 if (new_val != q_vector->itr_val) {
4752 q_vector->itr_val = new_val;
4753 q_vector->set_itr = 1;
4754 }
4755clear_counts:
4756 q_vector->rx.total_bytes = 0;
4757 q_vector->rx.total_packets = 0;
4758 q_vector->tx.total_bytes = 0;
4759 q_vector->tx.total_packets = 0;
4760}
4761
4762/**
4763 * igb_update_itr - update the dynamic ITR value based on statistics
4764 * @q_vector: pointer to q_vector
4765 * @ring_container: ring info to update the itr for
4766 *
4767 * Stores a new ITR value based on packets and byte
4768 * counts during the last interrupt. The advantage of per interrupt
4769 * computation is faster updates and more accurate ITR for the current
4770 * traffic pattern. Constants in this function were computed
4771 * based on theoretical maximum wire speed and thresholds were set based
4772 * on testing data as well as attempting to minimize response time
4773 * while increasing bulk throughput.
4774 * This functionality is controlled by ethtool's coalescing settings.
4775 * NOTE: These calculations are only valid when operating in a single-
4776 * queue environment.
4777 **/
4778static void igb_update_itr(struct igb_q_vector *q_vector,
4779 struct igb_ring_container *ring_container)
4780{
4781 unsigned int packets = ring_container->total_packets;
4782 unsigned int bytes = ring_container->total_bytes;
4783 u8 itrval = ring_container->itr;
4784
4785 /* no packets, exit with status unchanged */
4786 if (packets == 0)
4787 return;
4788
4789 switch (itrval) {
4790 case lowest_latency:
4791 /* handle TSO and jumbo frames */
4792 if (bytes/packets > 8000)
4793 itrval = bulk_latency;
4794 else if ((packets < 5) && (bytes > 512))
4795 itrval = low_latency;
4796 break;
4797 case low_latency: /* 50 usec aka 20000 ints/s */
4798 if (bytes > 10000) {
4799 /* this if handles the TSO accounting */
4800 if (bytes/packets > 8000)
4801 itrval = bulk_latency;
4802 else if ((packets < 10) || ((bytes/packets) > 1200))
4803 itrval = bulk_latency;
4804			else if (packets > 35)
4805 itrval = lowest_latency;
4806 } else if (bytes/packets > 2000) {
4807 itrval = bulk_latency;
4808 } else if (packets <= 2 && bytes < 512) {
4809 itrval = lowest_latency;
4810 }
4811 break;
4812 case bulk_latency: /* 250 usec aka 4000 ints/s */
4813 if (bytes > 25000) {
4814 if (packets > 35)
4815 itrval = low_latency;
4816 } else if (bytes < 1500) {
4817 itrval = low_latency;
4818 }
4819 break;
4820 }
4821
4822 /* clear work counters since we have the values we need */
4823 ring_container->total_bytes = 0;
4824 ring_container->total_packets = 0;
4825
4826 /* write updated itr to ring container */
4827 ring_container->itr = itrval;
4828}
4829
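/**
 * igb_set_itr - update the dynamic ITR value based on ring statistics
 * @q_vector: pointer to q_vector for which the ITR is being updated
 *
 * Runs igb_update_itr() on the Tx and Rx ring containers, takes the
 * more latency-sensitive of the two results, and maps it to an ITR
 * register value.  At non-gigabit link speeds the rate is simply pinned
 * at 4000 ints/sec.  The new value is stored and written to hardware at
 * the start of the next interrupt.
 **/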
4830static void igb_set_itr(struct igb_q_vector *q_vector)
4831{
4832 struct igb_adapter *adapter = q_vector->adapter;
4833 u32 new_itr = q_vector->itr_val;
4834 u8 current_itr = 0;
4835
4836 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
4837 if (adapter->link_speed != SPEED_1000) {
4838 current_itr = 0;
4839 new_itr = IGB_4K_ITR;
4840 goto set_itr_now;
4841 }
4842
4843 igb_update_itr(q_vector, &q_vector->tx);
4844 igb_update_itr(q_vector, &q_vector->rx);
4845
4846 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
4847
4848 /* conservative mode (itr 3) eliminates the lowest_latency setting */
4849 if (current_itr == lowest_latency &&
4850 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
4851 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
4852 current_itr = low_latency;
4853
4854 switch (current_itr) {
4855 /* counts and packets in update_itr are dependent on these numbers */
4856 case lowest_latency:
4857 new_itr = IGB_70K_ITR; /* 70,000 ints/sec */
4858 break;
4859 case low_latency:
4860 new_itr = IGB_20K_ITR; /* 20,000 ints/sec */
4861 break;
4862 case bulk_latency:
4863 new_itr = IGB_4K_ITR; /* 4,000 ints/sec */
4864 break;
4865 default:
4866 break;
4867 }
4868
4869set_itr_now:
4870 if (new_itr != q_vector->itr_val) {
4871 /* this attempts to bias the interrupt rate towards Bulk
4872 * by adding intermediate steps when interrupt rate is
4873 * increasing
4874 */
4875 new_itr = new_itr > q_vector->itr_val ?
4876 max((new_itr * q_vector->itr_val) /
4877 (new_itr + (q_vector->itr_val >> 2)),
4878 new_itr) : new_itr;
4879 /* Don't write the value here; it resets the adapter's
4880 * internal timer, and causes us to delay far longer than
4881 * we should between interrupts. Instead, we write the ITR
4882 * value at the beginning of the next interrupt so the timing
4883 * ends up being correct.
4884 */
4885 q_vector->itr_val = new_itr;
4886 q_vector->set_itr = 1;
4887 }
4888}
4889
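/**
 * igb_tx_ctxtdesc - write an advanced Tx context descriptor
 * @tx_ring: ring the descriptor is placed on
 * @vlan_macip_lens: VLAN tag and MAC/IP header length fields
 * @type_tucmd: descriptor type and TU command fields
 * @mss_l4len_idx: MSS, L4 header length, and context index fields
 *
 * Fills the context descriptor at next_to_use and advances the ring
 * index so the data descriptors that follow can reference it.
 **/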
4890static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
4891 u32 type_tucmd, u32 mss_l4len_idx)
4892{
4893 struct e1000_adv_tx_context_desc *context_desc;
4894 u16 i = tx_ring->next_to_use;
4895
4896 context_desc = IGB_TX_CTXTDESC(tx_ring, i);
4897
4898 i++;
4899 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
4900
4901 /* set bits to identify this as an advanced context descriptor */
4902 type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
4903
4904 /* For 82575, context index must be unique per ring. */
4905 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
4906 mss_l4len_idx |= tx_ring->reg_idx << 4;
4907
4908 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
4909 context_desc->seqnum_seed = 0;
4910 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
4911 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
4912}
4913
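/**
 * igb_tso - configure a TSO context descriptor if the skb needs one
 * @tx_ring: ring the packet will be transmitted on
 * @first: first tx_buffer structure for this packet
 * @hdr_len: returns the combined network and transport header length
 *
 * Returns 0 when the frame needs no segmentation, a negative error code
 * when the headers could not be made writable, or 1 after the TSO
 * context descriptor has been written.
 **/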
4914static int igb_tso(struct igb_ring *tx_ring,
4915 struct igb_tx_buffer *first,
4916 u8 *hdr_len)
4917{
4918 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
4919 struct sk_buff *skb = first->skb;
4920 union {
4921 struct iphdr *v4;
4922 struct ipv6hdr *v6;
4923 unsigned char *hdr;
4924 } ip;
4925 union {
4926 struct tcphdr *tcp;
4927 unsigned char *hdr;
4928 } l4;
4929 u32 paylen, l4_offset;
4930 int err;
4931
4932 if (skb->ip_summed != CHECKSUM_PARTIAL)
4933 return 0;
4934
4935 if (!skb_is_gso(skb))
4936 return 0;
4937
4938 err = skb_cow_head(skb, 0);
4939 if (err < 0)
4940 return err;
4941
4942 ip.hdr = skb_network_header(skb);
4943 l4.hdr = skb_checksum_start(skb);
4944
4945 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
4946 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
4947
4948 /* initialize outer IP header fields */
4949 if (ip.v4->version == 4) {
4950 unsigned char *csum_start = skb_checksum_start(skb);
4951 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
4952
4953 /* IP header will have to cancel out any data that
4954 * is not a part of the outer IP header
4955 */
4956 ip.v4->check = csum_fold(csum_partial(trans_start,
4957 csum_start - trans_start,
4958 0));
4959 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
4960
4961 ip.v4->tot_len = 0;
4962 first->tx_flags |= IGB_TX_FLAGS_TSO |
4963 IGB_TX_FLAGS_CSUM |
4964 IGB_TX_FLAGS_IPV4;
4965 } else {
4966 ip.v6->payload_len = 0;
4967 first->tx_flags |= IGB_TX_FLAGS_TSO |
4968 IGB_TX_FLAGS_CSUM;
4969 }
4970
4971 /* determine offset of inner transport header */
4972 l4_offset = l4.hdr - skb->data;
4973
4974 /* compute length of segmentation header */
4975 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
4976
4977 /* remove payload length from inner checksum */
4978 paylen = skb->len - l4_offset;
4979 csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
4980
4981 /* update gso size and bytecount with header size */
4982 first->gso_segs = skb_shinfo(skb)->gso_segs;
4983 first->bytecount += (first->gso_segs - 1) * *hdr_len;
4984
4985 /* MSS L4LEN IDX */
4986 mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT;
4987 mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
4988
4989 /* VLAN MACLEN IPLEN */
4990 vlan_macip_lens = l4.hdr - ip.hdr;
4991 vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
4992 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
4993
4994 igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
4995
4996 return 1;
4997}
4998
4999static inline bool igb_ipv6_csum_is_sctp(struct sk_buff *skb)
5000{
5001 unsigned int offset = 0;
5002
5003 ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
5004
5005 return offset == skb_checksum_start_offset(skb);
5006}
5007
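/**
 * igb_tx_csum - configure Tx checksum offload for a packet
 * @tx_ring: ring the packet will be transmitted on
 * @first: first tx_buffer structure for this packet
 *
 * Writes a context descriptor requesting L4 checksum insertion for TCP,
 * UDP, and validated SCTP frames.  Frames with an unrecognized checksum
 * offset are checksummed in software instead, and when no offload is
 * needed a descriptor is written only if a VLAN tag must be carried.
 **/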
5008static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
5009{
5010 struct sk_buff *skb = first->skb;
5011 u32 vlan_macip_lens = 0;
5012 u32 type_tucmd = 0;
5013
5014 if (skb->ip_summed != CHECKSUM_PARTIAL) {
5015csum_failed:
5016 if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
5017 return;
5018 goto no_csum;
5019 }
5020
5021 switch (skb->csum_offset) {
5022 case offsetof(struct tcphdr, check):
5023 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
5024 /* fall through */
5025 case offsetof(struct udphdr, check):
5026 break;
5027 case offsetof(struct sctphdr, checksum):
5028 /* validate that this is actually an SCTP request */
5029 if (((first->protocol == htons(ETH_P_IP)) &&
5030 (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
5031 ((first->protocol == htons(ETH_P_IPV6)) &&
5032 igb_ipv6_csum_is_sctp(skb))) {
5033 type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
5034 break;
5035		}
		/* fall through */
5036	default:
5037 skb_checksum_help(skb);
5038 goto csum_failed;
5039 }
5040
5041 /* update TX checksum flag */
5042 first->tx_flags |= IGB_TX_FLAGS_CSUM;
5043 vlan_macip_lens = skb_checksum_start_offset(skb) -
5044 skb_network_offset(skb);
5045no_csum:
5046 vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
5047 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
5048
5049 igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0);
5050}
5051
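/* IGB_SET_FLAG moves the bit selected by _flag in _input to the bit
 * position required by _result.  Comparing the two masks lets the
 * compiler reduce the translation to a single shift in the appropriate
 * direction when the masks are compile-time constants.
 */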
5052#define IGB_SET_FLAG(_input, _flag, _result) \
5053 ((_flag <= _result) ? \
5054 ((u32)(_input & _flag) * (_result / _flag)) : \
5055 ((u32)(_input & _flag) / (_flag / _result)))
5056
5057static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
5058{
5059 /* set type for advanced descriptor with frame checksum insertion */
5060 u32 cmd_type = E1000_ADVTXD_DTYP_DATA |
5061 E1000_ADVTXD_DCMD_DEXT |
5062 E1000_ADVTXD_DCMD_IFCS;
5063
5064 /* set HW vlan bit if vlan is present */
5065 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN,
5066 (E1000_ADVTXD_DCMD_VLE));
5067
5068 /* set segmentation bits for TSO */
5069 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO,
5070 (E1000_ADVTXD_DCMD_TSE));
5071
5072 /* set timestamp bit if present */
5073 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP,
5074 (E1000_ADVTXD_MAC_TSTAMP));
5075
5076 /* insert frame checksum */
5077 cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS);
5078
5079 return cmd_type;
5080}
5081
5082static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
5083 union e1000_adv_tx_desc *tx_desc,
5084 u32 tx_flags, unsigned int paylen)
5085{
5086 u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
5087
5088 /* 82575 requires a unique index per ring */
5089 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
5090 olinfo_status |= tx_ring->reg_idx << 4;
5091
5092 /* insert L4 checksum */
5093 olinfo_status |= IGB_SET_FLAG(tx_flags,
5094 IGB_TX_FLAGS_CSUM,
5095 (E1000_TXD_POPTS_TXSM << 8));
5096
5097 /* insert IPv4 checksum */
5098 olinfo_status |= IGB_SET_FLAG(tx_flags,
5099 IGB_TX_FLAGS_IPV4,
5100 (E1000_TXD_POPTS_IXSM << 8));
5101
5102 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
5103}
5104
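/**
 * __igb_maybe_stop_tx - second chance check before stopping a Tx queue
 * @tx_ring: ring that is running out of descriptors
 * @size: number of descriptors needed by the pending transmit
 *
 * Stops the subqueue, then re-checks the free descriptor count after a
 * memory barrier; if the clean-up path freed enough descriptors in the
 * meantime the queue is restarted.  Returns 0 when transmission can
 * continue or -EBUSY when the queue remains stopped.
 **/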
5105static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
5106{
5107 struct net_device *netdev = tx_ring->netdev;
5108
5109 netif_stop_subqueue(netdev, tx_ring->queue_index);
5110
5111 /* Herbert's original patch had:
5112 * smp_mb__after_netif_stop_queue();
5113 * but since that doesn't exist yet, just open code it.
5114 */
5115 smp_mb();
5116
5117 /* We need to check again in a case another CPU has just
5118 * made room available.
5119 */
5120 if (igb_desc_unused(tx_ring) < size)
5121 return -EBUSY;
5122
5123 /* A reprieve! */
5124 netif_wake_subqueue(netdev, tx_ring->queue_index);
5125
5126 u64_stats_update_begin(&tx_ring->tx_syncp2);
5127 tx_ring->tx_stats.restart_queue2++;
5128 u64_stats_update_end(&tx_ring->tx_syncp2);
5129
5130 return 0;
5131}
5132
5133static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
5134{
5135 if (igb_desc_unused(tx_ring) >= size)
5136 return 0;
5137 return __igb_maybe_stop_tx(tx_ring, size);
5138}
5139
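/**
 * igb_tx_map - map skb data and post data descriptors to hardware
 * @tx_ring: ring the packet will be transmitted on
 * @first: first tx_buffer structure for this packet
 * @hdr_len: length of the segmentation header, zero when not using TSO
 *
 * DMA-maps the linear data and every page fragment, splitting buffers
 * larger than IGB_MAX_DATA_PER_TXD over multiple descriptors, writes
 * the last descriptor with the RS and EOP bits, and bumps the tail
 * register unless more frames are pending.  On a mapping failure all
 * mappings taken for this packet are unwound.
 **/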
5140static void igb_tx_map(struct igb_ring *tx_ring,
5141 struct igb_tx_buffer *first,
5142 const u8 hdr_len)
5143{
5144 struct sk_buff *skb = first->skb;
5145 struct igb_tx_buffer *tx_buffer;
5146 union e1000_adv_tx_desc *tx_desc;
5147 struct skb_frag_struct *frag;
5148 dma_addr_t dma;
5149 unsigned int data_len, size;
5150 u32 tx_flags = first->tx_flags;
5151 u32 cmd_type = igb_tx_cmd_type(skb, tx_flags);
5152 u16 i = tx_ring->next_to_use;
5153
5154 tx_desc = IGB_TX_DESC(tx_ring, i);
5155
5156 igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
5157
5158 size = skb_headlen(skb);
5159 data_len = skb->data_len;
5160
5161 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
5162
5163 tx_buffer = first;
5164
5165 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
5166 if (dma_mapping_error(tx_ring->dev, dma))
5167 goto dma_error;
5168
5169 /* record length, and DMA address */
5170 dma_unmap_len_set(tx_buffer, len, size);
5171 dma_unmap_addr_set(tx_buffer, dma, dma);
5172
5173 tx_desc->read.buffer_addr = cpu_to_le64(dma);
5174
5175 while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
5176 tx_desc->read.cmd_type_len =
5177 cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD);
5178
5179 i++;
5180 tx_desc++;
5181 if (i == tx_ring->count) {
5182 tx_desc = IGB_TX_DESC(tx_ring, 0);
5183 i = 0;
5184 }
5185 tx_desc->read.olinfo_status = 0;
5186
5187 dma += IGB_MAX_DATA_PER_TXD;
5188 size -= IGB_MAX_DATA_PER_TXD;
5189
5190 tx_desc->read.buffer_addr = cpu_to_le64(dma);
5191 }
5192
5193 if (likely(!data_len))
5194 break;
5195
5196 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
5197
5198 i++;
5199 tx_desc++;
5200 if (i == tx_ring->count) {
5201 tx_desc = IGB_TX_DESC(tx_ring, 0);
5202 i = 0;
5203 }
5204 tx_desc->read.olinfo_status = 0;
5205
5206 size = skb_frag_size(frag);
5207 data_len -= size;
5208
5209 dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
5210 size, DMA_TO_DEVICE);
5211
5212 tx_buffer = &tx_ring->tx_buffer_info[i];
5213 }
5214
5215 /* write last descriptor with RS and EOP bits */
5216 cmd_type |= size | IGB_TXD_DCMD;
5217 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
5218
5219 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
5220
5221 /* set the timestamp */
5222 first->time_stamp = jiffies;
5223
5224 /* Force memory writes to complete before letting h/w know there
5225 * are new descriptors to fetch. (Only applicable for weak-ordered
5226 * memory model archs, such as IA-64).
5227 *
5228 * We also need this memory barrier to make certain all of the
5229 * status bits have been updated before next_to_watch is written.
5230 */
5231 wmb();
5232
5233 /* set next_to_watch value indicating a packet is present */
5234 first->next_to_watch = tx_desc;
5235
5236 i++;
5237 if (i == tx_ring->count)
5238 i = 0;
5239
5240 tx_ring->next_to_use = i;
5241
5242 /* Make sure there is space in the ring for the next send. */
5243 igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
5244
5245 if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
5246 writel(i, tx_ring->tail);
5247
5248		/* We need this if more than one processor can write to our tail
5249		 * at a time; it synchronizes IO on IA64/Altix systems.
5250 */
5251 mmiowb();
5252 }
5253 return;
5254
5255dma_error:
5256 dev_err(tx_ring->dev, "TX DMA map failed\n");
5257
5258 /* clear dma mappings for failed tx_buffer_info map */
5259 for (;;) {
5260 tx_buffer = &tx_ring->tx_buffer_info[i];
5261 igb_unmap_and_free_tx_resource(tx_ring, tx_buffer);
5262 if (tx_buffer == first)
5263 break;
5264 if (i == 0)
5265 i = tx_ring->count;
5266 i--;
5267 }
5268
5269 tx_ring->next_to_use = i;
5270}
5271
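/**
 * igb_xmit_frame_ring - transmit an skb on a specific Tx ring
 * @skb: socket buffer to transmit
 * @tx_ring: ring the packet is placed on
 *
 * Reserves descriptors for the worst-case layout of the frame, records
 * PTP timestamp and VLAN state in the first tx_buffer, performs the
 * TSO/checksum offload setup, and hands the frame to igb_tx_map().
 * Returns NETDEV_TX_BUSY only when the ring has no room; every other
 * path consumes the skb and returns NETDEV_TX_OK.
 **/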
5272netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
5273 struct igb_ring *tx_ring)
5274{
5275 struct igb_tx_buffer *first;
5276 int tso;
5277 u32 tx_flags = 0;
5278 unsigned short f;
5279 u16 count = TXD_USE_COUNT(skb_headlen(skb));
5280 __be16 protocol = vlan_get_protocol(skb);
5281 u8 hdr_len = 0;
5282
5283 /* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD,
5284 * + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD,
5285 * + 2 desc gap to keep tail from touching head,
5286 * + 1 desc for context descriptor,
5287 * otherwise try next time
5288 */
5289 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
5290 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
5291
5292 if (igb_maybe_stop_tx(tx_ring, count + 3)) {
5293 /* this is a hard error */
5294 return NETDEV_TX_BUSY;
5295 }
5296
5297 /* record the location of the first descriptor for this packet */
5298 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
5299 first->skb = skb;
5300 first->bytecount = skb->len;
5301 first->gso_segs = 1;
5302
5303 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
5304 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
5305
5306 if (!test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS,
5307 &adapter->state)) {
5308 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
5309 tx_flags |= IGB_TX_FLAGS_TSTAMP;
5310
5311 adapter->ptp_tx_skb = skb_get(skb);
5312 adapter->ptp_tx_start = jiffies;
5313 if (adapter->hw.mac.type == e1000_82576)
5314 schedule_work(&adapter->ptp_tx_work);
5315 }
5316 }
5317
5318 skb_tx_timestamp(skb);
5319
5320 if (skb_vlan_tag_present(skb)) {
5321 tx_flags |= IGB_TX_FLAGS_VLAN;
5322 tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
5323 }
5324
5325 /* record initial flags and protocol */
5326 first->tx_flags = tx_flags;
5327 first->protocol = protocol;
5328
5329 tso = igb_tso(tx_ring, first, &hdr_len);
5330 if (tso < 0)
5331 goto out_drop;
5332 else if (!tso)
5333 igb_tx_csum(tx_ring, first);
5334
5335 igb_tx_map(tx_ring, first, hdr_len);
5336
5337 return NETDEV_TX_OK;
5338
5339out_drop:
5340 igb_unmap_and_free_tx_resource(tx_ring, first);
5341
5342 return NETDEV_TX_OK;
5343}
5344
5345static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
5346 struct sk_buff *skb)
5347{
5348 unsigned int r_idx = skb->queue_mapping;
5349
5350 if (r_idx >= adapter->num_tx_queues)
5351 r_idx = r_idx % adapter->num_tx_queues;
5352
5353 return adapter->tx_ring[r_idx];
5354}
5355
5356static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
5357 struct net_device *netdev)
5358{
5359 struct igb_adapter *adapter = netdev_priv(netdev);
5360
5361 /* The minimum packet size with TCTL.PSP set is 17 so pad the skb
5362 * in order to meet this minimum size requirement.
5363 */
5364 if (skb_put_padto(skb, 17))
5365 return NETDEV_TX_OK;
5366
5367 return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
5368}
5369
5370/**
5371 * igb_tx_timeout - Respond to a Tx Hang
5372 * @netdev: network interface device structure
5373 **/
5374static void igb_tx_timeout(struct net_device *netdev)
5375{
5376 struct igb_adapter *adapter = netdev_priv(netdev);
5377 struct e1000_hw *hw = &adapter->hw;
5378
5379 /* Do the reset outside of interrupt context */
5380 adapter->tx_timeout_count++;
5381
5382 if (hw->mac.type >= e1000_82580)
5383 hw->dev_spec._82575.global_device_reset = true;
5384
5385 schedule_work(&adapter->reset_task);
5386 wr32(E1000_EICS,
5387 (adapter->eims_enable_mask & ~adapter->eims_other));
5388}
5389
5390static void igb_reset_task(struct work_struct *work)
5391{
5392	struct igb_adapter *adapter;
5393
5394	adapter = container_of(work, struct igb_adapter, reset_task);
5395 igb_dump(adapter);
5396 netdev_err(adapter->netdev, "Reset adapter\n");
5397 igb_reinit_locked(adapter);
5398}
5399
5400/**
5401 * igb_get_stats64 - Get System Network Statistics
5402 * @netdev: network interface device structure
5403 * @stats: rtnl_link_stats64 pointer
5404 **/
5405static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
5406 struct rtnl_link_stats64 *stats)
5407{
5408 struct igb_adapter *adapter = netdev_priv(netdev);
5409
5410 spin_lock(&adapter->stats64_lock);
5411 igb_update_stats(adapter, &adapter->stats64);
5412 memcpy(stats, &adapter->stats64, sizeof(*stats));
5413 spin_unlock(&adapter->stats64_lock);
5414
5415 return stats;
5416}
5417
5418/**
5419 * igb_change_mtu - Change the Maximum Transfer Unit
5420 * @netdev: network interface device structure
5421 * @new_mtu: new value for maximum frame size
5422 *
5423 * Returns 0 on success, negative on failure
5424 **/
5425static int igb_change_mtu(struct net_device *netdev, int new_mtu)
5426{
5427 struct igb_adapter *adapter = netdev_priv(netdev);
5428 struct pci_dev *pdev = adapter->pdev;
5429 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
5430
5431 /* adjust max frame to be at least the size of a standard frame */
5432 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
5433 max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
5434
5435 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
5436 usleep_range(1000, 2000);
5437
5438 /* igb_down has a dependency on max_frame_size */
5439 adapter->max_frame_size = max_frame;
5440
5441 if (netif_running(netdev))
5442 igb_down(adapter);
5443
5444 dev_info(&pdev->dev, "changing MTU from %d to %d\n",
5445 netdev->mtu, new_mtu);
5446 netdev->mtu = new_mtu;
5447
5448 if (netif_running(netdev))
5449 igb_up(adapter);
5450 else
5451 igb_reset(adapter);
5452
5453 clear_bit(__IGB_RESETTING, &adapter->state);
5454
5455 return 0;
5456}
5457
5458/**
5459 * igb_update_stats - Update the board statistics counters
5460 * @adapter: board private structure
5461 **/
5462void igb_update_stats(struct igb_adapter *adapter,
5463 struct rtnl_link_stats64 *net_stats)
5464{
5465 struct e1000_hw *hw = &adapter->hw;
5466 struct pci_dev *pdev = adapter->pdev;
5467 u32 reg, mpc;
5468 int i;
5469 u64 bytes, packets;
5470 unsigned int start;
5471 u64 _bytes, _packets;
5472
5473 /* Prevent stats update while adapter is being reset, or if the pci
5474 * connection is down.
5475 */
5476 if (adapter->link_speed == 0)
5477 return;
5478 if (pci_channel_offline(pdev))
5479 return;
5480
5481 bytes = 0;
5482 packets = 0;
5483
5484 rcu_read_lock();
5485 for (i = 0; i < adapter->num_rx_queues; i++) {
5486 struct igb_ring *ring = adapter->rx_ring[i];
5487 u32 rqdpc = rd32(E1000_RQDPC(i));
5488 if (hw->mac.type >= e1000_i210)
5489 wr32(E1000_RQDPC(i), 0);
5490
5491 if (rqdpc) {
5492 ring->rx_stats.drops += rqdpc;
5493 net_stats->rx_fifo_errors += rqdpc;
5494 }
5495
5496 do {
5497 start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
5498 _bytes = ring->rx_stats.bytes;
5499 _packets = ring->rx_stats.packets;
5500 } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
5501 bytes += _bytes;
5502 packets += _packets;
5503 }
5504
5505 net_stats->rx_bytes = bytes;
5506 net_stats->rx_packets = packets;
5507
5508 bytes = 0;
5509 packets = 0;
5510 for (i = 0; i < adapter->num_tx_queues; i++) {
5511 struct igb_ring *ring = adapter->tx_ring[i];
5512 do {
5513 start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
5514 _bytes = ring->tx_stats.bytes;
5515 _packets = ring->tx_stats.packets;
5516 } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
5517 bytes += _bytes;
5518 packets += _packets;
5519 }
5520 net_stats->tx_bytes = bytes;
5521 net_stats->tx_packets = packets;
5522 rcu_read_unlock();
5523
5524 /* read stats registers */
5525 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
5526 adapter->stats.gprc += rd32(E1000_GPRC);
5527 adapter->stats.gorc += rd32(E1000_GORCL);
5528 rd32(E1000_GORCH); /* clear GORCL */
5529 adapter->stats.bprc += rd32(E1000_BPRC);
5530 adapter->stats.mprc += rd32(E1000_MPRC);
5531 adapter->stats.roc += rd32(E1000_ROC);
5532
5533 adapter->stats.prc64 += rd32(E1000_PRC64);
5534 adapter->stats.prc127 += rd32(E1000_PRC127);
5535 adapter->stats.prc255 += rd32(E1000_PRC255);
5536 adapter->stats.prc511 += rd32(E1000_PRC511);
5537 adapter->stats.prc1023 += rd32(E1000_PRC1023);
5538 adapter->stats.prc1522 += rd32(E1000_PRC1522);
5539 adapter->stats.symerrs += rd32(E1000_SYMERRS);
5540 adapter->stats.sec += rd32(E1000_SEC);
5541
5542 mpc = rd32(E1000_MPC);
5543 adapter->stats.mpc += mpc;
5544 net_stats->rx_fifo_errors += mpc;
5545 adapter->stats.scc += rd32(E1000_SCC);
5546 adapter->stats.ecol += rd32(E1000_ECOL);
5547 adapter->stats.mcc += rd32(E1000_MCC);
5548 adapter->stats.latecol += rd32(E1000_LATECOL);
5549 adapter->stats.dc += rd32(E1000_DC);
5550 adapter->stats.rlec += rd32(E1000_RLEC);
5551 adapter->stats.xonrxc += rd32(E1000_XONRXC);
5552 adapter->stats.xontxc += rd32(E1000_XONTXC);
5553 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
5554 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
5555 adapter->stats.fcruc += rd32(E1000_FCRUC);
5556 adapter->stats.gptc += rd32(E1000_GPTC);
5557 adapter->stats.gotc += rd32(E1000_GOTCL);
5558 rd32(E1000_GOTCH); /* clear GOTCL */
5559 adapter->stats.rnbc += rd32(E1000_RNBC);
5560 adapter->stats.ruc += rd32(E1000_RUC);
5561 adapter->stats.rfc += rd32(E1000_RFC);
5562 adapter->stats.rjc += rd32(E1000_RJC);
5563 adapter->stats.tor += rd32(E1000_TORH);
5564 adapter->stats.tot += rd32(E1000_TOTH);
5565 adapter->stats.tpr += rd32(E1000_TPR);
5566
5567 adapter->stats.ptc64 += rd32(E1000_PTC64);
5568 adapter->stats.ptc127 += rd32(E1000_PTC127);
5569 adapter->stats.ptc255 += rd32(E1000_PTC255);
5570 adapter->stats.ptc511 += rd32(E1000_PTC511);
5571 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
5572 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
5573
5574 adapter->stats.mptc += rd32(E1000_MPTC);
5575 adapter->stats.bptc += rd32(E1000_BPTC);
5576
5577 adapter->stats.tpt += rd32(E1000_TPT);
5578 adapter->stats.colc += rd32(E1000_COLC);
5579
5580 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
5581 /* read internal phy specific stats */
5582 reg = rd32(E1000_CTRL_EXT);
5583 if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
5584 adapter->stats.rxerrc += rd32(E1000_RXERRC);
5585
5586 /* this stat has invalid values on i210/i211 */
5587 if ((hw->mac.type != e1000_i210) &&
5588 (hw->mac.type != e1000_i211))
5589 adapter->stats.tncrs += rd32(E1000_TNCRS);
5590 }
5591
5592 adapter->stats.tsctc += rd32(E1000_TSCTC);
5593 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
5594
5595 adapter->stats.iac += rd32(E1000_IAC);
5596 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
5597 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
5598 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
5599 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
5600 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
5601 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
5602 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
5603 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
5604
5605 /* Fill out the OS statistics structure */
5606 net_stats->multicast = adapter->stats.mprc;
5607 net_stats->collisions = adapter->stats.colc;
5608
5609 /* Rx Errors */
5610
5611 /* RLEC on some newer hardware can be incorrect so build
5612 * our own version based on RUC and ROC
5613 */
5614 net_stats->rx_errors = adapter->stats.rxerrc +
5615 adapter->stats.crcerrs + adapter->stats.algnerrc +
5616 adapter->stats.ruc + adapter->stats.roc +
5617 adapter->stats.cexterr;
5618 net_stats->rx_length_errors = adapter->stats.ruc +
5619 adapter->stats.roc;
5620 net_stats->rx_crc_errors = adapter->stats.crcerrs;
5621 net_stats->rx_frame_errors = adapter->stats.algnerrc;
5622 net_stats->rx_missed_errors = adapter->stats.mpc;
5623
5624 /* Tx Errors */
5625 net_stats->tx_errors = adapter->stats.ecol +
5626 adapter->stats.latecol;
5627 net_stats->tx_aborted_errors = adapter->stats.ecol;
5628 net_stats->tx_window_errors = adapter->stats.latecol;
5629 net_stats->tx_carrier_errors = adapter->stats.tncrs;
5630
5631 /* Tx Dropped needs to be maintained elsewhere */
5632
5633 /* Management Stats */
5634 adapter->stats.mgptc += rd32(E1000_MGTPTC);
5635 adapter->stats.mgprc += rd32(E1000_MGTPRC);
5636 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
5637
5638 /* OS2BMC Stats */
5639 reg = rd32(E1000_MANC);
5640 if (reg & E1000_MANC_EN_BMC2OS) {
5641 adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
5642 adapter->stats.o2bspc += rd32(E1000_O2BSPC);
5643 adapter->stats.b2ospc += rd32(E1000_B2OSPC);
5644 adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
5645 }
5646}
5647
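/**
 * igb_tsync_interrupt - handle the time-sync (TSICR) interrupt causes
 * @adapter: board private structure
 *
 * Dispatches PPS events, Tx timestamp work, reloads of the periodic
 * output target times, and auxiliary timestamp events, then writes the
 * handled causes back to TSICR to acknowledge them.
 **/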
5648static void igb_tsync_interrupt(struct igb_adapter *adapter)
5649{
5650 struct e1000_hw *hw = &adapter->hw;
5651 struct ptp_clock_event event;
5652 struct timespec64 ts;
5653 u32 ack = 0, tsauxc, sec, nsec, tsicr = rd32(E1000_TSICR);
5654
5655 if (tsicr & TSINTR_SYS_WRAP) {
5656 event.type = PTP_CLOCK_PPS;
5657 if (adapter->ptp_caps.pps)
5658 ptp_clock_event(adapter->ptp_clock, &event);
5659 else
5660			dev_err(&adapter->pdev->dev, "unexpected SYS WRAP\n");
5661 ack |= TSINTR_SYS_WRAP;
5662 }
5663
5664 if (tsicr & E1000_TSICR_TXTS) {
5665 /* retrieve hardware timestamp */
5666 schedule_work(&adapter->ptp_tx_work);
5667 ack |= E1000_TSICR_TXTS;
5668 }
5669
5670 if (tsicr & TSINTR_TT0) {
5671 spin_lock(&adapter->tmreg_lock);
5672 ts = timespec64_add(adapter->perout[0].start,
5673 adapter->perout[0].period);
5674 /* u32 conversion of tv_sec is safe until y2106 */
5675 wr32(E1000_TRGTTIML0, ts.tv_nsec);
5676 wr32(E1000_TRGTTIMH0, (u32)ts.tv_sec);
5677 tsauxc = rd32(E1000_TSAUXC);
5678 tsauxc |= TSAUXC_EN_TT0;
5679 wr32(E1000_TSAUXC, tsauxc);
5680 adapter->perout[0].start = ts;
5681 spin_unlock(&adapter->tmreg_lock);
5682 ack |= TSINTR_TT0;
5683 }
5684
5685 if (tsicr & TSINTR_TT1) {
5686 spin_lock(&adapter->tmreg_lock);
5687 ts = timespec64_add(adapter->perout[1].start,
5688 adapter->perout[1].period);
5689 wr32(E1000_TRGTTIML1, ts.tv_nsec);
5690 wr32(E1000_TRGTTIMH1, (u32)ts.tv_sec);
5691 tsauxc = rd32(E1000_TSAUXC);
5692 tsauxc |= TSAUXC_EN_TT1;
5693 wr32(E1000_TSAUXC, tsauxc);
5694 adapter->perout[1].start = ts;
5695 spin_unlock(&adapter->tmreg_lock);
5696 ack |= TSINTR_TT1;
5697 }
5698
5699 if (tsicr & TSINTR_AUTT0) {
5700 nsec = rd32(E1000_AUXSTMPL0);
5701 sec = rd32(E1000_AUXSTMPH0);
5702 event.type = PTP_CLOCK_EXTTS;
5703 event.index = 0;
5704 event.timestamp = sec * 1000000000ULL + nsec;
5705 ptp_clock_event(adapter->ptp_clock, &event);
5706 ack |= TSINTR_AUTT0;
5707 }
5708
5709 if (tsicr & TSINTR_AUTT1) {
5710 nsec = rd32(E1000_AUXSTMPL1);
5711 sec = rd32(E1000_AUXSTMPH1);
5712 event.type = PTP_CLOCK_EXTTS;
5713 event.index = 1;
5714 event.timestamp = sec * 1000000000ULL + nsec;
5715 ptp_clock_event(adapter->ptp_clock, &event);
5716 ack |= TSINTR_AUTT1;
5717 }
5718
5719 /* acknowledge the interrupts */
5720 wr32(E1000_TSICR, ack);
5721}
5722
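/**
 * igb_msix_other - MSI-X handler for causes not tied to a queue vector
 * @irq: interrupt number
 * @data: pointer to the board private structure
 *
 * Handles device reset requests, DMA out-of-sync (and possible VF
 * spoof) events, VF mailbox traffic, link status changes, and time-sync
 * interrupts, then re-arms the "other" cause in EIMS.
 **/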
5723static irqreturn_t igb_msix_other(int irq, void *data)
5724{
5725 struct igb_adapter *adapter = data;
5726 struct e1000_hw *hw = &adapter->hw;
5727 u32 icr = rd32(E1000_ICR);
5728 /* reading ICR causes bit 31 of EICR to be cleared */
5729
5730 if (icr & E1000_ICR_DRSTA)
5731 schedule_work(&adapter->reset_task);
5732
5733 if (icr & E1000_ICR_DOUTSYNC) {
5734 /* HW is reporting DMA is out of sync */
5735 adapter->stats.doosync++;
5736		/* The DMA Out of Sync is also an indication of a spoof event
5737 * in IOV mode. Check the Wrong VM Behavior register to
5738 * see if it is really a spoof event.
5739 */
5740 igb_check_wvbr(adapter);
5741 }
5742
5743 /* Check for a mailbox event */
5744 if (icr & E1000_ICR_VMMB)
5745 igb_msg_task(adapter);
5746
5747 if (icr & E1000_ICR_LSC) {
5748 hw->mac.get_link_status = 1;
5749 /* guard against interrupt when we're going down */
5750 if (!test_bit(__IGB_DOWN, &adapter->state))
5751 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5752 }
5753
5754 if (icr & E1000_ICR_TS)
5755 igb_tsync_interrupt(adapter);
5756
5757 wr32(E1000_EIMS, adapter->eims_other);
5758
5759 return IRQ_HANDLED;
5760}
5761
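/**
 * igb_write_itr - commit a previously computed ITR value to hardware
 * @q_vector: vector whose EITR register should be updated
 *
 * Writes the pending itr_val to the vector's EITR register, duplicating
 * the value into the high word on 82575 or setting the counter-ignore
 * bit on later parts, and clears the set_itr flag.
 **/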
5762static void igb_write_itr(struct igb_q_vector *q_vector)
5763{
5764 struct igb_adapter *adapter = q_vector->adapter;
5765 u32 itr_val = q_vector->itr_val & 0x7FFC;
5766
5767 if (!q_vector->set_itr)
5768 return;
5769
5770 if (!itr_val)
5771 itr_val = 0x4;
5772
5773 if (adapter->hw.mac.type == e1000_82575)
5774 itr_val |= itr_val << 16;
5775 else
5776 itr_val |= E1000_EITR_CNT_IGNR;
5777
5778 writel(itr_val, q_vector->itr_register);
5779 q_vector->set_itr = 0;
5780}
5781
5782static irqreturn_t igb_msix_ring(int irq, void *data)
5783{
5784 struct igb_q_vector *q_vector = data;
5785
5786 /* Write the ITR value calculated from the previous interrupt. */
5787 igb_write_itr(q_vector);
5788
5789 napi_schedule(&q_vector->napi);
5790
5791 return IRQ_HANDLED;
5792}
5793
5794#ifdef CONFIG_IGB_DCA
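/* Direct Cache Access (DCA) lets the chipset place descriptor and
 * header DMA writes into the cache of the CPU that will consume them.
 * The helpers below rewrite the per-ring DCA control registers whenever
 * a q_vector is serviced on a different CPU than it was last tagged
 * for.
 */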
5795static void igb_update_tx_dca(struct igb_adapter *adapter,
5796 struct igb_ring *tx_ring,
5797 int cpu)
5798{
5799 struct e1000_hw *hw = &adapter->hw;
5800 u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
5801
5802 if (hw->mac.type != e1000_82575)
5803 txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT;
5804
5805 /* We can enable relaxed ordering for reads, but not writes when
5806 * DCA is enabled. This is due to a known issue in some chipsets
5807 * which will cause the DCA tag to be cleared.
5808 */
5809 txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN |
5810 E1000_DCA_TXCTRL_DATA_RRO_EN |
5811 E1000_DCA_TXCTRL_DESC_DCA_EN;
5812
5813 wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl);
5814}
5815
5816static void igb_update_rx_dca(struct igb_adapter *adapter,
5817 struct igb_ring *rx_ring,
5818 int cpu)
5819{
5820 struct e1000_hw *hw = &adapter->hw;
5821 u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu);
5822
5823 if (hw->mac.type != e1000_82575)
5824 rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT;
5825
5826 /* We can enable relaxed ordering for reads, but not writes when
5827 * DCA is enabled. This is due to a known issue in some chipsets
5828 * which will cause the DCA tag to be cleared.
5829 */
5830 rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN |
5831 E1000_DCA_RXCTRL_DESC_DCA_EN;
5832
5833 wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl);
5834}
5835
5836static void igb_update_dca(struct igb_q_vector *q_vector)
5837{
5838 struct igb_adapter *adapter = q_vector->adapter;
5839 int cpu = get_cpu();
5840
5841 if (q_vector->cpu == cpu)
5842 goto out_no_update;
5843
5844 if (q_vector->tx.ring)
5845 igb_update_tx_dca(adapter, q_vector->tx.ring, cpu);
5846
5847 if (q_vector->rx.ring)
5848 igb_update_rx_dca(adapter, q_vector->rx.ring, cpu);
5849
5850 q_vector->cpu = cpu;
5851out_no_update:
5852 put_cpu();
5853}
5854
5855static void igb_setup_dca(struct igb_adapter *adapter)
5856{
5857 struct e1000_hw *hw = &adapter->hw;
5858 int i;
5859
5860 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
5861 return;
5862
5863 /* Always use CB2 mode, difference is masked in the CB driver. */
5864 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
5865
5866 for (i = 0; i < adapter->num_q_vectors; i++) {
5867 adapter->q_vector[i]->cpu = -1;
5868 igb_update_dca(adapter->q_vector[i]);
5869 }
5870}
5871
5872static int __igb_notify_dca(struct device *dev, void *data)
5873{
5874 struct net_device *netdev = dev_get_drvdata(dev);
5875 struct igb_adapter *adapter = netdev_priv(netdev);
5876 struct pci_dev *pdev = adapter->pdev;
5877 struct e1000_hw *hw = &adapter->hw;
5878 unsigned long event = *(unsigned long *)data;
5879
5880 switch (event) {
5881 case DCA_PROVIDER_ADD:
5882 /* if already enabled, don't do it again */
5883 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
5884 break;
5885 if (dca_add_requester(dev) == 0) {
5886 adapter->flags |= IGB_FLAG_DCA_ENABLED;
5887 dev_info(&pdev->dev, "DCA enabled\n");
5888 igb_setup_dca(adapter);
5889 break;
5890 }
5891 /* Fall Through since DCA is disabled. */
5892 case DCA_PROVIDER_REMOVE:
5893 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
5894 /* without this a class_device is left
5895 * hanging around in the sysfs model
5896 */
5897 dca_remove_requester(dev);
5898 dev_info(&pdev->dev, "DCA disabled\n");
5899 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
5900 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
5901 }
5902 break;
5903 }
5904
5905 return 0;
5906}
5907
5908static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
5909 void *p)
5910{
5911 int ret_val;
5912
5913 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
5914 __igb_notify_dca);
5915
5916 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
5917}
5918#endif /* CONFIG_IGB_DCA */
5919
5920#ifdef CONFIG_PCI_IOV
5921static int igb_vf_configure(struct igb_adapter *adapter, int vf)
5922{
5923 unsigned char mac_addr[ETH_ALEN];
5924
5925 eth_zero_addr(mac_addr);
5926 igb_set_vf_mac(adapter, vf, mac_addr);
5927
5928 /* By default spoof check is enabled for all VFs */
5929 adapter->vf_data[vf].spoofchk_enabled = true;
5930
5931 return 0;
5932}
5933
5934#endif
5935static void igb_ping_all_vfs(struct igb_adapter *adapter)
5936{
5937 struct e1000_hw *hw = &adapter->hw;
5938 u32 ping;
5939 int i;
5940
5941	for (i = 0; i < adapter->vfs_allocated_count; i++) {
5942 ping = E1000_PF_CONTROL_MSG;
5943 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
5944 ping |= E1000_VT_MSGTYPE_CTS;
5945 igb_write_mbx(hw, &ping, 1, i);
5946 }
5947}
5948
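/**
 * igb_set_vf_promisc - handle a VF request to change promiscuous mode
 * @adapter: board private structure
 * @msgbuf: mailbox message from the VF
 * @vf: number of the VF that sent the request
 *
 * Enables multicast promiscuous mode when the VF asks for it; otherwise
 * falls back to the stored multicast hash list, or to promiscuous mode
 * when that list exceeds the 30-entry hardware limit.  Returns -EINVAL
 * if the message carries request flags this PF does not support.
 **/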
5949static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5950{
5951 struct e1000_hw *hw = &adapter->hw;
5952 u32 vmolr = rd32(E1000_VMOLR(vf));
5953 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
5954
5955 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
5956 IGB_VF_FLAG_MULTI_PROMISC);
5957 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
5958
5959 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
5960 vmolr |= E1000_VMOLR_MPME;
5961 vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
5962 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
5963 } else {
5964 /* if we have hashes and we are clearing a multicast promisc
5965		 * flag, we need to write the hashes to the MTA, as this step
5966 * was previously skipped
5967 */
5968 if (vf_data->num_vf_mc_hashes > 30) {
5969 vmolr |= E1000_VMOLR_MPME;
5970 } else if (vf_data->num_vf_mc_hashes) {
5971 int j;
5972
5973 vmolr |= E1000_VMOLR_ROMPE;
5974 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
5975 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
5976 }
5977 }
5978
5979 wr32(E1000_VMOLR(vf), vmolr);
5980
5981	/* any flags left unprocessed at this point are likely unsupported */
5982 if (*msgbuf & E1000_VT_MSGINFO_MASK)
5983 return -EINVAL;
5984
5985 return 0;
5986}
5987
5988static int igb_set_vf_multicasts(struct igb_adapter *adapter,
5989 u32 *msgbuf, u32 vf)
5990{
5991 int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
5992 u16 *hash_list = (u16 *)&msgbuf[1];
5993 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
5994 int i;
5995
5996 /* salt away the number of multicast addresses assigned
5997	 * to this VF for later use to restore when the PF multicast
5998 * list changes
5999 */
6000 vf_data->num_vf_mc_hashes = n;
6001
6002 /* only up to 30 hash values supported */
6003 if (n > 30)
6004 n = 30;
6005
6006 /* store the hashes for later use */
6007 for (i = 0; i < n; i++)
6008 vf_data->vf_mc_hashes[i] = hash_list[i];
6009
6010 /* Flush and reset the mta with the new values */
6011 igb_set_rx_mode(adapter->netdev);
6012
6013 return 0;
6014}
6015
6016static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
6017{
6018 struct e1000_hw *hw = &adapter->hw;
6019 struct vf_data_storage *vf_data;
6020 int i, j;
6021
6022 for (i = 0; i < adapter->vfs_allocated_count; i++) {
6023 u32 vmolr = rd32(E1000_VMOLR(i));
6024
6025 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
6026
6027 vf_data = &adapter->vf_data[i];
6028
6029 if ((vf_data->num_vf_mc_hashes > 30) ||
6030 (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
6031 vmolr |= E1000_VMOLR_MPME;
6032 } else if (vf_data->num_vf_mc_hashes) {
6033 vmolr |= E1000_VMOLR_ROMPE;
6034 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
6035 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
6036 }
6037 wr32(E1000_VMOLR(i), vmolr);
6038 }
6039}
6040
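/**
 * igb_clear_vf_vfta - drop a VF from all VLAN filter table entries
 * @adapter: board private structure
 * @vf: number of the VF whose pool bits should be cleared
 *
 * Walks the VLVF array, removes the VF's pool bit from every entry it
 * is a member of, and clears the matching VFTA bit once no pool other
 * than the PF still references the VLAN.
 **/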
6041static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
6042{
6043 struct e1000_hw *hw = &adapter->hw;
6044 u32 pool_mask, vlvf_mask, i;
6045
6046 /* create mask for VF and other pools */
6047 pool_mask = E1000_VLVF_POOLSEL_MASK;
6048 vlvf_mask = BIT(E1000_VLVF_POOLSEL_SHIFT + vf);
6049
6050 /* drop PF from pool bits */
6051 pool_mask &= ~BIT(E1000_VLVF_POOLSEL_SHIFT +
6052 adapter->vfs_allocated_count);
6053
6054 /* Find the vlan filter for this id */
6055 for (i = E1000_VLVF_ARRAY_SIZE; i--;) {
6056 u32 vlvf = rd32(E1000_VLVF(i));
6057 u32 vfta_mask, vid, vfta;
6058
6059 /* remove the vf from the pool */
6060 if (!(vlvf & vlvf_mask))
6061 continue;
6062
6063 /* clear out bit from VLVF */
6064 vlvf ^= vlvf_mask;
6065
6066 /* if other pools are present, just remove ourselves */
6067 if (vlvf & pool_mask)
6068 goto update_vlvfb;
6069
6070 /* if PF is present, leave VFTA */
6071 if (vlvf & E1000_VLVF_POOLSEL_MASK)
6072 goto update_vlvf;
6073
6074 vid = vlvf & E1000_VLVF_VLANID_MASK;
6075 vfta_mask = BIT(vid % 32);
6076
6077 /* clear bit from VFTA */
6078 vfta = adapter->shadow_vfta[vid / 32];
6079 if (vfta & vfta_mask)
6080 hw->mac.ops.write_vfta(hw, vid / 32, vfta ^ vfta_mask);
6081update_vlvf:
6082 /* clear pool selection enable */
6083 if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
6084 vlvf &= E1000_VLVF_POOLSEL_MASK;
6085 else
6086 vlvf = 0;
6087update_vlvfb:
6088 /* clear pool bits */
6089 wr32(E1000_VLVF(i), vlvf);
6090 }
6091}
6092
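/**
 * igb_find_vlvf_entry - locate the VLVF slot holding a VLAN id
 * @hw: pointer to the HW structure
 * @vlan: VLAN id to search for
 *
 * Returns the index of the matching VLVF entry, or 0 when the VLAN is 0
 * or no entry matched; index 0 itself is never scanned by the loop.
 **/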
6093static int igb_find_vlvf_entry(struct e1000_hw *hw, u32 vlan)
6094{
6095 u32 vlvf;
6096 int idx;
6097
6098 /* short cut the special case */
6099 if (vlan == 0)
6100 return 0;
6101
6102 /* Search for the VLAN id in the VLVF entries */
6103 for (idx = E1000_VLVF_ARRAY_SIZE; --idx;) {
6104 vlvf = rd32(E1000_VLVF(idx));
6105 if ((vlvf & VLAN_VID_MASK) == vlan)
6106 break;
6107 }
6108
6109 return idx;
6110}
6111
6112static void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid)
6113{
6114 struct e1000_hw *hw = &adapter->hw;
6115 u32 bits, pf_id;
6116 int idx;
6117
6118 idx = igb_find_vlvf_entry(hw, vid);
6119 if (!idx)
6120 return;
6121
6122 /* See if any other pools are set for this VLAN filter
6123 * entry other than the PF.
6124 */
6125 pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
6126 bits = ~BIT(pf_id) & E1000_VLVF_POOLSEL_MASK;
6127 bits &= rd32(E1000_VLVF(idx));
6128
6129 /* Disable the filter so this falls into the default pool. */
6130 if (!bits) {
6131 if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
6132 wr32(E1000_VLVF(idx), BIT(pf_id));
6133 else
6134 wr32(E1000_VLVF(idx), 0);
6135 }
6136}
6137
6138static s32 igb_set_vf_vlan(struct igb_adapter *adapter, u32 vid,
6139 bool add, u32 vf)
6140{
6141 int pf_id = adapter->vfs_allocated_count;
6142 struct e1000_hw *hw = &adapter->hw;
6143 int err;
6144
6145	/* If the VLAN overlaps with one the PF is currently monitoring, make
6146	 * sure that we are able to allocate a VLVF entry. This may be
6147	 * redundant, but it guarantees that the PF will maintain visibility
6148	 * of the VLAN.
6149 */
6150 if (add && test_bit(vid, adapter->active_vlans)) {
6151 err = igb_vfta_set(hw, vid, pf_id, true, false);
6152 if (err)
6153 return err;
6154 }
6155
6156 err = igb_vfta_set(hw, vid, vf, add, false);
6157
6158 if (add && !err)
6159 return err;
6160
6161 /* If we failed to add the VF VLAN or we are removing the VF VLAN
6162 * we may need to drop the PF pool bit in order to allow us to free
6163 * up the VLVF resources.
6164 */
6165 if (test_bit(vid, adapter->active_vlans) ||
6166 (adapter->flags & IGB_FLAG_VLAN_PROMISC))
6167 igb_update_pf_vlvf(adapter, vid);
6168
6169 return err;
6170}
6171
6172static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
6173{
6174 struct e1000_hw *hw = &adapter->hw;
6175
6176 if (vid)
6177 wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
6178 else
6179 wr32(E1000_VMVIR(vf), 0);
6180}
6181
6182static int igb_enable_port_vlan(struct igb_adapter *adapter, int vf,
6183 u16 vlan, u8 qos)
6184{
6185 int err;
6186
6187 err = igb_set_vf_vlan(adapter, vlan, true, vf);
6188 if (err)
6189 return err;
6190
6191 igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
6192 igb_set_vmolr(adapter, vf, !vlan);
6193
6194 /* revoke access to previous VLAN */
6195 if (vlan != adapter->vf_data[vf].pf_vlan)
6196 igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
6197 false, vf);
6198
6199 adapter->vf_data[vf].pf_vlan = vlan;
6200 adapter->vf_data[vf].pf_qos = qos;
6201 igb_set_vf_vlan_strip(adapter, vf, true);
6202 dev_info(&adapter->pdev->dev,
6203 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
6204 if (test_bit(__IGB_DOWN, &adapter->state)) {
6205 dev_warn(&adapter->pdev->dev,
6206 "The VF VLAN has been set, but the PF device is not up.\n");
6207 dev_warn(&adapter->pdev->dev,
6208 "Bring the PF device up before attempting to use the VF device.\n");
6209 }
6210
6211 return err;
6212}
6213
6214static int igb_disable_port_vlan(struct igb_adapter *adapter, int vf)
6215{
6216 /* Restore tagless access via VLAN 0 */
6217 igb_set_vf_vlan(adapter, 0, true, vf);
6218
6219 igb_set_vmvir(adapter, 0, vf);
6220 igb_set_vmolr(adapter, vf, true);
6221
6222 /* Remove any PF assigned VLAN */
6223 if (adapter->vf_data[vf].pf_vlan)
6224 igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
6225 false, vf);
6226
6227 adapter->vf_data[vf].pf_vlan = 0;
6228 adapter->vf_data[vf].pf_qos = 0;
6229 igb_set_vf_vlan_strip(adapter, vf, false);
6230
6231 return 0;
6232}
6233
6234static int igb_ndo_set_vf_vlan(struct net_device *netdev, int vf,
6235 u16 vlan, u8 qos, __be16 vlan_proto)
6236{
6237 struct igb_adapter *adapter = netdev_priv(netdev);
6238
6239 if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
6240 return -EINVAL;
6241
6242 if (vlan_proto != htons(ETH_P_8021Q))
6243 return -EPROTONOSUPPORT;
6244
6245 return (vlan || qos) ? igb_enable_port_vlan(adapter, vf, vlan, qos) :
6246 igb_disable_port_vlan(adapter, vf);
6247}
6248
6249static int igb_set_vf_vlan_msg(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
6250{
6251 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
6252 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
6253 int ret;
6254
6255 if (adapter->vf_data[vf].pf_vlan)
6256 return -1;
6257
6258 /* VLAN 0 is a special case, don't allow it to be removed */
6259 if (!vid && !add)
6260 return 0;
6261
6262 ret = igb_set_vf_vlan(adapter, vid, !!add, vf);
6263 if (!ret)
6264 igb_set_vf_vlan_strip(adapter, vf, !!vid);
6265 return ret;
6266}
6267
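/**
 * igb_vf_reset - restore a VF's filters and flags to their defaults
 * @adapter: board private structure
 * @vf: number of the VF being reset
 *
 * Clears all VF flags except the PF-set-MAC indicator, reinstates any
 * administratively assigned port VLAN, empties the VF multicast hash
 * list, and rewrites the Rx mode so the MTA reflects the new state.
 **/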
6268static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
6269{
6270 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
6271
6272 /* clear flags - except flag that indicates PF has set the MAC */
6273 vf_data->flags &= IGB_VF_FLAG_PF_SET_MAC;
6274 vf_data->last_nack = jiffies;
6275
6276 /* reset vlans for device */
6277 igb_clear_vf_vfta(adapter, vf);
6278 igb_set_vf_vlan(adapter, vf_data->pf_vlan, true, vf);
6279 igb_set_vmvir(adapter, vf_data->pf_vlan |
6280 (vf_data->pf_qos << VLAN_PRIO_SHIFT), vf);
6281 igb_set_vmolr(adapter, vf, !vf_data->pf_vlan);
6282 igb_set_vf_vlan_strip(adapter, vf, !!(vf_data->pf_vlan));
6283
6284 /* reset multicast table array for vf */
6285 adapter->vf_data[vf].num_vf_mc_hashes = 0;
6286
6287 /* Flush and reset the mta with the new values */
6288 igb_set_rx_mode(adapter->netdev);
6289}
6290
6291static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
6292{
6293 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
6294
6295 /* clear mac address as we were hotplug removed/added */
6296 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
6297 eth_zero_addr(vf_mac);
6298
6299 /* process remaining reset events */
6300 igb_vf_reset(adapter, vf);
6301}
6302
6303static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
6304{
6305 struct e1000_hw *hw = &adapter->hw;
6306 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
6307 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
6308 u32 reg, msgbuf[3];
6309 u8 *addr = (u8 *)(&msgbuf[1]);
6310
6311 /* process all the same items cleared in a function level reset */
6312 igb_vf_reset(adapter, vf);
6313
6314 /* set vf mac address */
6315 igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
6316
6317 /* enable transmit and receive for vf */
6318 reg = rd32(E1000_VFTE);
6319 wr32(E1000_VFTE, reg | BIT(vf));
6320 reg = rd32(E1000_VFRE);
6321 wr32(E1000_VFRE, reg | BIT(vf));
6322
6323 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
6324
6325 /* reply to reset with ack and vf mac address */
6326 if (!is_zero_ether_addr(vf_mac)) {
6327 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
6328 memcpy(addr, vf_mac, ETH_ALEN);
6329 } else {
6330 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_NACK;
6331 }
6332 igb_write_mbx(hw, msgbuf, 3, vf);
6333}
6334
6335static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
6336{
6337 /* The VF MAC Address is stored in a packed array of bytes
6338 * starting at the second 32 bit word of the msg array
6339 */
6340	unsigned char *addr = (unsigned char *)&msg[1];
6341 int err = -1;
6342
6343 if (is_valid_ether_addr(addr))
6344 err = igb_set_vf_mac(adapter, vf, addr);
6345
6346 return err;
6347}
6348
6349static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
6350{
6351 struct e1000_hw *hw = &adapter->hw;
6352 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
6353 u32 msg = E1000_VT_MSGTYPE_NACK;
6354
6355 /* if device isn't clear to send it shouldn't be reading either */
6356 if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
6357 time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
6358 igb_write_mbx(hw, &msg, 1, vf);
6359 vf_data->last_nack = jiffies;
6360 }
6361}
6362
6363static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
6364{
6365 struct pci_dev *pdev = adapter->pdev;
6366 u32 msgbuf[E1000_VFMAILBOX_SIZE];
6367 struct e1000_hw *hw = &adapter->hw;
6368 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
6369 s32 retval;
6370
6371 retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
6372
6373 if (retval) {
6374 /* if receive failed revoke VF CTS stats and restart init */
6375 dev_err(&pdev->dev, "Error receiving message from VF\n");
6376 vf_data->flags &= ~IGB_VF_FLAG_CTS;
6377 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
6378 return;
6379 goto out;
6380 }
6381
6382 /* this is a message we already processed, do nothing */
6383 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
6384 return;
6385
6386 /* until the vf completes a reset it should not be
6387 * allowed to start any configuration.
6388 */
6389 if (msgbuf[0] == E1000_VF_RESET) {
6390 igb_vf_reset_msg(adapter, vf);
6391 return;
6392 }
6393
6394 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
6395 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
6396 return;
6397 retval = -1;
6398 goto out;
6399 }
6400
6401	switch (msgbuf[0] & 0xFFFF) {
6402 case E1000_VF_SET_MAC_ADDR:
6403 retval = -EINVAL;
6404 if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
6405 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
6406 else
6407 dev_warn(&pdev->dev,
6408 "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n",
6409 vf);
6410 break;
6411 case E1000_VF_SET_PROMISC:
6412 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
6413 break;
6414 case E1000_VF_SET_MULTICAST:
6415 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
6416 break;
6417 case E1000_VF_SET_LPE:
6418 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
6419 break;
6420 case E1000_VF_SET_VLAN:
6421 retval = -1;
6422 if (vf_data->pf_vlan)
6423 dev_warn(&pdev->dev,
6424 "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n",
6425 vf);
6426 else
6427 retval = igb_set_vf_vlan_msg(adapter, msgbuf, vf);
6428 break;
6429 default:
6430 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
6431 retval = -1;
6432 break;
6433 }
6434
6435 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
6436out:
6437 /* notify the VF of the results of what it sent us */
6438 if (retval)
6439 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
6440 else
6441 msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
6442
6443 igb_write_mbx(hw, msgbuf, 1, vf);
6444}
6445
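/**
 * igb_msg_task - service pending VF mailbox events
 * @adapter: board private structure
 *
 * Polls every allocated VF for outstanding reset requests, messages,
 * and acks, dispatching each to the matching handler.
 **/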
6446static void igb_msg_task(struct igb_adapter *adapter)
6447{
6448 struct e1000_hw *hw = &adapter->hw;
6449 u32 vf;
6450
6451 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
6452 /* process any reset requests */
6453 if (!igb_check_for_rst(hw, vf))
6454 igb_vf_reset_event(adapter, vf);
6455
6456 /* process any messages pending */
6457 if (!igb_check_for_msg(hw, vf))
6458 igb_rcv_msg_from_vf(adapter, vf);
6459
6460 /* process any acks */
6461 if (!igb_check_for_ack(hw, vf))
6462 igb_rcv_ack_from_vf(adapter, vf);
6463 }
6464}
6465
6466/**
6467 * igb_set_uta - Set unicast filter table address
6468 * @adapter: board private structure
6469 * @set: boolean indicating if we are setting or clearing bits
6470 *
6471 * The unicast table address is a register array of 32-bit registers.
6472 * The table is meant to be used in a way similar to how the MTA is used;
6473 * however, due to certain limitations in the hardware it is necessary to
6474 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
6475 * enable bit to allow VLAN tag stripping when promiscuous mode is enabled.
6476 **/
6477static void igb_set_uta(struct igb_adapter *adapter, bool set)
6478{
6479 struct e1000_hw *hw = &adapter->hw;
6480 u32 uta = set ? ~0 : 0;
6481 int i;
6482
6483 /* we only need to do this if VMDq is enabled */
6484 if (!adapter->vfs_allocated_count)
6485 return;
6486
6487 for (i = hw->mac.uta_reg_count; i--;)
6488 array_wr32(E1000_UTA, i, uta);
6489}
6490
6491/**
6492 * igb_intr_msi - Interrupt Handler
6493 * @irq: interrupt number
6494 * @data: pointer to a network interface device structure
6495 **/
6496static irqreturn_t igb_intr_msi(int irq, void *data)
6497{
6498 struct igb_adapter *adapter = data;
6499 struct igb_q_vector *q_vector = adapter->q_vector[0];
6500 struct e1000_hw *hw = &adapter->hw;
6501 /* read ICR disables interrupts using IAM */
6502 u32 icr = rd32(E1000_ICR);
6503
6504 igb_write_itr(q_vector);
6505
6506 if (icr & E1000_ICR_DRSTA)
6507 schedule_work(&adapter->reset_task);
6508
6509 if (icr & E1000_ICR_DOUTSYNC) {
6510 /* HW is reporting DMA is out of sync */
6511 adapter->stats.doosync++;
6512 }
6513
6514 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
6515 hw->mac.get_link_status = 1;
6516 if (!test_bit(__IGB_DOWN, &adapter->state))
6517 mod_timer(&adapter->watchdog_timer, jiffies + 1);
6518 }
6519
6520 if (icr & E1000_ICR_TS)
6521 igb_tsync_interrupt(adapter);
6522
6523 napi_schedule(&q_vector->napi);
6524
6525 return IRQ_HANDLED;
6526}
6527
6528/**
6529 * igb_intr - Legacy Interrupt Handler
6530 * @irq: interrupt number
6531 * @data: pointer to a network interface device structure
6532 **/
6533static irqreturn_t igb_intr(int irq, void *data)
6534{
6535 struct igb_adapter *adapter = data;
6536 struct igb_q_vector *q_vector = adapter->q_vector[0];
6537 struct e1000_hw *hw = &adapter->hw;
6538 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
6539 * need for the IMC write
6540 */
6541 u32 icr = rd32(E1000_ICR);
6542
6543 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
6544 * not set, then the adapter didn't send an interrupt
6545 */
6546 if (!(icr & E1000_ICR_INT_ASSERTED))
6547 return IRQ_NONE;
6548
6549 igb_write_itr(q_vector);
6550
6551 if (icr & E1000_ICR_DRSTA)
6552 schedule_work(&adapter->reset_task);
6553
6554 if (icr & E1000_ICR_DOUTSYNC) {
6555 /* HW is reporting DMA is out of sync */
6556 adapter->stats.doosync++;
6557 }
6558
6559 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
6560 hw->mac.get_link_status = 1;
6561 /* guard against interrupt when we're going down */
6562 if (!test_bit(__IGB_DOWN, &adapter->state))
6563 mod_timer(&adapter->watchdog_timer, jiffies + 1);
6564 }
6565
6566 if (icr & E1000_ICR_TS)
6567 igb_tsync_interrupt(adapter);
6568
6569 napi_schedule(&q_vector->napi);
6570
6571 return IRQ_HANDLED;
6572}
6573
6574static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
6575{
6576 struct igb_adapter *adapter = q_vector->adapter;
6577 struct e1000_hw *hw = &adapter->hw;
6578
6579 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
6580 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
6581 if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
6582 igb_set_itr(q_vector);
6583 else
6584 igb_update_ring_itr(q_vector);
6585 }
6586
6587 if (!test_bit(__IGB_DOWN, &adapter->state)) {
6588 if (adapter->flags & IGB_FLAG_HAS_MSIX)
6589 wr32(E1000_EIMS, q_vector->eims_value);
6590 else
6591 igb_irq_enable(adapter);
6592 }
6593}
6594
6595/**
6596 * igb_poll - NAPI Rx polling callback
6597 * @napi: napi polling structure
6598 * @budget: count of how many packets we should handle
6599 **/
6600static int igb_poll(struct napi_struct *napi, int budget)
6601{
6602 struct igb_q_vector *q_vector = container_of(napi,
6603 struct igb_q_vector,
6604 napi);
6605 bool clean_complete = true;
6606 int work_done = 0;
6607
6608#ifdef CONFIG_IGB_DCA
6609 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
6610 igb_update_dca(q_vector);
6611#endif
6612 if (q_vector->tx.ring)
6613 clean_complete = igb_clean_tx_irq(q_vector, budget);
6614
6615 if (q_vector->rx.ring) {
6616 int cleaned = igb_clean_rx_irq(q_vector, budget);
6617
6618 work_done += cleaned;
6619 if (cleaned >= budget)
6620 clean_complete = false;
6621 }
6622
6623 /* If all work not completed, return budget and keep polling */
6624 if (!clean_complete)
6625 return budget;
6626
6627 /* If not enough Rx work done, exit the polling mode */
6628 napi_complete_done(napi, work_done);
6629 igb_ring_irq_enable(q_vector);
6630
6631 return 0;
6632}
6633
6634/**
6635 * igb_clean_tx_irq - Reclaim resources after transmit completes
6636 * @q_vector: pointer to q_vector containing needed info
6637 * @napi_budget: NAPI budget, passed to napi_consume_skb(); a budget of zero indicates netpoll context
6638 *
6639 * returns true if ring is completely cleaned
6640 **/
6641static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
6642{
6643 struct igb_adapter *adapter = q_vector->adapter;
6644 struct igb_ring *tx_ring = q_vector->tx.ring;
6645 struct igb_tx_buffer *tx_buffer;
6646 union e1000_adv_tx_desc *tx_desc;
6647 unsigned int total_bytes = 0, total_packets = 0;
6648 unsigned int budget = q_vector->tx.work_limit;
6649 unsigned int i = tx_ring->next_to_clean;
6650
6651 if (test_bit(__IGB_DOWN, &adapter->state))
6652 return true;
6653
6654 tx_buffer = &tx_ring->tx_buffer_info[i];
6655 tx_desc = IGB_TX_DESC(tx_ring, i);
6656 i -= tx_ring->count;
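/* Note: i is biased by -count here so that the single (!i) test in
 * the loop below detects wrap-around. E.g. with count = 256 and
 * next_to_clean = 255, i starts at -1; the first i++ reaches 0 and
 * triggers the rewind to the start of the ring.
 */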
6657
6658 do {
6659 union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
6660
6661 /* if next_to_watch is not set then there is no work pending */
6662 if (!eop_desc)
6663 break;
6664
6665 /* prevent any other reads prior to eop_desc */
6666 read_barrier_depends();
6667
6668 /* if DD is not set pending work has not been completed */
6669 if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
6670 break;
6671
6672 /* clear next_to_watch to prevent false hangs */
6673 tx_buffer->next_to_watch = NULL;
6674
6675 /* update the statistics for this packet */
6676 total_bytes += tx_buffer->bytecount;
6677 total_packets += tx_buffer->gso_segs;
6678
6679 /* free the skb */
6680 napi_consume_skb(tx_buffer->skb, napi_budget);
6681
6682 /* unmap skb header data */
6683 dma_unmap_single(tx_ring->dev,
6684 dma_unmap_addr(tx_buffer, dma),
6685 dma_unmap_len(tx_buffer, len),
6686 DMA_TO_DEVICE);
6687
6688 /* clear tx_buffer data */
6689 tx_buffer->skb = NULL;
6690 dma_unmap_len_set(tx_buffer, len, 0);
6691
6692 /* clear last DMA location and unmap remaining buffers */
6693 while (tx_desc != eop_desc) {
6694 tx_buffer++;
6695 tx_desc++;
6696 i++;
6697 if (unlikely(!i)) {
6698 i -= tx_ring->count;
6699 tx_buffer = tx_ring->tx_buffer_info;
6700 tx_desc = IGB_TX_DESC(tx_ring, 0);
6701 }
6702
6703 /* unmap any remaining paged data */
6704 if (dma_unmap_len(tx_buffer, len)) {
6705 dma_unmap_page(tx_ring->dev,
6706 dma_unmap_addr(tx_buffer, dma),
6707 dma_unmap_len(tx_buffer, len),
6708 DMA_TO_DEVICE);
6709 dma_unmap_len_set(tx_buffer, len, 0);
6710 }
6711 }
6712
6713 /* move us one more past the eop_desc for start of next pkt */
6714 tx_buffer++;
6715 tx_desc++;
6716 i++;
6717 if (unlikely(!i)) {
6718 i -= tx_ring->count;
6719 tx_buffer = tx_ring->tx_buffer_info;
6720 tx_desc = IGB_TX_DESC(tx_ring, 0);
6721 }
6722
6723 /* issue prefetch for next Tx descriptor */
6724 prefetch(tx_desc);
6725
6726 /* update budget accounting */
6727 budget--;
6728 } while (likely(budget));
6729
6730 netdev_tx_completed_queue(txring_txq(tx_ring),
6731 total_packets, total_bytes);
6732 i += tx_ring->count;
6733 tx_ring->next_to_clean = i;
6734 u64_stats_update_begin(&tx_ring->tx_syncp);
6735 tx_ring->tx_stats.bytes += total_bytes;
6736 tx_ring->tx_stats.packets += total_packets;
6737 u64_stats_update_end(&tx_ring->tx_syncp);
6738 q_vector->tx.total_bytes += total_bytes;
6739 q_vector->tx.total_packets += total_packets;
6740
6741 if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
6742 struct e1000_hw *hw = &adapter->hw;
6743
6744 /* Detect a transmit hang in hardware; this serializes the
6745 * check with the clearing of time_stamp and movement of i
6746 */
6747 clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
6748 if (tx_buffer->next_to_watch &&
6749 time_after(jiffies, tx_buffer->time_stamp +
6750 (adapter->tx_timeout_factor * HZ)) &&
6751 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
6752
6753 /* detected Tx unit hang */
6754 dev_err(tx_ring->dev,
6755 "Detected Tx Unit Hang\n"
6756 " Tx Queue <%d>\n"
6757 " TDH <%x>\n"
6758 " TDT <%x>\n"
6759 " next_to_use <%x>\n"
6760 " next_to_clean <%x>\n"
6761 "buffer_info[next_to_clean]\n"
6762 " time_stamp <%lx>\n"
6763 " next_to_watch <%p>\n"
6764 " jiffies <%lx>\n"
6765 " desc.status <%x>\n",
6766 tx_ring->queue_index,
6767 rd32(E1000_TDH(tx_ring->reg_idx)),
6768 readl(tx_ring->tail),
6769 tx_ring->next_to_use,
6770 tx_ring->next_to_clean,
6771 tx_buffer->time_stamp,
6772 tx_buffer->next_to_watch,
6773 jiffies,
6774 tx_buffer->next_to_watch->wb.status);
6775 netif_stop_subqueue(tx_ring->netdev,
6776 tx_ring->queue_index);
6777
6778 /* we are about to reset, no point in enabling stuff */
6779 return true;
6780 }
6781 }
6782
6783#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
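/* Wake the queue only once at least two worst-case packets' worth
 * (DESC_NEEDED * 2) of descriptors are free; the margin provides
 * hysteresis so the queue does not bounce between stopped and
 * started on every reclaimed descriptor.
 */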
6784 if (unlikely(total_packets &&
6785 netif_carrier_ok(tx_ring->netdev) &&
6786 igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
6787 /* Make sure that anybody stopping the queue after this
6788 * sees the new next_to_clean.
6789 */
6790 smp_mb();
6791 if (__netif_subqueue_stopped(tx_ring->netdev,
6792 tx_ring->queue_index) &&
6793 !(test_bit(__IGB_DOWN, &adapter->state))) {
6794 netif_wake_subqueue(tx_ring->netdev,
6795 tx_ring->queue_index);
6796
6797 u64_stats_update_begin(&tx_ring->tx_syncp);
6798 tx_ring->tx_stats.restart_queue++;
6799 u64_stats_update_end(&tx_ring->tx_syncp);
6800 }
6801 }
6802
6803 return !!budget;
6804}
6805
6806/**
6807 * igb_reuse_rx_page - page flip buffer and store it back on the ring
6808 * @rx_ring: rx descriptor ring to store buffers on
6809 * @old_buff: donor buffer to have page reused
6810 *
6811 * Synchronizes page for reuse by the adapter
6812 **/
6813static void igb_reuse_rx_page(struct igb_ring *rx_ring,
6814 struct igb_rx_buffer *old_buff)
6815{
6816 struct igb_rx_buffer *new_buff;
6817 u16 nta = rx_ring->next_to_alloc;
6818
6819 new_buff = &rx_ring->rx_buffer_info[nta];
6820
6821 /* update, and store next to alloc */
6822 nta++;
6823 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
6824
6825 /* transfer page from old buffer to new buffer */
6826 *new_buff = *old_buff;
6827}
6828
6829static inline bool igb_page_is_reserved(struct page *page)
6830{
6831 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
6832}
6833
6834static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
6835 struct page *page,
6836 unsigned int truesize)
6837{
6838 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--;
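/* pagecnt_bias counts the page references still held by the driver;
 * when page_ref_count() equals the bias, the network stack has
 * released all of its references and the page is exclusively ours,
 * so it is safe to recycle.
 */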
6839
6840 /* avoid re-using remote pages */
6841 if (unlikely(igb_page_is_reserved(page)))
6842 return false;
6843
6844#if (PAGE_SIZE < 8192)
6845 /* if we are the only owner of the page we can reuse it */
6846 if (unlikely(page_ref_count(page) != pagecnt_bias))
6847 return false;
6848
6849 /* flip page offset to other buffer */
6850 rx_buffer->page_offset ^= IGB_RX_BUFSZ;
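/* illustrative: assuming IGB_RX_BUFSZ is 2048 on a 4K page, the XOR
 * above toggles page_offset between 0 and 2048, alternating between
 * the two half-page buffers
 */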
6851#else
6852 /* move offset up to the next cache line */
6853 rx_buffer->page_offset += truesize;
6854
6855 if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
6856 return false;
6857#endif
6858
6859 /* If we have drained the page fragment pool we need to update
6860 * the pagecnt_bias and page count so that we fully restock the
6861 * number of references the driver holds.
6862 */
6863 if (unlikely(pagecnt_bias == 1)) {
6864 page_ref_add(page, USHRT_MAX);
6865 rx_buffer->pagecnt_bias = USHRT_MAX;
6866 }
6867
6868 return true;
6869}
6870
6871/**
6872 * igb_add_rx_frag - Add contents of Rx buffer to sk_buff
6873 * @rx_ring: rx descriptor ring to transact packets on
6874 * @rx_buffer: buffer containing page to add
 * @size: size of the data written into the buffer by hardware
6875 * @rx_desc: pointer to the Rx descriptor for this buffer
6876 * @skb: sk_buff to place the data into
6877 *
6878 * This function will add the data contained in rx_buffer->page to the skb.
6879 * This is done either through a direct copy if the data in the buffer is
6880 * less than the skb header size, otherwise it will just attach the page as
6881 * a frag to the skb.
6882 *
6883 * The function will then update the page offset if necessary and return
6884 * true if the buffer can be reused by the adapter.
6885 **/
6886static bool igb_add_rx_frag(struct igb_ring *rx_ring,
6887 struct igb_rx_buffer *rx_buffer,
6888 unsigned int size,
6889 union e1000_adv_rx_desc *rx_desc,
6890 struct sk_buff *skb)
6891{
6892 struct page *page = rx_buffer->page;
6893 unsigned char *va = page_address(page) + rx_buffer->page_offset;
6894#if (PAGE_SIZE < 8192)
6895 unsigned int truesize = IGB_RX_BUFSZ;
6896#else
6897 unsigned int truesize = SKB_DATA_ALIGN(size);
6898#endif
6899 unsigned int pull_len;
6900
6901 if (unlikely(skb_is_nonlinear(skb)))
6902 goto add_tail_frag;
6903
6904 if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
6905 igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
6906 va += IGB_TS_HDR_LEN;
6907 size -= IGB_TS_HDR_LEN;
6908 }
6909
6910 if (likely(size <= IGB_RX_HDR_LEN)) {
6911 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
6912
6913 /* page is not reserved, we can reuse buffer as-is */
6914 if (likely(!igb_page_is_reserved(page)))
6915 return true;
6916
6917 /* this page cannot be reused so discard it */
6918 return false;
6919 }
6920
6921 /* we need the header to contain the greater of ETH_HLEN or, when
6922 * skb->len is less than 60, the 60 bytes that skb_pad requires.
6923 */
6924 pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN);
6925
6926 /* align pull length to size of long to optimize memcpy performance */
6927 memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
6928
6929 /* update all of the pointers */
6930 va += pull_len;
6931 size -= pull_len;
6932
6933add_tail_frag:
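/* (va & ~PAGE_MASK) converts the virtual address back into an
 * offset within its page, which is the form skb_add_rx_frag()
 * expects alongside the page pointer.
 */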
6934 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
6935 (unsigned long)va & ~PAGE_MASK, size, truesize);
6936
6937 return igb_can_reuse_rx_page(rx_buffer, page, truesize);
6938}
6939
6940static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
6941 union e1000_adv_rx_desc *rx_desc,
6942 struct sk_buff *skb)
6943{
6944 unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
6945 struct igb_rx_buffer *rx_buffer;
6946 struct page *page;
6947
6948 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
6949 page = rx_buffer->page;
6950 prefetchw(page);
6951
6952 /* we are reusing so sync this buffer for CPU use */
6953 dma_sync_single_range_for_cpu(rx_ring->dev,
6954 rx_buffer->dma,
6955 rx_buffer->page_offset,
6956 size,
6957 DMA_FROM_DEVICE);
6958
6959 if (likely(!skb)) {
6960 void *page_addr = page_address(page) +
6961 rx_buffer->page_offset;
6962
6963 /* prefetch first cache line of first page */
6964 prefetch(page_addr);
6965#if L1_CACHE_BYTES < 128
6966 prefetch(page_addr + L1_CACHE_BYTES);
6967#endif
6968
6969 /* allocate a skb to store the frags */
6970 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
6971 if (unlikely(!skb)) {
6972 rx_ring->rx_stats.alloc_failed++;
6973 return NULL;
6974 }
6975
6976 /* we will be copying the header into skb->data in
6977 * igb_add_rx_frag() so it is in our interest to prefetch
6978 * it now to avoid a possible cache miss
6979 */
6980 prefetchw(skb->data);
6981 }
6982
6983 /* pull page into skb */
6984 if (igb_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb)) {
6985 /* hand second half of page back to the ring */
6986 igb_reuse_rx_page(rx_ring, rx_buffer);
6987 } else {
6988 /* We are not reusing the buffer so unmap it and free
6989 * any references we are holding to it
6990 */
6991 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
6992 PAGE_SIZE, DMA_FROM_DEVICE,
6993 DMA_ATTR_SKIP_CPU_SYNC);
6994 __page_frag_cache_drain(page, rx_buffer->pagecnt_bias);
6995 }
6996
6997 /* clear contents of rx_buffer */
6998 rx_buffer->page = NULL;
6999
7000 return skb;
7001}
7002
7003static inline void igb_rx_checksum(struct igb_ring *ring,
7004 union e1000_adv_rx_desc *rx_desc,
7005 struct sk_buff *skb)
7006{
7007 skb_checksum_none_assert(skb);
7008
7009 /* Ignore Checksum bit is set */
7010 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
7011 return;
7012
7013 /* Rx checksum disabled via ethtool */
7014 if (!(ring->netdev->features & NETIF_F_RXCSUM))
7015 return;
7016
7017 /* TCP/UDP checksum error bit is set */
7018 if (igb_test_staterr(rx_desc,
7019 E1000_RXDEXT_STATERR_TCPE |
7020 E1000_RXDEXT_STATERR_IPE)) {
7021 /* work around an errata with SCTP packets where the TCPE (aka
7022 * L4E) bit is set incorrectly on 64-byte (60 bytes w/o CRC)
7023 * packets; let the stack verify the CRC32c instead
7024 */
7025 if (!((skb->len == 60) &&
7026 test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
7027 u64_stats_update_begin(&ring->rx_syncp);
7028 ring->rx_stats.csum_err++;
7029 u64_stats_update_end(&ring->rx_syncp);
7030 }
7031 /* let the stack verify checksum errors */
7032 return;
7033 }
7034 /* It must be a TCP or UDP packet with a valid checksum */
7035 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
7036 E1000_RXD_STAT_UDPCS))
7037 skb->ip_summed = CHECKSUM_UNNECESSARY;
7038
7039 dev_dbg(ring->dev, "cksum success: bits %08X\n",
7040 le32_to_cpu(rx_desc->wb.upper.status_error));
7041}
7042
7043static inline void igb_rx_hash(struct igb_ring *ring,
7044 union e1000_adv_rx_desc *rx_desc,
7045 struct sk_buff *skb)
7046{
7047 if (ring->netdev->features & NETIF_F_RXHASH)
7048 skb_set_hash(skb,
7049 le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
7050 PKT_HASH_TYPE_L3);
7051}
7052
7053/**
7054 * igb_is_non_eop - process handling of non-EOP buffers
7055 * @rx_ring: Rx ring being processed
7056 * @rx_desc: Rx descriptor for current buffer
7058 *
7059 * This function updates next to clean and prefetches the next
7060 * descriptor. If the buffer is an EOP buffer this function exits
7061 * returning false, otherwise it returns true to indicate that this
7062 * is in fact a non-EOP buffer of a frame spanning multiple buffers.
7063 **/
7064static bool igb_is_non_eop(struct igb_ring *rx_ring,
7065 union e1000_adv_rx_desc *rx_desc)
7066{
7067 u32 ntc = rx_ring->next_to_clean + 1;
7068
7069 /* fetch, update, and store next to clean */
7070 ntc = (ntc < rx_ring->count) ? ntc : 0;
7071 rx_ring->next_to_clean = ntc;
7072
7073 prefetch(IGB_RX_DESC(rx_ring, ntc));
7074
7075 if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)))
7076 return false;
7077
7078 return true;
7079}
7080
7081/**
7082 * igb_cleanup_headers - Correct corrupted or empty headers
7083 * @rx_ring: rx descriptor ring packet is being transacted on
7084 * @rx_desc: pointer to the EOP Rx descriptor
7085 * @skb: pointer to current skb being fixed
7086 *
7087 * Check for frames reported as erroneous by hardware and drop them
7088 * unless the interface is in receive-all (RXALL) mode.
7089 *
7090 * In addition if skb is not at least 60 bytes we need to pad it so that
7091 * it is large enough to qualify as a valid Ethernet frame.
7092 *
7093 * Returns true if an error was encountered and skb was freed.
7094 **/
7095static bool igb_cleanup_headers(struct igb_ring *rx_ring,
7096 union e1000_adv_rx_desc *rx_desc,
7097 struct sk_buff *skb)
7098{
7099 if (unlikely((igb_test_staterr(rx_desc,
7100 E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
7101 struct net_device *netdev = rx_ring->netdev;
7102 if (!(netdev->features & NETIF_F_RXALL)) {
7103 dev_kfree_skb_any(skb);
7104 return true;
7105 }
7106 }
7107
7108 /* if eth_skb_pad returns an error the skb was freed */
7109 if (eth_skb_pad(skb))
7110 return true;
7111
7112 return false;
7113}
7114
7115/**
7116 * igb_process_skb_fields - Populate skb header fields from Rx descriptor
7117 * @rx_ring: rx descriptor ring packet is being transacted on
7118 * @rx_desc: pointer to the EOP Rx descriptor
7119 * @skb: pointer to current skb being populated
7120 *
7121 * This function checks the ring, descriptor, and packet information in
7122 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
7123 * other fields within the skb.
7124 **/
7125static void igb_process_skb_fields(struct igb_ring *rx_ring,
7126 union e1000_adv_rx_desc *rx_desc,
7127 struct sk_buff *skb)
7128{
7129 struct net_device *dev = rx_ring->netdev;
7130
7131 igb_rx_hash(rx_ring, rx_desc, skb);
7132
7133 igb_rx_checksum(rx_ring, rx_desc, skb);
7134
7135 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
7136 !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
7137 igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
7138
7139 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
7140 igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
7141 u16 vid;
7142
7143 if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
7144 test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
7145 vid = be16_to_cpu(rx_desc->wb.upper.vlan);
7146 else
7147 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
7148
7149 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
7150 }
7151
7152 skb_record_rx_queue(skb, rx_ring->queue_index);
7153
7154 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
7155}
7156
7157static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
7158{
7159 struct igb_ring *rx_ring = q_vector->rx.ring;
7160 struct sk_buff *skb = rx_ring->skb;
7161 unsigned int total_bytes = 0, total_packets = 0;
7162 u16 cleaned_count = igb_desc_unused(rx_ring);
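/* cleaned_count counts descriptors that need new buffers; hardware
 * is restocked in batches of IGB_RX_BUFFER_WRITE to amortize the
 * cost of the tail register write.
 */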
7163
7164 while (likely(total_packets < budget)) {
7165 union e1000_adv_rx_desc *rx_desc;
7166
7167 /* return some buffers to hardware, one at a time is too slow */
7168 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
7169 igb_alloc_rx_buffers(rx_ring, cleaned_count);
7170 cleaned_count = 0;
7171 }
7172
7173 rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
7174
7175 if (!rx_desc->wb.upper.status_error)
7176 break;
7177
7178 /* This memory barrier is needed to keep us from reading
7179 * any other fields out of the rx_desc until we know the
7180 * descriptor has been written back
7181 */
7182 dma_rmb();
7183
7184 /* retrieve a buffer from the ring */
7185 skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
7186
7187 /* exit if we failed to retrieve a buffer */
7188 if (!skb)
7189 break;
7190
7191 cleaned_count++;
7192
7193 /* fetch next buffer in frame if non-eop */
7194 if (igb_is_non_eop(rx_ring, rx_desc))
7195 continue;
7196
7197 /* verify the packet layout is correct */
7198 if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
7199 skb = NULL;
7200 continue;
7201 }
7202
7203 /* probably a little skewed due to removing CRC */
7204 total_bytes += skb->len;
7205
7206 /* populate checksum, timestamp, VLAN, and protocol */
7207 igb_process_skb_fields(rx_ring, rx_desc, skb);
7208
7209 napi_gro_receive(&q_vector->napi, skb);
7210
7211 /* reset skb pointer */
7212 skb = NULL;
7213
7214 /* update budget accounting */
7215 total_packets++;
7216 }
7217
7218 /* place incomplete frames back on ring for completion */
7219 rx_ring->skb = skb;
7220
7221 u64_stats_update_begin(&rx_ring->rx_syncp);
7222 rx_ring->rx_stats.packets += total_packets;
7223 rx_ring->rx_stats.bytes += total_bytes;
7224 u64_stats_update_end(&rx_ring->rx_syncp);
7225 q_vector->rx.total_packets += total_packets;
7226 q_vector->rx.total_bytes += total_bytes;
7227
7228 if (cleaned_count)
7229 igb_alloc_rx_buffers(rx_ring, cleaned_count);
7230
7231 return total_packets;
7232}
7233
7234static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
7235 struct igb_rx_buffer *bi)
7236{
7237 struct page *page = bi->page;
7238 dma_addr_t dma;
7239
7240 /* since we are recycling buffers we should seldom need to alloc */
7241 if (likely(page))
7242 return true;
7243
7244 /* alloc new page for storage */
7245 page = dev_alloc_page();
7246 if (unlikely(!page)) {
7247 rx_ring->rx_stats.alloc_failed++;
7248 return false;
7249 }
7250
7251 /* map page for use */
7252 dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
7253 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
7254
7255 /* if mapping failed free memory back to system since
7256 * there isn't much point in holding memory we can't use
7257 */
7258 if (dma_mapping_error(rx_ring->dev, dma)) {
7259 __free_page(page);
7260
7261 rx_ring->rx_stats.alloc_failed++;
7262 return false;
7263 }
7264
7265 bi->dma = dma;
7266 bi->page = page;
7267 bi->page_offset = 0;
7268 bi->pagecnt_bias = 1;
7269
7270 return true;
7271}
7272
7273/**
7274 * igb_alloc_rx_buffers - Replace used receive buffers; packet split
7275 * @rx_ring: rx descriptor ring to allocate new buffers for
 * @cleaned_count: number of buffers to replace
7276 **/
7277void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
7278{
7279 union e1000_adv_rx_desc *rx_desc;
7280 struct igb_rx_buffer *bi;
7281 u16 i = rx_ring->next_to_use;
7282
7283 /* nothing to do */
7284 if (!cleaned_count)
7285 return;
7286
7287 rx_desc = IGB_RX_DESC(rx_ring, i);
7288 bi = &rx_ring->rx_buffer_info[i];
7289 i -= rx_ring->count;
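/* bias i by -count, as in igb_clean_tx_irq(), so the single (!i)
 * test below detects ring wrap-around
 */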
7290
7291 do {
7292 if (!igb_alloc_mapped_page(rx_ring, bi))
7293 break;
7294
7295 /* sync the buffer for use by the device */
7296 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
7297 bi->page_offset,
7298 IGB_RX_BUFSZ,
7299 DMA_FROM_DEVICE);
7300
7301 /* Refresh the desc even if buffer_addrs didn't change
7302 * because each write-back erases this info.
7303 */
7304 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
7305
7306 rx_desc++;
7307 bi++;
7308 i++;
7309 if (unlikely(!i)) {
7310 rx_desc = IGB_RX_DESC(rx_ring, 0);
7311 bi = rx_ring->rx_buffer_info;
7312 i -= rx_ring->count;
7313 }
7314
7315 /* clear the status bits for the next_to_use descriptor */
7316 rx_desc->wb.upper.status_error = 0;
7317
7318 cleaned_count--;
7319 } while (cleaned_count);
7320
7321 i += rx_ring->count;
7322
7323 if (rx_ring->next_to_use != i) {
7324 /* record the next descriptor to use */
7325 rx_ring->next_to_use = i;
7326
7327 /* update next to alloc since we have filled the ring */
7328 rx_ring->next_to_alloc = i;
7329
7330 /* Force memory writes to complete before letting h/w
7331 * know there are new descriptors to fetch. (Only
7332 * applicable for weak-ordered memory model archs,
7333 * such as IA-64).
7334 */
7335 wmb();
7336 writel(i, rx_ring->tail);
7337 }
7338}
7339
7340/**
7341 * igb_mii_ioctl - MII ioctl handler for PHY register access
7342 * @netdev: pointer to the network interface device structure
7343 * @ifr: interface request structure holding the MII data
7344 * @cmd: ioctl command (SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG)
7345 **/
7346static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
7347{
7348 struct igb_adapter *adapter = netdev_priv(netdev);
7349 struct mii_ioctl_data *data = if_mii(ifr);
7350
7351 if (adapter->hw.phy.media_type != e1000_media_type_copper)
7352 return -EOPNOTSUPP;
7353
7354 switch (cmd) {
7355 case SIOCGMIIPHY:
7356 data->phy_id = adapter->hw.phy.addr;
7357 break;
7358 case SIOCGMIIREG:
7359 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
7360 &data->val_out))
7361 return -EIO;
7362 break;
7363 case SIOCSMIIREG:
7364 default:
7365 return -EOPNOTSUPP;
7366 }
7367 return 0;
7368}
7369
7370/**
7371 * igb_ioctl - dispatch device ioctls (MII and hardware timestamping)
7372 * @netdev: pointer to the network interface device structure
7373 * @ifr: interface request structure
7374 * @cmd: ioctl command
7375 **/
7376static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
7377{
7378 switch (cmd) {
7379 case SIOCGMIIPHY:
7380 case SIOCGMIIREG:
7381 case SIOCSMIIREG:
7382 return igb_mii_ioctl(netdev, ifr, cmd);
7383 case SIOCGHWTSTAMP:
7384 return igb_ptp_get_ts_config(netdev, ifr);
7385 case SIOCSHWTSTAMP:
7386 return igb_ptp_set_ts_config(netdev, ifr);
7387 default:
7388 return -EOPNOTSUPP;
7389 }
7390}
7391
7392void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
7393{
7394 struct igb_adapter *adapter = hw->back;
7395
7396 pci_read_config_word(adapter->pdev, reg, value);
7397}
7398
7399void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
7400{
7401 struct igb_adapter *adapter = hw->back;
7402
7403 pci_write_config_word(adapter->pdev, reg, *value);
7404}
7405
7406s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
7407{
7408 struct igb_adapter *adapter = hw->back;
7409
7410 if (pcie_capability_read_word(adapter->pdev, reg, value))
7411 return -E1000_ERR_CONFIG;
7412
7413 return 0;
7414}
7415
7416s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
7417{
7418 struct igb_adapter *adapter = hw->back;
7419
7420 if (pcie_capability_write_word(adapter->pdev, reg, *value))
7421 return -E1000_ERR_CONFIG;
7422
7423 return 0;
7424}
7425
7426static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
7427{
7428 struct igb_adapter *adapter = netdev_priv(netdev);
7429 struct e1000_hw *hw = &adapter->hw;
7430 u32 ctrl, rctl;
7431 bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
7432
7433 if (enable) {
7434 /* enable VLAN tag insert/strip */
7435 ctrl = rd32(E1000_CTRL);
7436 ctrl |= E1000_CTRL_VME;
7437 wr32(E1000_CTRL, ctrl);
7438
7439 /* Disable CFI check */
7440 rctl = rd32(E1000_RCTL);
7441 rctl &= ~E1000_RCTL_CFIEN;
7442 wr32(E1000_RCTL, rctl);
7443 } else {
7444 /* disable VLAN tag insert/strip */
7445 ctrl = rd32(E1000_CTRL);
7446 ctrl &= ~E1000_CTRL_VME;
7447 wr32(E1000_CTRL, ctrl);
7448 }
7449
7450 igb_set_vf_vlan_strip(adapter, adapter->vfs_allocated_count, enable);
7451}
7452
7453static int igb_vlan_rx_add_vid(struct net_device *netdev,
7454 __be16 proto, u16 vid)
7455{
7456 struct igb_adapter *adapter = netdev_priv(netdev);
7457 struct e1000_hw *hw = &adapter->hw;
7458 int pf_id = adapter->vfs_allocated_count;
7459
7460 /* add the filter since PF can receive vlans w/o entry in vlvf */
7461 if (!vid || !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
7462 igb_vfta_set(hw, vid, pf_id, true, !!vid);
7463
7464 set_bit(vid, adapter->active_vlans);
7465
7466 return 0;
7467}
7468
7469static int igb_vlan_rx_kill_vid(struct net_device *netdev,
7470 __be16 proto, u16 vid)
7471{
7472 struct igb_adapter *adapter = netdev_priv(netdev);
7473 int pf_id = adapter->vfs_allocated_count;
7474 struct e1000_hw *hw = &adapter->hw;
7475
7476 /* remove VID from filter table */
7477 if (vid && !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
7478 igb_vfta_set(hw, vid, pf_id, false, true);
7479
7480 clear_bit(vid, adapter->active_vlans);
7481
7482 return 0;
7483}
7484
7485static void igb_restore_vlan(struct igb_adapter *adapter)
7486{
7487 u16 vid = 1;
7488
7489 igb_vlan_mode(adapter->netdev, adapter->netdev->features);
7490 igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
7491
7492 for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
7493 igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
7494}
7495
7496int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
7497{
7498 struct pci_dev *pdev = adapter->pdev;
7499 struct e1000_mac_info *mac = &adapter->hw.mac;
7500
7501 mac->autoneg = 0;
7502
7503 /* Make sure dplx is at most 1 bit and lsb of speed is not set
7504 * for the switch() below to work
7505 */
7506 if ((spd & 1) || (dplx & ~1))
7507 goto err_inval;
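/* With dplx confined to bit 0 and spd always even, (spd + dplx)
 * encodes each speed/duplex pair uniquely; e.g. SPEED_100 +
 * DUPLEX_FULL = 101, so the switch statements below can key on
 * the sum.
 */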
7508
7509 /* Fiber NICs only allow 1000 Mbps full duplex,
7510 * plus 100 Mbps full duplex for 100BaseFX SFPs
7511 */
7512 if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
7513 switch (spd + dplx) {
7514 case SPEED_10 + DUPLEX_HALF:
7515 case SPEED_10 + DUPLEX_FULL:
7516 case SPEED_100 + DUPLEX_HALF:
7517 goto err_inval;
7518 default:
7519 break;
7520 }
7521 }
7522
7523 switch (spd + dplx) {
7524 case SPEED_10 + DUPLEX_HALF:
7525 mac->forced_speed_duplex = ADVERTISE_10_HALF;
7526 break;
7527 case SPEED_10 + DUPLEX_FULL:
7528 mac->forced_speed_duplex = ADVERTISE_10_FULL;
7529 break;
7530 case SPEED_100 + DUPLEX_HALF:
7531 mac->forced_speed_duplex = ADVERTISE_100_HALF;
7532 break;
7533 case SPEED_100 + DUPLEX_FULL:
7534 mac->forced_speed_duplex = ADVERTISE_100_FULL;
7535 break;
7536 case SPEED_1000 + DUPLEX_FULL:
7537 mac->autoneg = 1;
7538 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
7539 break;
7540 case SPEED_1000 + DUPLEX_HALF: /* not supported */
7541 default:
7542 goto err_inval;
7543 }
7544
7545 /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
7546 adapter->hw.phy.mdix = AUTO_ALL_MODES;
7547
7548 return 0;
7549
7550err_inval:
7551 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
7552 return -EINVAL;
7553}
7554
7555static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
7556 bool runtime)
7557{
7558 struct net_device *netdev = pci_get_drvdata(pdev);
7559 struct igb_adapter *adapter = netdev_priv(netdev);
7560 struct e1000_hw *hw = &adapter->hw;
7561 u32 ctrl, rctl, status;
7562 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
7563#ifdef CONFIG_PM
7564 int retval = 0;
7565#endif
7566
7567 netif_device_detach(netdev);
7568
7569 if (netif_running(netdev))
7570 __igb_close(netdev, true);
7571
7572 igb_ptp_suspend(adapter);
7573
7574 igb_clear_interrupt_scheme(adapter);
7575
7576#ifdef CONFIG_PM
7577 retval = pci_save_state(pdev);
7578 if (retval)
7579 return retval;
7580#endif
7581
7582 status = rd32(E1000_STATUS);
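/* if link is already up, remove the link-change (LNKC) condition
 * from the wake-up filter flags
 */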
7583 if (status & E1000_STATUS_LU)
7584 wufc &= ~E1000_WUFC_LNKC;
7585
7586 if (wufc) {
7587 igb_setup_rctl(adapter);
7588 igb_set_rx_mode(netdev);
7589
7590 /* turn on all-multi mode if wake on multicast is enabled */
7591 if (wufc & E1000_WUFC_MC) {
7592 rctl = rd32(E1000_RCTL);
7593 rctl |= E1000_RCTL_MPE;
7594 wr32(E1000_RCTL, rctl);
7595 }
7596
7597 ctrl = rd32(E1000_CTRL);
7598 /* advertise wake from D3Cold */
7599 #define E1000_CTRL_ADVD3WUC 0x00100000
7600 /* phy power management enable */
7601 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
7602 ctrl |= E1000_CTRL_ADVD3WUC;
7603 wr32(E1000_CTRL, ctrl);
7604
7605 /* Allow time for pending master requests to run */
7606 igb_disable_pcie_master(hw);
7607
7608 wr32(E1000_WUC, E1000_WUC_PME_EN);
7609 wr32(E1000_WUFC, wufc);
7610 } else {
7611 wr32(E1000_WUC, 0);
7612 wr32(E1000_WUFC, 0);
7613 }
7614
7615 *enable_wake = wufc || adapter->en_mng_pt;
7616 if (!*enable_wake)
7617 igb_power_down_link(adapter);
7618 else
7619 igb_power_up_link(adapter);
7620
7621 /* Release control of h/w to f/w. If f/w is AMT enabled, this
7622 * would have already happened in close and is redundant.
7623 */
7624 igb_release_hw_control(adapter);
7625
7626 pci_disable_device(pdev);
7627
7628 return 0;
7629}
7630
7631#ifdef CONFIG_PM
7632#ifdef CONFIG_PM_SLEEP
7633static int igb_suspend(struct device *dev)
7634{
7635 int retval;
7636 bool wake;
7637 struct pci_dev *pdev = to_pci_dev(dev);
7638
7639 retval = __igb_shutdown(pdev, &wake, 0);
7640 if (retval)
7641 return retval;
7642
7643 if (wake) {
7644 pci_prepare_to_sleep(pdev);
7645 } else {
7646 pci_wake_from_d3(pdev, false);
7647 pci_set_power_state(pdev, PCI_D3hot);
7648 }
7649
7650 return 0;
7651}
7652#endif /* CONFIG_PM_SLEEP */
7653
7654static int igb_resume(struct device *dev)
7655{
7656 struct pci_dev *pdev = to_pci_dev(dev);
7657 struct net_device *netdev = pci_get_drvdata(pdev);
7658 struct igb_adapter *adapter = netdev_priv(netdev);
7659 struct e1000_hw *hw = &adapter->hw;
7660 int err;
7661
7662 pci_set_power_state(pdev, PCI_D0);
7663 pci_restore_state(pdev);
7664 pci_save_state(pdev);
7665
7666 if (!pci_device_is_present(pdev))
7667 return -ENODEV;
7668 err = pci_enable_device_mem(pdev);
7669 if (err) {
7670 dev_err(&pdev->dev,
7671 "igb: Cannot enable PCI device from suspend\n");
7672 return err;
7673 }
7674 pci_set_master(pdev);
7675
7676 pci_enable_wake(pdev, PCI_D3hot, 0);
7677 pci_enable_wake(pdev, PCI_D3cold, 0);
7678
7679 if (igb_init_interrupt_scheme(adapter, true)) {
7680 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
7681 return -ENOMEM;
7682 }
7683
7684 igb_reset(adapter);
7685
7686 /* let the f/w know that the h/w is now under the control of the
7687 * driver.
7688 */
7689 igb_get_hw_control(adapter);
7690
7691 wr32(E1000_WUS, ~0);
7692
7693 if (netdev->flags & IFF_UP) {
7694 rtnl_lock();
7695 err = __igb_open(netdev, true);
7696 rtnl_unlock();
7697 if (err)
7698 return err;
7699 }
7700
7701 netif_device_attach(netdev);
7702 return 0;
7703}
7704
7705static int igb_runtime_idle(struct device *dev)
7706{
7707 struct pci_dev *pdev = to_pci_dev(dev);
7708 struct net_device *netdev = pci_get_drvdata(pdev);
7709 struct igb_adapter *adapter = netdev_priv(netdev);
7710
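/* Never allow an immediate runtime suspend: when there is no link,
 * schedule a delayed suspend five seconds out, and in all cases
 * report -EBUSY so the PM core leaves the device active for now.
 */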
7711 if (!igb_has_link(adapter))
7712 pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
7713
7714 return -EBUSY;
7715}
7716
7717static int igb_runtime_suspend(struct device *dev)
7718{
7719 struct pci_dev *pdev = to_pci_dev(dev);
7720 int retval;
7721 bool wake;
7722
7723 retval = __igb_shutdown(pdev, &wake, 1);
7724 if (retval)
7725 return retval;
7726
7727 if (wake) {
7728 pci_prepare_to_sleep(pdev);
7729 } else {
7730 pci_wake_from_d3(pdev, false);
7731 pci_set_power_state(pdev, PCI_D3hot);
7732 }
7733
7734 return 0;
7735}
7736
7737static int igb_runtime_resume(struct device *dev)
7738{
7739 return igb_resume(dev);
7740}
7741#endif /* CONFIG_PM */
7742
7743static void igb_shutdown(struct pci_dev *pdev)
7744{
7745 bool wake;
7746
7747 __igb_shutdown(pdev, &wake, 0);
7748
7749 if (system_state == SYSTEM_POWER_OFF) {
7750 pci_wake_from_d3(pdev, wake);
7751 pci_set_power_state(pdev, PCI_D3hot);
7752 }
7753}
7754
7755#ifdef CONFIG_PCI_IOV
7756static int igb_sriov_reinit(struct pci_dev *dev)
7757{
7758 struct net_device *netdev = pci_get_drvdata(dev);
7759 struct igb_adapter *adapter = netdev_priv(netdev);
7760 struct pci_dev *pdev = adapter->pdev;
7761
7762 rtnl_lock();
7763
7764 if (netif_running(netdev))
7765 igb_close(netdev);
7766 else
7767 igb_reset(adapter);
7768
7769 igb_clear_interrupt_scheme(adapter);
7770
7771 igb_init_queue_configuration(adapter);
7772
7773 if (igb_init_interrupt_scheme(adapter, true)) {
7774 rtnl_unlock();
7775 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
7776 return -ENOMEM;
7777 }
7778
7779 if (netif_running(netdev))
7780 igb_open(netdev);
7781
7782 rtnl_unlock();
7783
7784 return 0;
7785}
7786
7787static int igb_pci_disable_sriov(struct pci_dev *dev)
7788{
7789 int err = igb_disable_sriov(dev);
7790
7791 if (!err)
7792 err = igb_sriov_reinit(dev);
7793
7794 return err;
7795}
7796
7797static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs)
7798{
7799 int err = igb_enable_sriov(dev, num_vfs);
7800
7801 if (err)
7802 goto out;
7803
7804 err = igb_sriov_reinit(dev);
7805 if (!err)
7806 return num_vfs;
7807
7808out:
7809 return err;
7810}
7811
7812#endif
7813static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
7814{
7815#ifdef CONFIG_PCI_IOV
7816 if (num_vfs == 0)
7817 return igb_pci_disable_sriov(dev);
7818 else
7819 return igb_pci_enable_sriov(dev, num_vfs);
7820#endif
7821 return 0;
7822}
7823
7824#ifdef CONFIG_NET_POLL_CONTROLLER
7825/* Polling 'interrupt' - used by things like netconsole to send skbs
7826 * without having to re-enable interrupts. It's not called while
7827 * the interrupt routine is executing.
7828 */
7829static void igb_netpoll(struct net_device *netdev)
7830{
7831 struct igb_adapter *adapter = netdev_priv(netdev);
7832 struct e1000_hw *hw = &adapter->hw;
7833 struct igb_q_vector *q_vector;
7834 int i;
7835
7836 for (i = 0; i < adapter->num_q_vectors; i++) {
7837 q_vector = adapter->q_vector[i];
7838 if (adapter->flags & IGB_FLAG_HAS_MSIX)
7839 wr32(E1000_EIMC, q_vector->eims_value);
7840 else
7841 igb_irq_disable(adapter);
7842 napi_schedule(&q_vector->napi);
7843 }
7844}
7845#endif /* CONFIG_NET_POLL_CONTROLLER */
7846
7847/**
7848 * igb_io_error_detected - called when PCI error is detected
7849 * @pdev: Pointer to PCI device
7850 * @state: The current pci connection state
7851 *
7852 * This function is called after a PCI bus error affecting
7853 * this device has been detected.
7854 **/
7855static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
7856 pci_channel_state_t state)
7857{
7858 struct net_device *netdev = pci_get_drvdata(pdev);
7859 struct igb_adapter *adapter = netdev_priv(netdev);
7860
7861 netif_device_detach(netdev);
7862
7863 if (state == pci_channel_io_perm_failure)
7864 return PCI_ERS_RESULT_DISCONNECT;
7865
7866 if (netif_running(netdev))
7867 igb_down(adapter);
7868 pci_disable_device(pdev);
7869
7870 /* Request a slot reset. */
7871 return PCI_ERS_RESULT_NEED_RESET;
7872}
7873
7874/**
7875 * igb_io_slot_reset - called after the pci bus has been reset.
7876 * @pdev: Pointer to PCI device
7877 *
7878 * Restart the card from scratch, as if from a cold-boot. Implementation
7879 * resembles the first-half of the igb_resume routine.
7880 **/
7881static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
7882{
7883 struct net_device *netdev = pci_get_drvdata(pdev);
7884 struct igb_adapter *adapter = netdev_priv(netdev);
7885 struct e1000_hw *hw = &adapter->hw;
7886 pci_ers_result_t result;
7887 int err;
7888
7889 if (pci_enable_device_mem(pdev)) {
7890 dev_err(&pdev->dev,
7891 "Cannot re-enable PCI device after reset.\n");
7892 result = PCI_ERS_RESULT_DISCONNECT;
7893 } else {
7894 pci_set_master(pdev);
7895 pci_restore_state(pdev);
7896 pci_save_state(pdev);
7897
7898 pci_enable_wake(pdev, PCI_D3hot, 0);
7899 pci_enable_wake(pdev, PCI_D3cold, 0);
7900
7901 igb_reset(adapter);
7902 wr32(E1000_WUS, ~0);
7903 result = PCI_ERS_RESULT_RECOVERED;
7904 }
7905
7906 err = pci_cleanup_aer_uncorrect_error_status(pdev);
7907 if (err) {
7908 dev_err(&pdev->dev,
7909 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
7910 err);
7911 /* non-fatal, continue */
7912 }
7913
7914 return result;
7915}
7916
7917/**
7918 * igb_io_resume - called when traffic can start flowing again.
7919 * @pdev: Pointer to PCI device
7920 *
7921 * This callback is called when the error recovery driver tells us that
7922 * it's OK to resume normal operation. Implementation resembles the
7923 * second-half of the igb_resume routine.
7924 */
7925static void igb_io_resume(struct pci_dev *pdev)
7926{
7927 struct net_device *netdev = pci_get_drvdata(pdev);
7928 struct igb_adapter *adapter = netdev_priv(netdev);
7929
7930 if (netif_running(netdev)) {
7931 if (igb_up(adapter)) {
7932 dev_err(&pdev->dev, "igb_up failed after reset\n");
7933 return;
7934 }
7935 }
7936
7937 netif_device_attach(netdev);
7938
7939 /* let the f/w know that the h/w is now under the control of the
7940 * driver.
7941 */
7942 igb_get_hw_control(adapter);
7943}
7944
7945static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
7946 u8 qsel)
7947{
7948 struct e1000_hw *hw = &adapter->hw;
7949 u32 rar_low, rar_high;
7950
7951 /* HW expects these to be in network order when they are plugged
7952 * into the registers which are little endian. In order to guarantee
7953 * that ordering we need to do an leXX_to_cpup here in order to be
7954 * ready for the byteswap that occurs with writel
7955 */
7956 rar_low = le32_to_cpup((__le32 *)(addr));
7957 rar_high = le16_to_cpup((__le16 *)(addr + 4));
7958
7959 /* Indicate to hardware the Address is Valid. */
7960 rar_high |= E1000_RAH_AV;
7961
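/* select the pool for this address: 82575 encodes the pool as a
 * multiplied field, while later MACs use one RAH bit per pool
 * (E1000_RAH_POOL_1 << qsel); e.g. qsel = 2 sets the pool 2 bit
 */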
7962 if (hw->mac.type == e1000_82575)
7963 rar_high |= E1000_RAH_POOL_1 * qsel;
7964 else
7965 rar_high |= E1000_RAH_POOL_1 << qsel;
7966
7967 wr32(E1000_RAL(index), rar_low);
7968 wrfl();
7969 wr32(E1000_RAH(index), rar_high);
7970 wrfl();
7971}
7972
7973static int igb_set_vf_mac(struct igb_adapter *adapter,
7974 int vf, unsigned char *mac_addr)
7975{
7976 struct e1000_hw *hw = &adapter->hw;
7977 /* VF MAC addresses start at the end of the receive addresses and
7978 * move towards the first; as a result a collision should not be possible
7979 */
7980 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
7981
7982 memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
7983
7984 igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);
7985
7986 return 0;
7987}
7988
7989static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
7990{
7991 struct igb_adapter *adapter = netdev_priv(netdev);
7992 if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
7993 return -EINVAL;
7994 adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
7995 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
7996 dev_info(&adapter->pdev->dev,
7997 "Reload the VF driver to make this change effective.");
7998 if (test_bit(__IGB_DOWN, &adapter->state)) {
7999 dev_warn(&adapter->pdev->dev,
8000 "The VF MAC address has been set, but the PF device is not up.\n");
8001 dev_warn(&adapter->pdev->dev,
8002 "Bring the PF device up before attempting to use the VF device.\n");
8003 }
8004 return igb_set_vf_mac(adapter, vf, mac);
8005}
8006
8007static int igb_link_mbps(int internal_link_speed)
8008{
8009 switch (internal_link_speed) {
8010 case SPEED_100:
8011 return 100;
8012 case SPEED_1000:
8013 return 1000;
8014 default:
8015 return 0;
8016 }
8017}
8018
8019static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
8020 int link_speed)
8021{
8022 int rf_dec, rf_int;
8023 u32 bcnrc_val;
8024
8025 if (tx_rate != 0) {
8026 /* Calculate the rate factor values to set */
8027 rf_int = link_speed / tx_rate;
8028 rf_dec = (link_speed - (rf_int * tx_rate));
8029 rf_dec = (rf_dec * BIT(E1000_RTTBCNRC_RF_INT_SHIFT)) /
8030 tx_rate;
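/* illustrative, assuming E1000_RTTBCNRC_RF_INT_SHIFT is 14:
 * with link_speed = 1000 and tx_rate = 300, rf_int = 3 and
 * rf_dec = (100 * 16384) / 300 = 5461, so the programmed
 * factor 3 + 5461/16384 ~= 3.333 equals link_speed / tx_rate
 */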
8031
8032 bcnrc_val = E1000_RTTBCNRC_RS_ENA;
8033 bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) &
8034 E1000_RTTBCNRC_RF_INT_MASK);
8035 bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
8036 } else {
8037 bcnrc_val = 0;
8038 }
8039
8040 wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
8041 /* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
8042 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
8043 */
8044 wr32(E1000_RTTBCNRM, 0x14);
8045 wr32(E1000_RTTBCNRC, bcnrc_val);
8046}
8047
8048static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
8049{
8050 int actual_link_speed, i;
8051 bool reset_rate = false;
8052
8053 /* VF TX rate limit was not set or not supported */
8054 if ((adapter->vf_rate_link_speed == 0) ||
8055 (adapter->hw.mac.type != e1000_82576))
8056 return;
8057
8058 actual_link_speed = igb_link_mbps(adapter->link_speed);
8059 if (actual_link_speed != adapter->vf_rate_link_speed) {
8060 reset_rate = true;
8061 adapter->vf_rate_link_speed = 0;
8062 dev_info(&adapter->pdev->dev,
8063 "Link speed has been changed. VF Transmit rate is disabled\n");
8064 }
8065
8066 for (i = 0; i < adapter->vfs_allocated_count; i++) {
8067 if (reset_rate)
8068 adapter->vf_data[i].tx_rate = 0;
8069
8070 igb_set_vf_rate_limit(&adapter->hw, i,
8071 adapter->vf_data[i].tx_rate,
8072 actual_link_speed);
8073 }
8074}
8075
8076static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf,
8077 int min_tx_rate, int max_tx_rate)
8078{
8079 struct igb_adapter *adapter = netdev_priv(netdev);
8080 struct e1000_hw *hw = &adapter->hw;
8081 int actual_link_speed;
8082
8083 if (hw->mac.type != e1000_82576)
8084 return -EOPNOTSUPP;
8085
8086 if (min_tx_rate)
8087 return -EINVAL;
8088
8089 actual_link_speed = igb_link_mbps(adapter->link_speed);
8090 if ((vf >= adapter->vfs_allocated_count) ||
8091 (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
8092 (max_tx_rate < 0) ||
8093 (max_tx_rate > actual_link_speed))
8094 return -EINVAL;
8095
8096 adapter->vf_rate_link_speed = actual_link_speed;
8097 adapter->vf_data[vf].tx_rate = (u16)max_tx_rate;
8098 igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed);
8099
8100 return 0;
8101}
8102
8103static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
8104 bool setting)
8105{
8106 struct igb_adapter *adapter = netdev_priv(netdev);
8107 struct e1000_hw *hw = &adapter->hw;
8108 u32 reg_val, reg_offset;
8109
8110 if (!adapter->vfs_allocated_count)
8111 return -EOPNOTSUPP;
8112
8113 if (vf >= adapter->vfs_allocated_count)
8114 return -EINVAL;
8115
8116 reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC;
8117 reg_val = rd32(reg_offset);
8118 if (setting)
8119 reg_val |= (BIT(vf) |
8120 BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
8121 else
8122 reg_val &= ~(BIT(vf) |
8123 BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
8124 wr32(reg_offset, reg_val);
8125
8126 adapter->vf_data[vf].spoofchk_enabled = setting;
8127 return 0;
8128}
8129
8130static int igb_ndo_get_vf_config(struct net_device *netdev,
8131 int vf, struct ifla_vf_info *ivi)
8132{
8133 struct igb_adapter *adapter = netdev_priv(netdev);
8134 if (vf >= adapter->vfs_allocated_count)
8135 return -EINVAL;
8136 ivi->vf = vf;
8137 memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
8138 ivi->max_tx_rate = adapter->vf_data[vf].tx_rate;
8139 ivi->min_tx_rate = 0;
8140 ivi->vlan = adapter->vf_data[vf].pf_vlan;
8141 ivi->qos = adapter->vf_data[vf].pf_qos;
8142 ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled;
8143 return 0;
8144}
8145
8146static void igb_vmm_control(struct igb_adapter *adapter)
8147{
8148 struct e1000_hw *hw = &adapter->hw;
8149 u32 reg;
8150
8151 switch (hw->mac.type) {
8152 case e1000_82575:
8153 case e1000_i210:
8154 case e1000_i211:
8155 case e1000_i354:
8156 default:
8157 /* replication is not supported on these MACs */
8158 return;
8159 case e1000_82576:
8160 /* notify HW that the MAC is adding vlan tags */
8161 reg = rd32(E1000_DTXCTL);
8162 reg |= E1000_DTXCTL_VLAN_ADDED;
8163 wr32(E1000_DTXCTL, reg);
8164 /* Fall through */
8165 case e1000_82580:
8166 /* enable replication vlan tag stripping */
8167 reg = rd32(E1000_RPLOLR);
8168 reg |= E1000_RPLOLR_STRVLAN;
8169 wr32(E1000_RPLOLR, reg);
8170 /* Fall through */
8171 case e1000_i350:
8172 /* none of the above registers are supported by i350 */
8173 break;
8174 }
8175
8176 if (adapter->vfs_allocated_count) {
8177 igb_vmdq_set_loopback_pf(hw, true);
8178 igb_vmdq_set_replication_pf(hw, true);
8179 igb_vmdq_set_anti_spoofing_pf(hw, true,
8180 adapter->vfs_allocated_count);
8181 } else {
8182 igb_vmdq_set_loopback_pf(hw, false);
8183 igb_vmdq_set_replication_pf(hw, false);
8184 }
8185}
8186
8187static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
8188{
8189 struct e1000_hw *hw = &adapter->hw;
8190 u32 dmac_thr;
8191 u16 hwm;
8192
8193 if (hw->mac.type > e1000_82580) {
8194 if (adapter->flags & IGB_FLAG_DMAC) {
8195 u32 reg;
8196
8197 /* force threshold to 0. */
8198 wr32(E1000_DMCTXTH, 0);
8199
8200 /* DMA Coalescing high water mark needs to be greater
8201 * than the Rx threshold. Set hwm to PBA - max frame
8202 * size in 16B units, capping it at PBA - 6KB.
8203 */
8204 hwm = 64 * (pba - 6);
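/* pba is in KB, so (pba - 6) KB times 64 expresses the
 * watermark in 16-byte units (1 KB / 16 B = 64)
 */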
8205 reg = rd32(E1000_FCRTC);
8206 reg &= ~E1000_FCRTC_RTH_COAL_MASK;
8207 reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
8208 & E1000_FCRTC_RTH_COAL_MASK);
8209 wr32(E1000_FCRTC, reg);
8210
8211 /* Set the DMA Coalescing Rx threshold to PBA - 2 * max
8212 * frame size, capping it at PBA - 10KB.
8213 */
8214 dmac_thr = pba - 10;
8215 reg = rd32(E1000_DMACR);
8216 reg &= ~E1000_DMACR_DMACTHR_MASK;
8217 reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
8218 & E1000_DMACR_DMACTHR_MASK);
8219
8220 /* transition to L0s or L1 if available */
8221 reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
8222
8223 /* watchdog timer = ~1000 usec, programmed in 32 usec units */
8224 reg |= (1000 >> 5);
8225
8226 /* Disable BMC-to-OS Watchdog Enable */
8227 if (hw->mac.type != e1000_i354)
8228 reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
8229
8230 wr32(E1000_DMACR, reg);
8231
8232 /* no lower threshold to disable
8233 * coalescing (smart FIFO); UTRESH=0
8234 */
8235 wr32(E1000_DMCRTRH, 0);
8236
8237 reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);
8238
8239 wr32(E1000_DMCTLX, reg);
8240
8241 /* free space in tx packet buffer to wake from
8242 * DMA coal
8243 */
8244 wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
8245 (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
8246
8247 /* make low power state decision controlled
8248 * by DMA coal
8249 */
8250 reg = rd32(E1000_PCIEMISC);
8251 reg &= ~E1000_PCIEMISC_LX_DECISION;
8252 wr32(E1000_PCIEMISC, reg);
8253 } /* endif adapter->dmac is not disabled */
8254 } else if (hw->mac.type == e1000_82580) {
8255 u32 reg = rd32(E1000_PCIEMISC);
8256
8257 wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
8258 wr32(E1000_DMACR, 0);
8259 }
8260}
8261
8262/**
8263 * igb_read_i2c_byte - Reads 8 bit word over I2C
8264 * @hw: pointer to hardware structure
8265 * @byte_offset: byte offset to read
8266 * @dev_addr: device address
8267 * @data: value read
8268 *
8269 * Performs byte read operation over I2C interface at
8270 * a specified device address.
8271 **/
8272s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
8273 u8 dev_addr, u8 *data)
8274{
8275 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
8276 struct i2c_client *this_client = adapter->i2c_client;
8277 s32 status;
8278 u16 swfw_mask = 0;
8279
8280 if (!this_client)
8281 return E1000_ERR_I2C;
8282
8283 swfw_mask = E1000_SWFW_PHY0_SM;
8284
8285 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
8286 return E1000_ERR_SWFW_SYNC;
8287
8288 status = i2c_smbus_read_byte_data(this_client, byte_offset);
8289 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
8290
8291 if (status < 0)
8292 return E1000_ERR_I2C;
8293
8294 *data = status;
8295 return 0;
8297}
8298
8299/**
8300 * igb_write_i2c_byte - Writes 8 bit word over I2C
8301 * @hw: pointer to hardware structure
8302 * @byte_offset: byte offset to write
8303 * @dev_addr: device address
8304 * @data: value to write
8305 *
8306 * Performs byte write operation over I2C interface at
8307 * a specified device address.
8308 **/
8309s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
8310 u8 dev_addr, u8 data)
8311{
8312 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
8313 struct i2c_client *this_client = adapter->i2c_client;
8314 s32 status;
8315 u16 swfw_mask = E1000_SWFW_PHY0_SM;
8316
8317 if (!this_client)
8318 return E1000_ERR_I2C;
8319
8320 if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
8321 return E1000_ERR_SWFW_SYNC;
8322 status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
8323 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
8324
8325 if (status)
8326 return E1000_ERR_I2C;
8327
8328 return 0;
8330}
8331
8332int igb_reinit_queues(struct igb_adapter *adapter)
8333{
8334 struct net_device *netdev = adapter->netdev;
8335 struct pci_dev *pdev = adapter->pdev;
8336 int err = 0;
8337
8338 if (netif_running(netdev))
8339 igb_close(netdev);
8340
8341 igb_reset_interrupt_capability(adapter);
8342
8343 if (igb_init_interrupt_scheme(adapter, true)) {
8344 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
8345 return -ENOMEM;
8346 }
8347
8348 if (netif_running(netdev))
8349 err = igb_open(netdev);
8350
8351 return err;
8352}
8353
8354static void igb_nfc_filter_exit(struct igb_adapter *adapter)
8355{
8356 struct igb_nfc_filter *rule;
8357
8358 spin_lock(&adapter->nfc_lock);
8359
8360 hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
8361 igb_erase_filter(adapter, rule);
8362
8363 spin_unlock(&adapter->nfc_lock);
8364}
8365
8366static void igb_nfc_filter_restore(struct igb_adapter *adapter)
8367{
8368 struct igb_nfc_filter *rule;
8369
8370 spin_lock(&adapter->nfc_lock);
8371
8372 hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
8373 igb_add_filter(adapter, rule);
8374
8375 spin_unlock(&adapter->nfc_lock);
8376}
8377/* igb_main.c */