// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/if_vlan.h>
#include <linux/aer.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/pm_runtime.h>
#include <net/pkt_sched.h>
#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include <linux/pci.h>

#include <net/ipv6.h>

#include "igc.h"
#include "igc_hw.h"
#include "igc_tsn.h"
#include "igc_xdp.h"

#define DRV_SUMMARY	"Intel(R) 2.5G Ethernet Linux Driver"

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

#define IGC_XDP_PASS		0
#define IGC_XDP_CONSUMED	BIT(0)
#define IGC_XDP_TX		BIT(1)
#define IGC_XDP_REDIRECT	BIT(2)

static int debug = -1;

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

char igc_driver_name[] = "igc";
static const char igc_driver_string[] = DRV_SUMMARY;
static const char igc_copyright[] =
	"Copyright(c) 2018 Intel Corporation.";

static const struct igc_info *igc_info_tbl[] = {
	[board_base] = &igc_base_info,
};

static const struct pci_device_id igc_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_K), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LMVP), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_IT), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I221_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_BLANK_NVM), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igc_pci_tbl);

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

void igc_reset(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;
	struct igc_fc_info *fc = &hw->fc;
	u32 pba, hwm;

	/* Repartition PBA for greater than 9k MTU if required */
	pba = IGC_PBA_34K;

	/* flow control settings
	 * The high water mark must be low enough to fit one full frame
	 * after transmitting the pause frame. As such we must have enough
	 * space to allow for us to complete our current transmit and then
	 * receive the frame that is in progress from the link partner.
	 * Set it to:
	 * - the full Rx FIFO size minus one full Tx plus one full Rx frame
	 */
	hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);

	fc->high_water = hwm & 0xFFFFFFF0;	/* 16-byte granularity */
	fc->low_water = fc->high_water - 16;
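	/* Illustrative arithmetic (numbers assumed, not taken from this
	 * driver): with pba = 34 (KB), pba << 10 is 34816 bytes of Rx
	 * FIFO. For a 1500-byte MTU, max_frame_size is roughly 1522
	 * bytes, so hwm lands about one max Tx frame plus one max jumbo
	 * Rx frame below the FIFO top; the 0xFFFFFFF0 mask then rounds
	 * it down to the 16-byte granularity the hardware requires.
	 */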
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;

	hw->mac.ops.reset_hw(hw);

	if (hw->mac.ops.init_hw(hw))
		netdev_err(dev, "Error on hardware initialization\n");

	/* Re-establish EEE setting */
	igc_set_eee_i225(hw, true, true, true);

	if (!netif_running(adapter->netdev))
		igc_power_down_phy_copper_base(&adapter->hw);

	/* Enable HW to recognize an 802.1Q VLAN Ethernet packet */
	wr32(IGC_VET, ETH_P_8021Q);

	/* Re-enable PTP, where applicable. */
	igc_ptp_reset(adapter);

	/* Re-enable TSN offloading, where applicable. */
	igc_tsn_reset(adapter);

	igc_get_phy_info(hw);
}

/**
 * igc_power_up_link - Power up the phy link
 * @adapter: address of board private structure
 */
static void igc_power_up_link(struct igc_adapter *adapter)
{
	igc_reset_phy(&adapter->hw);

	igc_power_up_phy_copper(&adapter->hw);

	igc_setup_link(&adapter->hw);
}

/**
 * igc_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igc_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 */
static void igc_release_hw_control(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	if (!pci_device_is_present(adapter->pdev))
		return;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(IGC_CTRL_EXT);
	wr32(IGC_CTRL_EXT,
	     ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
}

/**
 * igc_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igc_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 */
static void igc_get_hw_control(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(IGC_CTRL_EXT);
	wr32(IGC_CTRL_EXT,
	     ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
}

static void igc_unmap_tx_buffer(struct device *dev, struct igc_tx_buffer *buf)
{
	dma_unmap_single(dev, dma_unmap_addr(buf, dma),
			 dma_unmap_len(buf, len), DMA_TO_DEVICE);

	dma_unmap_len_set(buf, len, 0);
}

/**
 * igc_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 */
static void igc_clean_tx_ring(struct igc_ring *tx_ring)
{
	u16 i = tx_ring->next_to_clean;
	struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
	u32 xsk_frames = 0;

	while (i != tx_ring->next_to_use) {
		union igc_adv_tx_desc *eop_desc, *tx_desc;

		switch (tx_buffer->type) {
		case IGC_TX_BUFFER_TYPE_XSK:
			xsk_frames++;
			break;
		case IGC_TX_BUFFER_TYPE_XDP:
			xdp_return_frame(tx_buffer->xdpf);
			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
			break;
		case IGC_TX_BUFFER_TYPE_SKB:
			dev_kfree_skb_any(tx_buffer->skb);
			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
			break;
		default:
			netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n");
			break;
		}

		/* check for eop_desc to determine the end of the packet */
		eop_desc = tx_buffer->next_to_watch;
		tx_desc = IGC_TX_DESC(tx_ring, i);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(i == tx_ring->count)) {
				i = 0;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IGC_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len))
				igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
		}

		tx_buffer->next_to_watch = NULL;

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		i++;
		if (unlikely(i == tx_ring->count)) {
			i = 0;
			tx_buffer = tx_ring->tx_buffer_info;
		}
	}

	if (tx_ring->xsk_pool && xsk_frames)
		xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);

	/* reset BQL for queue */
	netdev_tx_reset_queue(txring_txq(tx_ring));

	/* reset next_to_use and next_to_clean */
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}

/**
 * igc_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
void igc_free_tx_resources(struct igc_ring *tx_ring)
{
	igc_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * igc_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
static void igc_free_all_tx_resources(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igc_free_tx_resources(adapter->tx_ring[i]);
}

/**
 * igc_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 */
static void igc_clean_all_tx_rings(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i])
			igc_clean_tx_ring(adapter->tx_ring[i]);
}

/**
 * igc_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 */
int igc_setup_tx_resources(struct igc_ring *tx_ring)
{
	struct net_device *ndev = tx_ring->netdev;
	struct device *dev = tx_ring->dev;
	int size = 0;

	size = sizeof(struct igc_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);

	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	netdev_err(ndev, "Unable to allocate memory for Tx descriptor ring\n");
	return -ENOMEM;
}

/**
 * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int igc_setup_all_tx_resources(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = igc_setup_tx_resources(adapter->tx_ring[i]);
		if (err) {
			netdev_err(dev, "Error on Tx queue %u setup\n", i);
			for (i--; i >= 0; i--)
				igc_free_tx_resources(adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}

static void igc_clean_rx_ring_page_shared(struct igc_ring *rx_ring)
{
	u16 i = rx_ring->next_to_clean;

	dev_kfree_skb(rx_ring->skb);
	rx_ring->skb = NULL;

	/* Free all the Rx ring sk_buffs */
	while (i != rx_ring->next_to_alloc) {
		struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      buffer_info->dma,
					      buffer_info->page_offset,
					      igc_rx_bufsz(rx_ring),
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(rx_ring->dev,
				     buffer_info->dma,
				     igc_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE,
				     IGC_RX_DMA_ATTR);
		__page_frag_cache_drain(buffer_info->page,
					buffer_info->pagecnt_bias);

		i++;
		if (i == rx_ring->count)
			i = 0;
	}
}

static void igc_clean_rx_ring_xsk_pool(struct igc_ring *ring)
{
	struct igc_rx_buffer *bi;
	u16 i;

	for (i = 0; i < ring->count; i++) {
		bi = &ring->rx_buffer_info[i];
		if (!bi->xdp)
			continue;

		xsk_buff_free(bi->xdp);
		bi->xdp = NULL;
	}
}

/**
 * igc_clean_rx_ring - Free Rx Buffers per Queue
 * @ring: ring to free buffers from
 */
static void igc_clean_rx_ring(struct igc_ring *ring)
{
	if (ring->xsk_pool)
		igc_clean_rx_ring_xsk_pool(ring);
	else
		igc_clean_rx_ring_page_shared(ring);

	clear_ring_uses_large_buffer(ring);

	ring->next_to_alloc = 0;
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
}

/**
 * igc_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 */
static void igc_clean_all_rx_rings(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i])
			igc_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 * igc_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
void igc_free_rx_resources(struct igc_ring *rx_ring)
{
	igc_clean_rx_ring(rx_ring);

	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * igc_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 */
static void igc_free_all_rx_resources(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igc_free_rx_resources(adapter->rx_ring[i]);
}

/**
 * igc_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 */
int igc_setup_rx_resources(struct igc_ring *rx_ring)
{
	struct net_device *ndev = rx_ring->netdev;
	struct device *dev = rx_ring->dev;
	u8 index = rx_ring->queue_index;
	int size, desc_len, res;

	/* XDP RX-queue info */
	if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index,
			       rx_ring->q_vector->napi.napi_id);
	if (res < 0) {
		netdev_err(ndev, "Failed to register xdp_rxq index %u\n",
			   index);
		return res;
	}

	size = sizeof(struct igc_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	desc_len = sizeof(union igc_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

err:
	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n");
	return -ENOMEM;
}

/**
 * igc_setup_all_rx_resources - wrapper to allocate Rx resources
 *                              (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int igc_setup_all_rx_resources(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = igc_setup_rx_resources(adapter->rx_ring[i]);
		if (err) {
			netdev_err(dev, "Error on Rx queue %u setup\n", i);
			for (i--; i >= 0; i--)
				igc_free_rx_resources(adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}

static struct xsk_buff_pool *igc_get_xsk_pool(struct igc_adapter *adapter,
					      struct igc_ring *ring)
{
	if (!igc_xdp_is_enabled(adapter) ||
	    !test_bit(IGC_RING_FLAG_AF_XDP_ZC, &ring->flags))
		return NULL;

	return xsk_get_pool_from_qid(ring->netdev, ring->queue_index);
}

/**
 * igc_configure_rx_ring - Configure a receive ring after Reset
 * @adapter: board private structure
 * @ring: receive ring to be configured
 *
 * Configure the Rx unit of the MAC after a reset.
 */
static void igc_configure_rx_ring(struct igc_adapter *adapter,
				  struct igc_ring *ring)
{
	struct igc_hw *hw = &adapter->hw;
	union igc_adv_rx_desc *rx_desc;
	int reg_idx = ring->reg_idx;
	u32 srrctl = 0, rxdctl = 0;
	u64 rdba = ring->dma;
	u32 buf_size;

	xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
	ring->xsk_pool = igc_get_xsk_pool(adapter, ring);
	if (ring->xsk_pool) {
		WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
						   MEM_TYPE_XSK_BUFF_POOL,
						   NULL));
		xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
	} else {
		WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
						   MEM_TYPE_PAGE_SHARED,
						   NULL));
	}

	if (igc_xdp_is_enabled(adapter))
		set_ring_uses_large_buffer(ring);

	/* disable the queue */
	wr32(IGC_RXDCTL(reg_idx), 0);

	/* Set DMA base address registers */
	wr32(IGC_RDBAL(reg_idx),
	     rdba & 0x00000000ffffffffULL);
	wr32(IGC_RDBAH(reg_idx), rdba >> 32);
	wr32(IGC_RDLEN(reg_idx),
	     ring->count * sizeof(union igc_adv_rx_desc));

	/* initialize head and tail */
	ring->tail = adapter->io_addr + IGC_RDT(reg_idx);
	wr32(IGC_RDH(reg_idx), 0);
	writel(0, ring->tail);

	/* reset next-to-use/clean to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	if (ring->xsk_pool)
		buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool);
	else if (ring_uses_large_buffer(ring))
		buf_size = IGC_RXBUFFER_3072;
	else
		buf_size = IGC_RXBUFFER_2048;

	srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT;
	srrctl |= buf_size >> IGC_SRRCTL_BSIZEPKT_SHIFT;
	srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;

	wr32(IGC_SRRCTL(reg_idx), srrctl);

	rxdctl |= IGC_RX_PTHRESH;
	rxdctl |= IGC_RX_HTHRESH << 8;
	rxdctl |= IGC_RX_WTHRESH << 16;

	/* initialize rx_buffer_info */
	memset(ring->rx_buffer_info, 0,
	       sizeof(struct igc_rx_buffer) * ring->count);

	/* initialize Rx descriptor 0 */
	rx_desc = IGC_RX_DESC(ring, 0);
	rx_desc->wb.upper.length = 0;

	/* enable receive descriptor fetching */
	rxdctl |= IGC_RXDCTL_QUEUE_ENABLE;

	wr32(IGC_RXDCTL(reg_idx), rxdctl);
}

/**
 * igc_configure_rx - Configure receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 */
static void igc_configure_rx(struct igc_adapter *adapter)
{
	int i;

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < adapter->num_rx_queues; i++)
		igc_configure_rx_ring(adapter, adapter->rx_ring[i]);
}

/**
 * igc_configure_tx_ring - Configure transmit ring after Reset
 * @adapter: board private structure
 * @ring: tx ring to configure
 *
 * Configure a transmit ring after a reset.
 */
static void igc_configure_tx_ring(struct igc_adapter *adapter,
				  struct igc_ring *ring)
{
	struct igc_hw *hw = &adapter->hw;
	int reg_idx = ring->reg_idx;
	u64 tdba = ring->dma;
	u32 txdctl = 0;

	ring->xsk_pool = igc_get_xsk_pool(adapter, ring);

	/* disable the queue */
	wr32(IGC_TXDCTL(reg_idx), 0);
	wrfl();
	mdelay(10);

	wr32(IGC_TDLEN(reg_idx),
	     ring->count * sizeof(union igc_adv_tx_desc));
	wr32(IGC_TDBAL(reg_idx),
	     tdba & 0x00000000ffffffffULL);
	wr32(IGC_TDBAH(reg_idx), tdba >> 32);

	ring->tail = adapter->io_addr + IGC_TDT(reg_idx);
	wr32(IGC_TDH(reg_idx), 0);
	writel(0, ring->tail);

	txdctl |= IGC_TX_PTHRESH;
	txdctl |= IGC_TX_HTHRESH << 8;
	txdctl |= IGC_TX_WTHRESH << 16;

	txdctl |= IGC_TXDCTL_QUEUE_ENABLE;
	wr32(IGC_TXDCTL(reg_idx), txdctl);
}

/**
 * igc_configure_tx - Configure transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 */
static void igc_configure_tx(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igc_configure_tx_ring(adapter, adapter->tx_ring[i]);
}

/**
 * igc_setup_mrqc - configure the multiple receive queue control registers
 * @adapter: Board private structure
 */
static void igc_setup_mrqc(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 j, num_rx_queues;
	u32 mrqc, rxcsum;
	u32 rss_key[10];

	netdev_rss_key_fill(rss_key, sizeof(rss_key));
	for (j = 0; j < 10; j++)
		wr32(IGC_RSSRK(j), rss_key[j]);

	num_rx_queues = adapter->rss_queues;

	if (adapter->rss_indir_tbl_init != num_rx_queues) {
		for (j = 0; j < IGC_RETA_SIZE; j++)
			adapter->rss_indir_tbl[j] =
				(j * num_rx_queues) / IGC_RETA_SIZE;
		adapter->rss_indir_tbl_init = num_rx_queues;
	}
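	/* Example (illustrative, assuming IGC_RETA_SIZE is 128): with
	 * rss_queues = 4 the loop above spreads the table evenly, so
	 * entries 0-31 map to queue 0, 32-63 to queue 1, 64-95 to
	 * queue 2 and 96-127 to queue 3.
	 */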
	igc_write_rss_indir_tbl(adapter);

	/* Disable raw packet checksumming so that RSS hash is placed in
	 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
	 * offloads as they are enabled by default
	 */
	rxcsum = rd32(IGC_RXCSUM);
	rxcsum |= IGC_RXCSUM_PCSD;

	/* Enable Receive Checksum Offload for SCTP */
	rxcsum |= IGC_RXCSUM_CRCOFL;

	/* Don't need to set TUOFL or IPOFL, they default to 1 */
	wr32(IGC_RXCSUM, rxcsum);

	/* Generate RSS hash based on packet types, TCP/UDP
	 * port numbers and/or IPv4/v6 src and dst addresses
	 */
	mrqc = IGC_MRQC_RSS_FIELD_IPV4 |
	       IGC_MRQC_RSS_FIELD_IPV4_TCP |
	       IGC_MRQC_RSS_FIELD_IPV6 |
	       IGC_MRQC_RSS_FIELD_IPV6_TCP |
	       IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;

	if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP)
		mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
	if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP)
		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;

	mrqc |= IGC_MRQC_ENABLE_RSS_MQ;

	wr32(IGC_MRQC, mrqc);
}

/**
 * igc_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 */
static void igc_setup_rctl(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = rd32(IGC_RCTL);

	rctl &= ~(3 << IGC_RCTL_MO_SHIFT);
	rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC);

	rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);

	/* enable stripping of CRC. Newer features require
	 * that the HW strips the CRC.
	 */
	rctl |= IGC_RCTL_SECRC;

	/* disable store bad packets and clear size bits. */
	rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256);

	/* enable LPE to allow for reception of jumbo frames */
	rctl |= IGC_RCTL_LPE;

	/* disable queue 0 to prevent tail write w/o re-config */
	wr32(IGC_RXDCTL(0), 0);

	/* This is useful for sniffing bad packets. */
	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in set_rx_mode
		 */
		rctl |= (IGC_RCTL_SBP | /* Receive bad packets */
			 IGC_RCTL_BAM | /* RX All Bcast Pkts */
			 IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */

		rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */
			  IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */
	}

	wr32(IGC_RCTL, rctl);
}

/**
 * igc_setup_tctl - configure the transmit control registers
 * @adapter: Board private structure
 */
static void igc_setup_tctl(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 tctl;

	/* disable queue 0, which could be enabled by default */
	wr32(IGC_TXDCTL(0), 0);

	/* Program the Transmit Control Register */
	tctl = rd32(IGC_TCTL);
	tctl &= ~IGC_TCTL_CT;
	tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC |
		(IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT);

	/* Enable transmits */
	tctl |= IGC_TCTL_EN;

	wr32(IGC_TCTL, tctl);
}

/**
 * igc_set_mac_filter_hw() - Set MAC address filter in hardware
 * @adapter: Pointer to adapter where the filter should be set
 * @index: Filter index
 * @type: MAC address filter type (source or destination)
 * @addr: MAC address
 * @queue: If non-negative, queue assignment feature is enabled and frames
 *         matching the filter are enqueued onto 'queue'. Otherwise, queue
 *         assignment is disabled.
 */
static void igc_set_mac_filter_hw(struct igc_adapter *adapter, int index,
				  enum igc_mac_filter_type type,
				  const u8 *addr, int queue)
{
	struct net_device *dev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;
	u32 ral, rah;

	if (WARN_ON(index >= hw->mac.rar_entry_count))
		return;

	ral = le32_to_cpup((__le32 *)(addr));
	rah = le16_to_cpup((__le16 *)(addr + 4));

	if (type == IGC_MAC_FILTER_TYPE_SRC) {
		rah &= ~IGC_RAH_ASEL_MASK;
		rah |= IGC_RAH_ASEL_SRC_ADDR;
	}

	if (queue >= 0) {
		rah &= ~IGC_RAH_QSEL_MASK;
		rah |= (queue << IGC_RAH_QSEL_SHIFT);
		rah |= IGC_RAH_QSEL_ENABLE;
	}

	rah |= IGC_RAH_AV;

	wr32(IGC_RAL(index), ral);
	wr32(IGC_RAH(index), rah);

	netdev_dbg(dev, "MAC address filter set in HW: index %d", index);
}

/**
 * igc_clear_mac_filter_hw() - Clear MAC address filter in hardware
 * @adapter: Pointer to adapter where the filter should be cleared
 * @index: Filter index
 */
static void igc_clear_mac_filter_hw(struct igc_adapter *adapter, int index)
{
	struct net_device *dev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;

	if (WARN_ON(index >= hw->mac.rar_entry_count))
		return;

	wr32(IGC_RAL(index), 0);
	wr32(IGC_RAH(index), 0);

	netdev_dbg(dev, "MAC address filter cleared in HW: index %d", index);
}

/* Set default MAC address for the PF in the first RAR entry */
static void igc_set_default_mac_filter(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	u8 *addr = adapter->hw.mac.addr;

	netdev_dbg(dev, "Set default MAC address filter: address %pM", addr);

	igc_set_mac_filter_hw(adapter, 0, IGC_MAC_FILTER_TYPE_DST, addr, -1);
}

/**
 * igc_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 */
static int igc_set_mac(struct net_device *netdev, void *p)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(netdev, addr->sa_data);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	/* set the correct pool for the new PF MAC address in entry 0 */
	igc_set_default_mac_filter(adapter);

	return 0;
}

/**
 * igc_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *          0 on no addresses written
 *          X on writing X addresses to MTA
 **/
static int igc_write_mc_addr_list(struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list;
	int i;

	if (netdev_mc_empty(netdev)) {
		/* nothing to program, so clear mc list */
		igc_update_mc_addr_list(hw, NULL, 0);
		return 0;
	}

	mta_list = kcalloc(netdev_mc_count(netdev), ETH_ALEN, GFP_ATOMIC);
	if (!mta_list)
		return -ENOMEM;

	/* The shared function expects a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev)
		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

	igc_update_mc_addr_list(hw, mta_list, i);
	kfree(mta_list);

	return netdev_mc_count(netdev);
}

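/* Sketch of the launchtime math below, with illustrative numbers that are
 * not from the source: for base_time = 0, cycle_time = 1 ms and
 * now = 10.4 ms, n = 10, so baset_est = 10 ms and end_of_cycle = 11 ms.
 * A txtime of 11.2 ms falls past end_of_cycle and may set first_flag and
 * insert_empty for the new cycle; the value returned is txtime relative
 * to baset_est, wrapped into [0, cycle_time), here 1.2 ms -> 0.2 ms.
 */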
static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime,
				bool *first_flag, bool *insert_empty)
{
	struct igc_adapter *adapter = netdev_priv(ring->netdev);
	ktime_t cycle_time = adapter->cycle_time;
	ktime_t base_time = adapter->base_time;
	ktime_t now = ktime_get_clocktai();
	ktime_t baset_est, end_of_cycle;
	u32 launchtime;
	s64 n;

	n = div64_s64(ktime_sub_ns(now, base_time), cycle_time);

	baset_est = ktime_add_ns(base_time, cycle_time * (n));
	end_of_cycle = ktime_add_ns(baset_est, cycle_time);

	if (ktime_compare(txtime, end_of_cycle) >= 0) {
		if (baset_est != ring->last_ff_cycle) {
			*first_flag = true;
			ring->last_ff_cycle = baset_est;

			if (ktime_compare(txtime, ring->last_tx_cycle) > 0)
				*insert_empty = true;
		}
	}

	/* There is a window at the end of the cycle in which packets may
	 * not honor their launchtime. A 5 usec window is chosen to allow
	 * for the software updating the tail pointer and the packet being
	 * DMA'ed into the packet buffer.
	 */
	if ((ktime_sub_ns(end_of_cycle, now) < 5 * NSEC_PER_USEC))
		netdev_warn(ring->netdev, "Packet with txtime=%llu may not be honoured\n",
			    txtime);

	ring->last_tx_cycle = end_of_cycle;

	launchtime = ktime_sub_ns(txtime, baset_est);
	if (launchtime > 0)
		div_s64_rem(launchtime, cycle_time, &launchtime);
	else
		launchtime = 0;

	return cpu_to_le32(launchtime);
}

static int igc_init_empty_frame(struct igc_ring *ring,
				struct igc_tx_buffer *buffer,
				struct sk_buff *skb)
{
	unsigned int size;
	dma_addr_t dma;

	size = skb_headlen(skb);

	dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ring->dev, dma)) {
		netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
		return -ENOMEM;
	}

	buffer->skb = skb;
	buffer->protocol = 0;
	buffer->bytecount = skb->len;
	buffer->gso_segs = 1;
	buffer->time_stamp = jiffies;
	dma_unmap_len_set(buffer, len, skb->len);
	dma_unmap_addr_set(buffer, dma, dma);

	return 0;
}

static int igc_init_tx_empty_descriptor(struct igc_ring *ring,
					struct sk_buff *skb,
					struct igc_tx_buffer *first)
{
	union igc_adv_tx_desc *desc;
	u32 cmd_type, olinfo_status;
	int err;

	if (!igc_desc_unused(ring))
		return -EBUSY;

	err = igc_init_empty_frame(ring, first, skb);
	if (err)
		return err;

	cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
		   IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
		   first->bytecount;
	olinfo_status = first->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;

	desc = IGC_TX_DESC(ring, ring->next_to_use);
	desc->read.cmd_type_len = cpu_to_le32(cmd_type);
	desc->read.olinfo_status = cpu_to_le32(olinfo_status);
	desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(first, dma));

	netdev_tx_sent_queue(txring_txq(ring), skb->len);

	first->next_to_watch = desc;

	ring->next_to_use++;
	if (ring->next_to_use == ring->count)
		ring->next_to_use = 0;

	return 0;
}

#define IGC_EMPTY_FRAME_SIZE 60

static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
			    __le32 launch_time, bool first_flag,
			    u32 vlan_macip_lens, u32 type_tucmd,
			    u32 mss_l4len_idx)
{
	struct igc_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IGC_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT;

	/* For i225, context index must be unique per ring. */
	if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
		mss_l4len_idx |= tx_ring->reg_idx << 4;

	if (first_flag)
		mss_l4len_idx |= IGC_ADVTXD_TSN_CNTX_FIRST;

	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
	context_desc->launch_time = launch_time;
}

static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first,
			__le32 launch_time, bool first_flag)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
csum_failed:
		if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) &&
		    !tx_ring->launchtime_enable)
			return;
		goto no_csum;
	}

	switch (skb->csum_offset) {
	case offsetof(struct tcphdr, check):
		type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;
		fallthrough;
	case offsetof(struct udphdr, check):
		break;
	case offsetof(struct sctphdr, checksum):
		/* validate that this is actually an SCTP request */
		if (skb_csum_is_sctp(skb)) {
			type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP;
			break;
		}
		fallthrough;
	default:
		skb_checksum_help(skb);
		goto csum_failed;
	}

	/* update TX checksum flag */
	first->tx_flags |= IGC_TX_FLAGS_CSUM;
	vlan_macip_lens = skb_checksum_start_offset(skb) -
			  skb_network_offset(skb);
no_csum:
	vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;

	igc_tx_ctxtdesc(tx_ring, launch_time, first_flag,
			vlan_macip_lens, type_tucmd, 0);
}

static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
{
	struct net_device *netdev = tx_ring->netdev;

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* Memory barrier: make sure the queue-stop write above is visible
	 * before we re-read the free descriptor count below.
	 */
	smp_mb();

	/* We need to check again in case another CPU has just
	 * made room available.
	 */
	if (igc_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_wake_subqueue(netdev, tx_ring->queue_index);

	u64_stats_update_begin(&tx_ring->tx_syncp2);
	tx_ring->tx_stats.restart_queue2++;
	u64_stats_update_end(&tx_ring->tx_syncp2);

	return 0;
}

static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
{
	if (igc_desc_unused(tx_ring) >= size)
		return 0;
	return __igc_maybe_stop_tx(tx_ring, size);
}

#define IGC_SET_FLAG(_input, _flag, _result) \
	(((_flag) <= (_result)) ?				\
	 ((u32)((_input) & (_flag)) * ((_result) / (_flag))) :	\
	 ((u32)((_input) & (_flag)) / ((_flag) / (_result))))
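/* Example expansion (illustrative): IGC_SET_FLAG(tx_flags,
 * IGC_TX_FLAGS_VLAN, IGC_ADVTXD_DCMD_VLE) masks the input down to the
 * source bit, then scales it by multiplying with (result / flag) when
 * the result bit is the larger one, or dividing by (flag / result)
 * otherwise. A set source bit therefore becomes exactly the result bit,
 * and a clear one stays zero, without any branches.
 */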

static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	u32 cmd_type = IGC_ADVTXD_DTYP_DATA |
		       IGC_ADVTXD_DCMD_DEXT |
		       IGC_ADVTXD_DCMD_IFCS;

	/* set HW vlan bit if vlan is present */
	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_VLAN,
				 IGC_ADVTXD_DCMD_VLE);

	/* set segmentation bits for TSO */
	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO,
				 (IGC_ADVTXD_DCMD_TSE));

	/* set timestamp bit if present */
	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP,
				 (IGC_ADVTXD_MAC_TSTAMP));

	/* insert frame checksum */
	cmd_type ^= IGC_SET_FLAG(skb->no_fcs, 1, IGC_ADVTXD_DCMD_IFCS);

	return cmd_type;
}

static void igc_tx_olinfo_status(struct igc_ring *tx_ring,
				 union igc_adv_tx_desc *tx_desc,
				 u32 tx_flags, unsigned int paylen)
{
	u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT;

	/* insert L4 checksum */
	olinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) *
			 ((IGC_TXD_POPTS_TXSM << 8) /
			  IGC_TX_FLAGS_CSUM);

	/* insert IPv4 checksum */
	olinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) *
			 (((IGC_TXD_POPTS_IXSM << 8)) /
			  IGC_TX_FLAGS_IPV4);

	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}

static int igc_tx_map(struct igc_ring *tx_ring,
		      struct igc_tx_buffer *first,
		      const u8 hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct igc_tx_buffer *tx_buffer;
	union igc_adv_tx_desc *tx_desc;
	u32 tx_flags = first->tx_flags;
	skb_frag_t *frag;
	u16 i = tx_ring->next_to_use;
	unsigned int data_len, size;
	dma_addr_t dma;
	u32 cmd_type;

	cmd_type = igc_tx_cmd_type(skb, tx_flags);
	tx_desc = IGC_TX_DESC(tx_ring, i);

	igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);

	size = skb_headlen(skb);
	data_len = skb->data_len;

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buffer = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		while (unlikely(size > IGC_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = IGC_TX_DESC(tx_ring, 0);
				i = 0;
			}
			tx_desc->read.olinfo_status = 0;

			dma += IGC_MAX_DATA_PER_TXD;
			size -= IGC_MAX_DATA_PER_TXD;

			tx_desc->read.buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = IGC_TX_DESC(tx_ring, 0);
			i = 0;
		}
		tx_desc->read.olinfo_status = 0;

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
				       size, DMA_TO_DEVICE);

		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	/* write last descriptor with RS and EOP bits */
	cmd_type |= size | IGC_TXD_DCMD;
	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);

	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* set the timestamp */
	first->time_stamp = jiffies;

	skb_tx_timestamp(skb);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch. (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	/* Make sure there is space in the ring for the next send. */
	igc_maybe_stop_tx(tx_ring, DESC_NEEDED);

	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
		writel(i, tx_ring->tail);

	return 0;
dma_error:
	netdev_err(tx_ring->netdev, "TX DMA map failed\n");
	tx_buffer = &tx_ring->tx_buffer_info[i];

	/* clear dma mappings for failed tx_buffer_info map */
	while (tx_buffer != first) {
		if (dma_unmap_len(tx_buffer, len))
			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);

		if (i-- == 0)
			i += tx_ring->count;
		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	if (dma_unmap_len(tx_buffer, len))
		igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);

	dev_kfree_skb_any(tx_buffer->skb);
	tx_buffer->skb = NULL;

	tx_ring->next_to_use = i;

	return -1;
}

static int igc_tso(struct igc_ring *tx_ring,
		   struct igc_tx_buffer *first,
		   __le32 launch_time, bool first_flag,
		   u8 *hdr_len)
{
	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		unsigned char *hdr;
	} l4;
	u32 paylen, l4_offset;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_checksum_start(skb);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		unsigned char *csum_start = skb_checksum_start(skb);
		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);

		/* IP header will have to cancel out any data that
		 * is not a part of the outer IP header
		 */
		ip.v4->check = csum_fold(csum_partial(trans_start,
						      csum_start - trans_start,
						      0));
		type_tucmd |= IGC_ADVTXD_TUCMD_IPV4;

		ip.v4->tot_len = 0;
		first->tx_flags |= IGC_TX_FLAGS_TSO |
				   IGC_TX_FLAGS_CSUM |
				   IGC_TX_FLAGS_IPV4;
	} else {
		ip.v6->payload_len = 0;
		first->tx_flags |= IGC_TX_FLAGS_TSO |
				   IGC_TX_FLAGS_CSUM;
	}

	/* determine offset of inner transport header */
	l4_offset = l4.hdr - skb->data;

	/* remove payload length from inner checksum */
	paylen = skb->len - l4_offset;
	if (type_tucmd & IGC_ADVTXD_TUCMD_L4T_TCP) {
		/* compute length of segmentation header */
		*hdr_len = (l4.tcp->doff * 4) + l4_offset;
		csum_replace_by_diff(&l4.tcp->check,
				     (__force __wsum)htonl(paylen));
	} else {
		/* compute length of segmentation header */
		*hdr_len = sizeof(*l4.udp) + l4_offset;
		csum_replace_by_diff(&l4.udp->check,
				     (__force __wsum)htonl(paylen));
	}
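	/* Worked example (illustrative values): a TCP skb with
	 * skb->len = 7266, l4_offset = 66 and doff = 8 gives
	 * paylen = 7200 and *hdr_len = 32 + 66 = 98; the adjustment
	 * above folds the payload length back out of the pseudo-header
	 * checksum so it only covers the headers the hardware replicates
	 * per segment.
	 */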

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* MSS L4LEN IDX */
	mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT;

	/* VLAN MACLEN IPLEN */
	vlan_macip_lens = l4.hdr - ip.hdr;
	vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;

	igc_tx_ctxtdesc(tx_ring, launch_time, first_flag,
			vlan_macip_lens, type_tucmd, mss_l4len_idx);

	return 1;
}

static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
				       struct igc_ring *tx_ring)
{
	bool first_flag = false, insert_empty = false;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
	__be16 protocol = vlan_get_protocol(skb);
	struct igc_tx_buffer *first;
	__le32 launch_time = 0;
	u32 tx_flags = 0;
	unsigned short f;
	ktime_t txtime;
	u8 hdr_len = 0;
	int tso = 0;

	/* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD,
	 *	+ 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD,
	 *	+ 2 desc gap to keep tail from touching head,
	 *	+ 1 desc for context descriptor,
	 * otherwise try next time
	 */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_frag_size(
						&skb_shinfo(skb)->frags[f]));

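	/* Illustrative count (sizes assumed; IGC_MAX_DATA_PER_TXD is 32K
	 * in similar Intel drivers): a 1500-byte linear skb with two
	 * 4096-byte frags gives count = 1 + 1 + 1 = 3, so the check
	 * below requires 3 + 5 = 8 free descriptors before committing
	 * the frame.
	 */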
	if (igc_maybe_stop_tx(tx_ring, count + 5)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	if (!tx_ring->launchtime_enable)
		goto done;

	txtime = skb->tstamp;
	skb->tstamp = ktime_set(0, 0);
	launch_time = igc_tx_launchtime(tx_ring, txtime, &first_flag, &insert_empty);

	if (insert_empty) {
		struct igc_tx_buffer *empty_info;
		struct sk_buff *empty;
		void *data;

		empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
		empty = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC);
		if (!empty)
			goto done;

		data = skb_put(empty, IGC_EMPTY_FRAME_SIZE);
		memset(data, 0, IGC_EMPTY_FRAME_SIZE);

		igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0);

		if (igc_init_tx_empty_descriptor(tx_ring,
						 empty,
						 empty_info) < 0)
			dev_kfree_skb_any(empty);
	}

done:
	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->type = IGC_TX_BUFFER_TYPE_SKB;
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);

		/* FIXME: add support for retrieving timestamps from
		 * the other timer registers before skipping the
		 * timestamping request.
		 */
		if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
		    !test_and_set_bit_lock(__IGC_PTP_TX_IN_PROGRESS,
					   &adapter->state)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			tx_flags |= IGC_TX_FLAGS_TSTAMP;

			adapter->ptp_tx_skb = skb_get(skb);
			adapter->ptp_tx_start = jiffies;
		} else {
			adapter->tx_hwtstamp_skipped++;
		}
	}

	if (skb_vlan_tag_present(skb)) {
		tx_flags |= IGC_TX_FLAGS_VLAN;
		tx_flags |= (skb_vlan_tag_get(skb) << IGC_TX_FLAGS_VLAN_SHIFT);
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = protocol;

	tso = igc_tso(tx_ring, first, launch_time, first_flag, &hdr_len);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		igc_tx_csum(tx_ring, first, launch_time, first_flag);

	igc_tx_map(tx_ring, first, hdr_len);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	return NETDEV_TX_OK;
}

static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter,
						    struct sk_buff *skb)
{
	unsigned int r_idx = skb->queue_mapping;

	if (r_idx >= adapter->num_tx_queues)
		r_idx = r_idx % adapter->num_tx_queues;

	return adapter->tx_ring[r_idx];
}

static netdev_tx_t igc_xmit_frame(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);

	/* The minimum packet size with TCTL.PSP set is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (skb->len < 17) {
		if (skb_padto(skb, 17))
			return NETDEV_TX_OK;
		skb->len = 17;
	}

	return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb));
}

static void igc_rx_checksum(struct igc_ring *ring,
			    union igc_adv_rx_desc *rx_desc,
			    struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Ignore Checksum bit is set */
	if (igc_test_staterr(rx_desc, IGC_RXD_STAT_IXSM))
		return;

	/* Rx checksum disabled via ethtool */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (igc_test_staterr(rx_desc,
			     IGC_RXDEXT_STATERR_L4E |
			     IGC_RXDEXT_STATERR_IPE)) {
		/* work around errata with sctp packets where the TCPE aka
		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
		 * packets (aka let the stack check the crc32c)
		 */
		if (!(skb->len == 60 &&
		      test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
			u64_stats_update_begin(&ring->rx_syncp);
			ring->rx_stats.csum_err++;
			u64_stats_update_end(&ring->rx_syncp);
		}
		/* let the stack verify checksum errors */
		return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (igc_test_staterr(rx_desc, IGC_RXD_STAT_TCPCS |
				      IGC_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	netdev_dbg(ring->netdev, "cksum success: bits %08X\n",
		   le32_to_cpu(rx_desc->wb.upper.status_error));
}

static inline void igc_rx_hash(struct igc_ring *ring,
			       union igc_adv_rx_desc *rx_desc,
			       struct sk_buff *skb)
{
	if (ring->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb,
			     le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
			     PKT_HASH_TYPE_L3);
}

static void igc_rx_vlan(struct igc_ring *rx_ring,
			union igc_adv_rx_desc *rx_desc,
			struct sk_buff *skb)
{
	struct net_device *dev = rx_ring->netdev;
	u16 vid;

	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    igc_test_staterr(rx_desc, IGC_RXD_STAT_VP)) {
		if (igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_LB) &&
		    test_bit(IGC_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
			vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan);
		else
			vid = le16_to_cpu(rx_desc->wb.upper.vlan);

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}
}

/**
 * igc_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in order
 * to populate the hash, checksum, VLAN, protocol, and other fields within the
 * skb.
 */
static void igc_process_skb_fields(struct igc_ring *rx_ring,
				   union igc_adv_rx_desc *rx_desc,
				   struct sk_buff *skb)
{
	igc_rx_hash(rx_ring, rx_desc, skb);

	igc_rx_checksum(rx_ring, rx_desc, skb);

	igc_rx_vlan(rx_ring, rx_desc, skb);

	skb_record_rx_queue(skb, rx_ring->queue_index);

	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}

static void igc_vlan_mode(struct net_device *netdev, netdev_features_t features)
{
	bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl;

	ctrl = rd32(IGC_CTRL);

	if (enable) {
		/* enable VLAN tag insert/strip */
		ctrl |= IGC_CTRL_VME;
	} else {
		/* disable VLAN tag insert/strip */
		ctrl &= ~IGC_CTRL_VME;
	}
	wr32(IGC_CTRL, ctrl);
}

static void igc_restore_vlan(struct igc_adapter *adapter)
{
	igc_vlan_mode(adapter->netdev, adapter->netdev->features);
}

static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring,
					       const unsigned int size,
					       int *rx_buffer_pgcnt)
{
	struct igc_rx_buffer *rx_buffer;

	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
	*rx_buffer_pgcnt =
#if (PAGE_SIZE < 8192)
		page_count(rx_buffer->page);
#else
		0;
#endif
	prefetchw(rx_buffer->page);

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      size,
				      DMA_FROM_DEVICE);

	rx_buffer->pagecnt_bias--;

	return rx_buffer;
}

static void igc_rx_buffer_flip(struct igc_rx_buffer *buffer,
			       unsigned int truesize)
{
#if (PAGE_SIZE < 8192)
	buffer->page_offset ^= truesize;
#else
	buffer->page_offset += truesize;
#endif
}
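/* Illustrative behavior of the flip above: with 4K pages and 2K buffers
 * the XOR toggles page_offset between the two halves of the page
 * (0 <-> 2048), so the device can fill one half while the stack still
 * holds the other. On systems with larger pages the offset simply
 * advances by truesize instead.
 */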
1794
1795static unsigned int igc_get_rx_frame_truesize(struct igc_ring *ring,
1796 unsigned int size)
1797{
1798 unsigned int truesize;
1799
1800#if (PAGE_SIZE < 8192)
1801 truesize = igc_rx_pg_size(ring) / 2;
1802#else
1803 truesize = ring_uses_build_skb(ring) ?
1804 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
1805 SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
1806 SKB_DATA_ALIGN(size);
1807#endif
1808 return truesize;
1809}
1810
1811/**
1812 * igc_add_rx_frag - Add contents of Rx buffer to sk_buff
1813 * @rx_ring: rx descriptor ring to transact packets on
1814 * @rx_buffer: buffer containing page to add
1815 * @skb: sk_buff to place the data into
1816 * @size: size of buffer to be added
1817 *
1818 * This function will add the data contained in rx_buffer->page to the skb.
1819 */
1820static void igc_add_rx_frag(struct igc_ring *rx_ring,
1821 struct igc_rx_buffer *rx_buffer,
1822 struct sk_buff *skb,
1823 unsigned int size)
1824{
1825 unsigned int truesize;
1826
1827#if (PAGE_SIZE < 8192)
1828 truesize = igc_rx_pg_size(rx_ring) / 2;
1829#else
1830 truesize = ring_uses_build_skb(rx_ring) ?
1831 SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
1832 SKB_DATA_ALIGN(size);
1833#endif
1834 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
1835 rx_buffer->page_offset, size, truesize);
1836
1837 igc_rx_buffer_flip(rx_buffer, truesize);
1838}
1839
1840static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
1841 struct igc_rx_buffer *rx_buffer,
1842 struct xdp_buff *xdp)
1843{
1844 unsigned int size = xdp->data_end - xdp->data;
1845 unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
1846 unsigned int metasize = xdp->data - xdp->data_meta;
1847 struct sk_buff *skb;
1848
1849 /* prefetch first cache line of first page */
1850 net_prefetch(xdp->data_meta);
1851
1852 /* build an skb around the page buffer */
1853 skb = napi_build_skb(xdp->data_hard_start, truesize);
1854 if (unlikely(!skb))
1855 return NULL;
1856
1857 /* update pointers within the skb to store the data */
1858 skb_reserve(skb, xdp->data - xdp->data_hard_start);
1859 __skb_put(skb, size);
1860 if (metasize)
1861 skb_metadata_set(skb, metasize);
1862
1863 igc_rx_buffer_flip(rx_buffer, truesize);
1864 return skb;
1865}
1866
1867static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
1868 struct igc_rx_buffer *rx_buffer,
1869 struct xdp_buff *xdp,
1870 ktime_t timestamp)
1871{
1872 unsigned int metasize = xdp->data - xdp->data_meta;
1873 unsigned int size = xdp->data_end - xdp->data;
1874 unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
1875 void *va = xdp->data;
1876 unsigned int headlen;
1877 struct sk_buff *skb;
1878
1879 /* prefetch first cache line of first page */
1880 net_prefetch(xdp->data_meta);
1881
1882 /* allocate a skb to store the frags */
1883 skb = napi_alloc_skb(&rx_ring->q_vector->napi,
1884 IGC_RX_HDR_LEN + metasize);
1885 if (unlikely(!skb))
1886 return NULL;
1887
1888 if (timestamp)
1889 skb_hwtstamps(skb)->hwtstamp = timestamp;
1890
1891 /* Determine available headroom for copy */
1892 headlen = size;
1893 if (headlen > IGC_RX_HDR_LEN)
1894 headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN);
1895
1896 /* align pull length to size of long to optimize memcpy performance */
1897 memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta,
1898 ALIGN(headlen + metasize, sizeof(long)));
1899
1900 if (metasize) {
1901 skb_metadata_set(skb, metasize);
1902 __skb_pull(skb, metasize);
1903 }
1904
1905 /* update all of the pointers */
1906 size -= headlen;
1907 if (size) {
1908 skb_add_rx_frag(skb, 0, rx_buffer->page,
1909 (va + headlen) - page_address(rx_buffer->page),
1910 size, truesize);
1911 igc_rx_buffer_flip(rx_buffer, truesize);
1912 } else {
1913 rx_buffer->pagecnt_bias++;
1914 }
1915
1916 return skb;
1917}
1918
1919/**
1920 * igc_reuse_rx_page - page flip buffer and store it back on the ring
1921 * @rx_ring: rx descriptor ring to store buffers on
1922 * @old_buff: donor buffer to have page reused
1923 *
1924 * Synchronizes page for reuse by the adapter
1925 */
1926static void igc_reuse_rx_page(struct igc_ring *rx_ring,
1927 struct igc_rx_buffer *old_buff)
1928{
1929 u16 nta = rx_ring->next_to_alloc;
1930 struct igc_rx_buffer *new_buff;
1931
1932 new_buff = &rx_ring->rx_buffer_info[nta];
1933
1934 /* update, and store next to alloc */
1935 nta++;
1936 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1937
1938 /* Transfer page from old buffer to new buffer.
1939 * Move each member individually to avoid possible store
1940 * forwarding stalls.
1941 */
1942 new_buff->dma = old_buff->dma;
1943 new_buff->page = old_buff->page;
1944 new_buff->page_offset = old_buff->page_offset;
1945 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
1946}
1947
1948static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer,
1949 int rx_buffer_pgcnt)
1950{
1951 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
1952 struct page *page = rx_buffer->page;
1953
1954 /* avoid re-using remote and pfmemalloc pages */
1955 if (!dev_page_is_reusable(page))
1956 return false;
1957
1958#if (PAGE_SIZE < 8192)
1959	/* if we are the only owner of the page we can reuse it */
1960 if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
1961 return false;
1962#else
1963#define IGC_LAST_OFFSET \
1964 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048)
1965
1966 if (rx_buffer->page_offset > IGC_LAST_OFFSET)
1967 return false;
1968#endif
1969
1970 /* If we have drained the page fragment pool we need to update
1971 * the pagecnt_bias and page count so that we fully restock the
1972 * number of references the driver holds.
1973 */
1974 if (unlikely(pagecnt_bias == 1)) {
1975 page_ref_add(page, USHRT_MAX - 1);
1976 rx_buffer->pagecnt_bias = USHRT_MAX;
1977 }
1978
1979 return true;
1980}
1981
1982/**
1983 * igc_is_non_eop - process handling of non-EOP buffers
1984 * @rx_ring: Rx ring being processed
1985 * @rx_desc: Rx descriptor for current buffer
1986 *
1987 * This function updates next_to_clean. If the buffer is an EOP buffer,
1988 * this function exits returning false; otherwise it returns true,
1989 * indicating that this is in fact a non-EOP buffer and the frame
1990 * continues in the next descriptor.
1991 */
1992static bool igc_is_non_eop(struct igc_ring *rx_ring,
1993 union igc_adv_rx_desc *rx_desc)
1994{
1995 u32 ntc = rx_ring->next_to_clean + 1;
1996
1997 /* fetch, update, and store next to clean */
1998 ntc = (ntc < rx_ring->count) ? ntc : 0;
1999 rx_ring->next_to_clean = ntc;
2000
2001 prefetch(IGC_RX_DESC(rx_ring, ntc));
2002
2003 if (likely(igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP)))
2004 return false;
2005
2006 return true;
2007}
2008
2009/**
2010 * igc_cleanup_headers - Correct corrupted or empty headers
2011 * @rx_ring: rx descriptor ring packet is being transacted on
2012 * @rx_desc: pointer to the EOP Rx descriptor
2013 * @skb: pointer to current skb being fixed
2014 *
2015 * Address the case where we are pulling data in on pages only
2016 * and as such no data is present in the skb header.
2017 *
2018 * In addition if skb is not at least 60 bytes we need to pad it so that
2019 * it is large enough to qualify as a valid Ethernet frame.
2020 *
2021 * Returns true if an error was encountered and skb was freed.
2022 */
2023static bool igc_cleanup_headers(struct igc_ring *rx_ring,
2024 union igc_adv_rx_desc *rx_desc,
2025 struct sk_buff *skb)
2026{
2027	/* XDP packets use an error pointer, so abort at this point */
2028 if (IS_ERR(skb))
2029 return true;
2030
2031 if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) {
2032 struct net_device *netdev = rx_ring->netdev;
2033
2034 if (!(netdev->features & NETIF_F_RXALL)) {
2035 dev_kfree_skb_any(skb);
2036 return true;
2037 }
2038 }
2039
2040 /* if eth_skb_pad returns an error the skb was freed */
2041 if (eth_skb_pad(skb))
2042 return true;
2043
2044 return false;
2045}
2046
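/**
 * igc_put_rx_buffer - Recycle or release an rx buffer after use
 * @rx_ring: rx descriptor ring the buffer belongs to
 * @rx_buffer: rx buffer to put back
 * @rx_buffer_pgcnt: page refcount snapshot taken before the page was used
 *
 * If the page can be reused, hand it back to the ring; otherwise unmap
 * it and drop the references the driver still holds.
 */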
2047static void igc_put_rx_buffer(struct igc_ring *rx_ring,
2048 struct igc_rx_buffer *rx_buffer,
2049 int rx_buffer_pgcnt)
2050{
2051 if (igc_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
2052 /* hand second half of page back to the ring */
2053 igc_reuse_rx_page(rx_ring, rx_buffer);
2054 } else {
2055 /* We are not reusing the buffer so unmap it and free
2056 * any references we are holding to it
2057 */
2058 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
2059 igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
2060 IGC_RX_DMA_ATTR);
2061 __page_frag_cache_drain(rx_buffer->page,
2062 rx_buffer->pagecnt_bias);
2063 }
2064
2065 /* clear contents of rx_buffer */
2066 rx_buffer->page = NULL;
2067}
2068
2069static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring)
2070{
2071 struct igc_adapter *adapter = rx_ring->q_vector->adapter;
2072
2073 if (ring_uses_build_skb(rx_ring))
2074 return IGC_SKB_PAD;
2075 if (igc_xdp_is_enabled(adapter))
2076 return XDP_PACKET_HEADROOM;
2077
2078 return 0;
2079}
2080
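/**
 * igc_alloc_mapped_page - Ensure an rx buffer has a DMA-mapped page
 * @rx_ring: rx descriptor ring to allocate for
 * @bi: rx buffer to populate
 *
 * Allocate and DMA-map a fresh page unless the buffer already has one
 * (the common case when pages are being recycled), and prime the page
 * refcount so subsequent reuse is cheap.
 *
 * Return: true on success, false if allocation or mapping failed.
 */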
2081static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
2082 struct igc_rx_buffer *bi)
2083{
2084 struct page *page = bi->page;
2085 dma_addr_t dma;
2086
2087 /* since we are recycling buffers we should seldom need to alloc */
2088 if (likely(page))
2089 return true;
2090
2091 /* alloc new page for storage */
2092 page = dev_alloc_pages(igc_rx_pg_order(rx_ring));
2093 if (unlikely(!page)) {
2094 rx_ring->rx_stats.alloc_failed++;
2095 return false;
2096 }
2097
2098 /* map page for use */
2099 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
2100 igc_rx_pg_size(rx_ring),
2101 DMA_FROM_DEVICE,
2102 IGC_RX_DMA_ATTR);
2103
2104 /* if mapping failed free memory back to system since
2105 * there isn't much point in holding memory we can't use
2106 */
2107 if (dma_mapping_error(rx_ring->dev, dma)) {
2108 __free_page(page);
2109
2110 rx_ring->rx_stats.alloc_failed++;
2111 return false;
2112 }
2113
2114 bi->dma = dma;
2115 bi->page = page;
2116 bi->page_offset = igc_rx_offset(rx_ring);
2117 page_ref_add(page, USHRT_MAX - 1);
2118 bi->pagecnt_bias = USHRT_MAX;
2119
2120 return true;
2121}
2122
2123/**
2124 * igc_alloc_rx_buffers - Replace used receive buffers; packet split
2125 * @rx_ring: rx descriptor ring
2126 * @cleaned_count: number of buffers to clean
2127 */
2128static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count)
2129{
2130 union igc_adv_rx_desc *rx_desc;
2131 u16 i = rx_ring->next_to_use;
2132 struct igc_rx_buffer *bi;
2133 u16 bufsz;
2134
2135 /* nothing to do */
2136 if (!cleaned_count)
2137 return;
2138
2139 rx_desc = IGC_RX_DESC(rx_ring, i);
2140 bi = &rx_ring->rx_buffer_info[i];
2141 i -= rx_ring->count;
2142
2143 bufsz = igc_rx_bufsz(rx_ring);
2144
2145 do {
2146 if (!igc_alloc_mapped_page(rx_ring, bi))
2147 break;
2148
2149 /* sync the buffer for use by the device */
2150 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
2151 bi->page_offset, bufsz,
2152 DMA_FROM_DEVICE);
2153
2154 /* Refresh the desc even if buffer_addrs didn't change
2155 * because each write-back erases this info.
2156 */
2157 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
2158
2159 rx_desc++;
2160 bi++;
2161 i++;
2162 if (unlikely(!i)) {
2163 rx_desc = IGC_RX_DESC(rx_ring, 0);
2164 bi = rx_ring->rx_buffer_info;
2165 i -= rx_ring->count;
2166 }
2167
2168 /* clear the length for the next_to_use descriptor */
2169 rx_desc->wb.upper.length = 0;
2170
2171 cleaned_count--;
2172 } while (cleaned_count);
2173
2174 i += rx_ring->count;
2175
2176 if (rx_ring->next_to_use != i) {
2177 /* record the next descriptor to use */
2178 rx_ring->next_to_use = i;
2179
2180 /* update next to alloc since we have filled the ring */
2181 rx_ring->next_to_alloc = i;
2182
2183 /* Force memory writes to complete before letting h/w
2184 * know there are new descriptors to fetch. (Only
2185 * applicable for weak-ordered memory model archs,
2186 * such as IA-64).
2187 */
2188 wmb();
2189 writel(i, rx_ring->tail);
2190 }
2191}
2192
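/**
 * igc_alloc_rx_buffers_zc - Replace used receive buffers from the XSK pool
 * @ring: rx descriptor ring (zero-copy mode)
 * @count: number of buffers to allocate
 *
 * Fill descriptors with buffers taken from the ring's AF_XDP buffer pool
 * and notify hardware via the tail register once done.
 *
 * Return: true if all @count buffers were allocated, false otherwise.
 */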
2193static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count)
2194{
2195 union igc_adv_rx_desc *desc;
2196 u16 i = ring->next_to_use;
2197 struct igc_rx_buffer *bi;
2198 dma_addr_t dma;
2199 bool ok = true;
2200
2201 if (!count)
2202 return ok;
2203
2204 desc = IGC_RX_DESC(ring, i);
2205 bi = &ring->rx_buffer_info[i];
2206 i -= ring->count;
2207
2208 do {
2209 bi->xdp = xsk_buff_alloc(ring->xsk_pool);
2210 if (!bi->xdp) {
2211 ok = false;
2212 break;
2213 }
2214
2215 dma = xsk_buff_xdp_get_dma(bi->xdp);
2216 desc->read.pkt_addr = cpu_to_le64(dma);
2217
2218 desc++;
2219 bi++;
2220 i++;
2221 if (unlikely(!i)) {
2222 desc = IGC_RX_DESC(ring, 0);
2223 bi = ring->rx_buffer_info;
2224 i -= ring->count;
2225 }
2226
2227 /* Clear the length for the next_to_use descriptor. */
2228 desc->wb.upper.length = 0;
2229
2230 count--;
2231 } while (count);
2232
2233 i += ring->count;
2234
2235 if (ring->next_to_use != i) {
2236 ring->next_to_use = i;
2237
2238 /* Force memory writes to complete before letting h/w
2239 * know there are new descriptors to fetch. (Only
2240 * applicable for weak-ordered memory model archs,
2241 * such as IA-64).
2242 */
2243 wmb();
2244 writel(i, ring->tail);
2245 }
2246
2247 return ok;
2248}
2249
2250/* This function requires __netif_tx_lock to be held by the caller. */
2251static int igc_xdp_init_tx_descriptor(struct igc_ring *ring,
2252 struct xdp_frame *xdpf)
2253{
2254 struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
2255 u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
2256 u16 count, index = ring->next_to_use;
2257 struct igc_tx_buffer *head = &ring->tx_buffer_info[index];
2258 struct igc_tx_buffer *buffer = head;
2259 union igc_adv_tx_desc *desc = IGC_TX_DESC(ring, index);
2260 u32 olinfo_status, len = xdpf->len, cmd_type;
2261 void *data = xdpf->data;
2262 u16 i;
2263
2264 count = TXD_USE_COUNT(len);
2265 for (i = 0; i < nr_frags; i++)
2266 count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i]));
2267
2268 if (igc_maybe_stop_tx(ring, count + 3)) {
2269 /* this is a hard error */
2270 return -EBUSY;
2271 }
2272
2273 i = 0;
2274 head->bytecount = xdp_get_frame_len(xdpf);
2275 head->type = IGC_TX_BUFFER_TYPE_XDP;
2276 head->gso_segs = 1;
2277 head->xdpf = xdpf;
2278
2279 olinfo_status = head->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
2280 desc->read.olinfo_status = cpu_to_le32(olinfo_status);
2281
2282 for (;;) {
2283 dma_addr_t dma;
2284
2285 dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE);
2286 if (dma_mapping_error(ring->dev, dma)) {
2287 netdev_err_once(ring->netdev,
2288 "Failed to map DMA for TX\n");
2289 goto unmap;
2290 }
2291
2292 dma_unmap_len_set(buffer, len, len);
2293 dma_unmap_addr_set(buffer, dma, dma);
2294
2295 cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
2296 IGC_ADVTXD_DCMD_IFCS | len;
2297
2298 desc->read.cmd_type_len = cpu_to_le32(cmd_type);
2299 desc->read.buffer_addr = cpu_to_le64(dma);
2300
2301 buffer->protocol = 0;
2302
2303 if (++index == ring->count)
2304 index = 0;
2305
2306 if (i == nr_frags)
2307 break;
2308
2309 buffer = &ring->tx_buffer_info[index];
2310 desc = IGC_TX_DESC(ring, index);
2311 desc->read.olinfo_status = 0;
2312
2313 data = skb_frag_address(&sinfo->frags[i]);
2314 len = skb_frag_size(&sinfo->frags[i]);
2315 i++;
2316 }
2317 desc->read.cmd_type_len |= cpu_to_le32(IGC_TXD_DCMD);
2318
2319 netdev_tx_sent_queue(txring_txq(ring), head->bytecount);
2320 /* set the timestamp */
2321 head->time_stamp = jiffies;
2322 /* set next_to_watch value indicating a packet is present */
2323 head->next_to_watch = desc;
2324 ring->next_to_use = index;
2325
2326 return 0;
2327
2328unmap:
2329 for (;;) {
2330 buffer = &ring->tx_buffer_info[index];
2331 if (dma_unmap_len(buffer, len))
2332 dma_unmap_page(ring->dev,
2333 dma_unmap_addr(buffer, dma),
2334 dma_unmap_len(buffer, len),
2335 DMA_TO_DEVICE);
2336 dma_unmap_len_set(buffer, len, 0);
2337 if (buffer == head)
2338 break;
2339
2340 if (!index)
2341 index += ring->count;
2342 index--;
2343 }
2344
2345 return -ENOMEM;
2346}
2347
2348static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter,
2349 int cpu)
2350{
2351 int index = cpu;
2352
2353 if (unlikely(index < 0))
2354 index = 0;
2355
2356 while (index >= adapter->num_tx_queues)
2357 index -= adapter->num_tx_queues;
2358
2359 return adapter->tx_ring[index];
2360}
2361
2362static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp)
2363{
2364 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
2365 int cpu = smp_processor_id();
2366 struct netdev_queue *nq;
2367 struct igc_ring *ring;
2368 int res;
2369
2370 if (unlikely(!xdpf))
2371 return -EFAULT;
2372
2373 ring = igc_xdp_get_tx_ring(adapter, cpu);
2374 nq = txring_txq(ring);
2375
2376 __netif_tx_lock(nq, cpu);
2377 res = igc_xdp_init_tx_descriptor(ring, xdpf);
2378 __netif_tx_unlock(nq);
2379 return res;
2380}
2381
2382/* This function assumes rcu_read_lock() is held by the caller. */
2383static int __igc_xdp_run_prog(struct igc_adapter *adapter,
2384 struct bpf_prog *prog,
2385 struct xdp_buff *xdp)
2386{
2387 u32 act = bpf_prog_run_xdp(prog, xdp);
2388
2389 switch (act) {
2390 case XDP_PASS:
2391 return IGC_XDP_PASS;
2392 case XDP_TX:
2393 if (igc_xdp_xmit_back(adapter, xdp) < 0)
2394 goto out_failure;
2395 return IGC_XDP_TX;
2396 case XDP_REDIRECT:
2397 if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0)
2398 goto out_failure;
2399		return IGC_XDP_REDIRECT;
2401 default:
2402 bpf_warn_invalid_xdp_action(adapter->netdev, prog, act);
2403 fallthrough;
2404 case XDP_ABORTED:
2405out_failure:
2406 trace_xdp_exception(adapter->netdev, prog, act);
2407 fallthrough;
2408 case XDP_DROP:
2409 return IGC_XDP_CONSUMED;
2410 }
2411}
2412
2413static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
2414 struct xdp_buff *xdp)
2415{
2416 struct bpf_prog *prog;
2417 int res;
2418
2419 prog = READ_ONCE(adapter->xdp_prog);
2420 if (!prog) {
2421 res = IGC_XDP_PASS;
2422 goto out;
2423 }
2424
2425 res = __igc_xdp_run_prog(adapter, prog, xdp);
2426
2427out:
2428 return ERR_PTR(-res);
2429}
2430
2431/* This function assumes __netif_tx_lock is held by the caller. */
2432static void igc_flush_tx_descriptors(struct igc_ring *ring)
2433{
2434 /* Once tail pointer is updated, hardware can fetch the descriptors
2435	 * at any time, so we issue a write memory barrier here to ensure
2436	 * all memory writes are complete before the tail pointer is updated.
2437 */
2438 wmb();
2439 writel(ring->next_to_use, ring->tail);
2440}
2441
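/**
 * igc_finalize_xdp - Finish deferred XDP verdict handling for this NAPI run
 * @adapter: board private structure
 * @status: bitmask of the IGC_XDP_TX and IGC_XDP_REDIRECT verdicts seen
 *
 * Kick the XDP_TX ring under the netdev tx lock and flush any outstanding
 * xdp_do_redirect() work via xdp_do_flush().
 */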
2442static void igc_finalize_xdp(struct igc_adapter *adapter, int status)
2443{
2444 int cpu = smp_processor_id();
2445 struct netdev_queue *nq;
2446 struct igc_ring *ring;
2447
2448 if (status & IGC_XDP_TX) {
2449 ring = igc_xdp_get_tx_ring(adapter, cpu);
2450 nq = txring_txq(ring);
2451
2452 __netif_tx_lock(nq, cpu);
2453 igc_flush_tx_descriptors(ring);
2454 __netif_tx_unlock(nq);
2455 }
2456
2457 if (status & IGC_XDP_REDIRECT)
2458 xdp_do_flush();
2459}
2460
2461static void igc_update_rx_stats(struct igc_q_vector *q_vector,
2462 unsigned int packets, unsigned int bytes)
2463{
2464 struct igc_ring *ring = q_vector->rx.ring;
2465
2466 u64_stats_update_begin(&ring->rx_syncp);
2467 ring->rx_stats.packets += packets;
2468 ring->rx_stats.bytes += bytes;
2469 u64_stats_update_end(&ring->rx_syncp);
2470
2471 q_vector->rx.total_packets += packets;
2472 q_vector->rx.total_bytes += bytes;
2473}
2474
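/**
 * igc_clean_rx_irq - Clean completed descriptors from the Rx ring
 * @q_vector: interrupt vector whose Rx ring is to be serviced
 * @budget: NAPI budget, the maximum number of packets to process
 *
 * Run received buffers through the XDP program (if any), build skbs for
 * packets that pass, hand them to the GRO engine, and replenish the ring.
 *
 * Return: number of packets processed.
 */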
2475static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
2476{
2477 unsigned int total_bytes = 0, total_packets = 0;
2478 struct igc_adapter *adapter = q_vector->adapter;
2479 struct igc_ring *rx_ring = q_vector->rx.ring;
2480 struct sk_buff *skb = rx_ring->skb;
2481 u16 cleaned_count = igc_desc_unused(rx_ring);
2482 int xdp_status = 0, rx_buffer_pgcnt;
2483
2484 while (likely(total_packets < budget)) {
2485 union igc_adv_rx_desc *rx_desc;
2486 struct igc_rx_buffer *rx_buffer;
2487 unsigned int size, truesize;
2488 ktime_t timestamp = 0;
2489 struct xdp_buff xdp;
2490 int pkt_offset = 0;
2491 void *pktbuf;
2492
2493 /* return some buffers to hardware, one at a time is too slow */
2494 if (cleaned_count >= IGC_RX_BUFFER_WRITE) {
2495 igc_alloc_rx_buffers(rx_ring, cleaned_count);
2496 cleaned_count = 0;
2497 }
2498
2499 rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean);
2500 size = le16_to_cpu(rx_desc->wb.upper.length);
2501 if (!size)
2502 break;
2503
2504 /* This memory barrier is needed to keep us from reading
2505 * any other fields out of the rx_desc until we know the
2506 * descriptor has been written back
2507 */
2508 dma_rmb();
2509
2510 rx_buffer = igc_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt);
2511 truesize = igc_get_rx_frame_truesize(rx_ring, size);
2512
2513 pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset;
2514
2515 if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) {
2516 timestamp = igc_ptp_rx_pktstamp(q_vector->adapter,
2517 pktbuf);
2518 pkt_offset = IGC_TS_HDR_LEN;
2519 size -= IGC_TS_HDR_LEN;
2520 }
2521
2522 if (!skb) {
2523 xdp_init_buff(&xdp, truesize, &rx_ring->xdp_rxq);
2524 xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring),
2525 igc_rx_offset(rx_ring) + pkt_offset,
2526 size, true);
2527 xdp_buff_clear_frags_flag(&xdp);
2528
2529 skb = igc_xdp_run_prog(adapter, &xdp);
2530 }
2531
2532 if (IS_ERR(skb)) {
2533 unsigned int xdp_res = -PTR_ERR(skb);
2534
2535 switch (xdp_res) {
2536 case IGC_XDP_CONSUMED:
2537 rx_buffer->pagecnt_bias++;
2538 break;
2539 case IGC_XDP_TX:
2540 case IGC_XDP_REDIRECT:
2541 igc_rx_buffer_flip(rx_buffer, truesize);
2542 xdp_status |= xdp_res;
2543 break;
2544 }
2545
2546 total_packets++;
2547 total_bytes += size;
2548 } else if (skb)
2549 igc_add_rx_frag(rx_ring, rx_buffer, skb, size);
2550 else if (ring_uses_build_skb(rx_ring))
2551 skb = igc_build_skb(rx_ring, rx_buffer, &xdp);
2552 else
2553 skb = igc_construct_skb(rx_ring, rx_buffer, &xdp,
2554 timestamp);
2555
2556 /* exit if we failed to retrieve a buffer */
2557 if (!skb) {
2558 rx_ring->rx_stats.alloc_failed++;
2559 rx_buffer->pagecnt_bias++;
2560 break;
2561 }
2562
2563 igc_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt);
2564 cleaned_count++;
2565
2566 /* fetch next buffer in frame if non-eop */
2567 if (igc_is_non_eop(rx_ring, rx_desc))
2568 continue;
2569
2570 /* verify the packet layout is correct */
2571 if (igc_cleanup_headers(rx_ring, rx_desc, skb)) {
2572 skb = NULL;
2573 continue;
2574 }
2575
2576 /* probably a little skewed due to removing CRC */
2577 total_bytes += skb->len;
2578
2579 /* populate checksum, VLAN, and protocol */
2580 igc_process_skb_fields(rx_ring, rx_desc, skb);
2581
2582 napi_gro_receive(&q_vector->napi, skb);
2583
2584 /* reset skb pointer */
2585 skb = NULL;
2586
2587 /* update budget accounting */
2588 total_packets++;
2589 }
2590
2591 if (xdp_status)
2592 igc_finalize_xdp(adapter, xdp_status);
2593
2594 /* place incomplete frames back on ring for completion */
2595 rx_ring->skb = skb;
2596
2597 igc_update_rx_stats(q_vector, total_packets, total_bytes);
2598
2599 if (cleaned_count)
2600 igc_alloc_rx_buffers(rx_ring, cleaned_count);
2601
2602 return total_packets;
2603}
2604
2605static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
2606 struct xdp_buff *xdp)
2607{
2608 unsigned int totalsize = xdp->data_end - xdp->data_meta;
2609 unsigned int metasize = xdp->data - xdp->data_meta;
2610 struct sk_buff *skb;
2611
2612 net_prefetch(xdp->data_meta);
2613
2614 skb = __napi_alloc_skb(&ring->q_vector->napi, totalsize,
2615 GFP_ATOMIC | __GFP_NOWARN);
2616 if (unlikely(!skb))
2617 return NULL;
2618
2619 memcpy(__skb_put(skb, totalsize), xdp->data_meta,
2620 ALIGN(totalsize, sizeof(long)));
2621
2622 if (metasize) {
2623 skb_metadata_set(skb, metasize);
2624 __skb_pull(skb, metasize);
2625 }
2626
2627 return skb;
2628}
2629
2630static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector,
2631 union igc_adv_rx_desc *desc,
2632 struct xdp_buff *xdp,
2633 ktime_t timestamp)
2634{
2635 struct igc_ring *ring = q_vector->rx.ring;
2636 struct sk_buff *skb;
2637
2638 skb = igc_construct_skb_zc(ring, xdp);
2639 if (!skb) {
2640 ring->rx_stats.alloc_failed++;
2641 return;
2642 }
2643
2644 if (timestamp)
2645 skb_hwtstamps(skb)->hwtstamp = timestamp;
2646
2647 if (igc_cleanup_headers(ring, desc, skb))
2648 return;
2649
2650 igc_process_skb_fields(ring, desc, skb);
2651 napi_gro_receive(&q_vector->napi, skb);
2652}
2653
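/**
 * igc_clean_rx_irq_zc - Clean completed descriptors, AF_XDP zero-copy mode
 * @q_vector: interrupt vector whose Rx ring is to be serviced
 * @budget: NAPI budget, the maximum number of packets to process
 *
 * Zero-copy counterpart of igc_clean_rx_irq(): buffers come from the XSK
 * pool, the XDP program runs on them directly, and only XDP_PASS frames
 * are copied into skbs.
 *
 * Return: number of packets processed; when the need_wakeup mechanism is
 * not in use, the full budget is returned on buffer allocation failure so
 * NAPI keeps polling.
 */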
2654static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
2655{
2656 struct igc_adapter *adapter = q_vector->adapter;
2657 struct igc_ring *ring = q_vector->rx.ring;
2658 u16 cleaned_count = igc_desc_unused(ring);
2659 int total_bytes = 0, total_packets = 0;
2660 u16 ntc = ring->next_to_clean;
2661 struct bpf_prog *prog;
2662 bool failure = false;
2663 int xdp_status = 0;
2664
2665 rcu_read_lock();
2666
2667 prog = READ_ONCE(adapter->xdp_prog);
2668
2669 while (likely(total_packets < budget)) {
2670 union igc_adv_rx_desc *desc;
2671 struct igc_rx_buffer *bi;
2672 ktime_t timestamp = 0;
2673 unsigned int size;
2674 int res;
2675
2676 desc = IGC_RX_DESC(ring, ntc);
2677 size = le16_to_cpu(desc->wb.upper.length);
2678 if (!size)
2679 break;
2680
2681 /* This memory barrier is needed to keep us from reading
2682 * any other fields out of the rx_desc until we know the
2683 * descriptor has been written back
2684 */
2685 dma_rmb();
2686
2687 bi = &ring->rx_buffer_info[ntc];
2688
2689 if (igc_test_staterr(desc, IGC_RXDADV_STAT_TSIP)) {
2690 timestamp = igc_ptp_rx_pktstamp(q_vector->adapter,
2691 bi->xdp->data);
2692
2693 bi->xdp->data += IGC_TS_HDR_LEN;
2694
2695			/* The HW timestamp has been copied into a local variable, so
2696			 * the metadata length must be 0 when the XDP program runs.
2697 */
2698 bi->xdp->data_meta += IGC_TS_HDR_LEN;
2699 size -= IGC_TS_HDR_LEN;
2700 }
2701
2702 bi->xdp->data_end = bi->xdp->data + size;
2703 xsk_buff_dma_sync_for_cpu(bi->xdp, ring->xsk_pool);
2704
2705 res = __igc_xdp_run_prog(adapter, prog, bi->xdp);
2706 switch (res) {
2707 case IGC_XDP_PASS:
2708 igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp);
2709 fallthrough;
2710 case IGC_XDP_CONSUMED:
2711 xsk_buff_free(bi->xdp);
2712 break;
2713 case IGC_XDP_TX:
2714 case IGC_XDP_REDIRECT:
2715 xdp_status |= res;
2716 break;
2717 }
2718
2719 bi->xdp = NULL;
2720 total_bytes += size;
2721 total_packets++;
2722 cleaned_count++;
2723 ntc++;
2724 if (ntc == ring->count)
2725 ntc = 0;
2726 }
2727
2728 ring->next_to_clean = ntc;
2729 rcu_read_unlock();
2730
2731 if (cleaned_count >= IGC_RX_BUFFER_WRITE)
2732 failure = !igc_alloc_rx_buffers_zc(ring, cleaned_count);
2733
2734 if (xdp_status)
2735 igc_finalize_xdp(adapter, xdp_status);
2736
2737 igc_update_rx_stats(q_vector, total_packets, total_bytes);
2738
2739 if (xsk_uses_need_wakeup(ring->xsk_pool)) {
2740 if (failure || ring->next_to_clean == ring->next_to_use)
2741 xsk_set_rx_need_wakeup(ring->xsk_pool);
2742 else
2743 xsk_clear_rx_need_wakeup(ring->xsk_pool);
2744 return total_packets;
2745 }
2746
2747 return failure ? budget : total_packets;
2748}
2749
2750static void igc_update_tx_stats(struct igc_q_vector *q_vector,
2751 unsigned int packets, unsigned int bytes)
2752{
2753 struct igc_ring *ring = q_vector->tx.ring;
2754
2755 u64_stats_update_begin(&ring->tx_syncp);
2756 ring->tx_stats.bytes += bytes;
2757 ring->tx_stats.packets += packets;
2758 u64_stats_update_end(&ring->tx_syncp);
2759
2760 q_vector->tx.total_bytes += bytes;
2761 q_vector->tx.total_packets += packets;
2762}
2763
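/**
 * igc_xdp_xmit_zc - Transmit pending descriptors from the XSK Tx pool
 * @ring: Tx ring bound to an AF_XDP socket
 *
 * Pull descriptors from the AF_XDP Tx pool, translate them into hardware
 * Tx descriptors, and kick the ring, bounded by the number of unused
 * descriptors. Does nothing while the link is down.
 */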
2764static void igc_xdp_xmit_zc(struct igc_ring *ring)
2765{
2766 struct xsk_buff_pool *pool = ring->xsk_pool;
2767 struct netdev_queue *nq = txring_txq(ring);
2768 union igc_adv_tx_desc *tx_desc = NULL;
2769 int cpu = smp_processor_id();
2770 u16 ntu = ring->next_to_use;
2771 struct xdp_desc xdp_desc;
2772 u16 budget;
2773
2774 if (!netif_carrier_ok(ring->netdev))
2775 return;
2776
2777 __netif_tx_lock(nq, cpu);
2778
2779 budget = igc_desc_unused(ring);
2780
2781 while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) {
2782 u32 cmd_type, olinfo_status;
2783 struct igc_tx_buffer *bi;
2784 dma_addr_t dma;
2785
2786 cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
2787 IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
2788 xdp_desc.len;
2789 olinfo_status = xdp_desc.len << IGC_ADVTXD_PAYLEN_SHIFT;
2790
2791 dma = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2792 xsk_buff_raw_dma_sync_for_device(pool, dma, xdp_desc.len);
2793
2794 tx_desc = IGC_TX_DESC(ring, ntu);
2795 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
2796 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
2797 tx_desc->read.buffer_addr = cpu_to_le64(dma);
2798
2799 bi = &ring->tx_buffer_info[ntu];
2800 bi->type = IGC_TX_BUFFER_TYPE_XSK;
2801 bi->protocol = 0;
2802 bi->bytecount = xdp_desc.len;
2803 bi->gso_segs = 1;
2804 bi->time_stamp = jiffies;
2805 bi->next_to_watch = tx_desc;
2806
2807 netdev_tx_sent_queue(txring_txq(ring), xdp_desc.len);
2808
2809 ntu++;
2810 if (ntu == ring->count)
2811 ntu = 0;
2812 }
2813
2814 ring->next_to_use = ntu;
2815 if (tx_desc) {
2816 igc_flush_tx_descriptors(ring);
2817 xsk_tx_release(pool);
2818 }
2819
2820 __netif_tx_unlock(nq);
2821}
2822
2823/**
2824 * igc_clean_tx_irq - Reclaim resources after transmit completes
2825 * @q_vector: pointer to q_vector containing needed info
2826 * @napi_budget: Used to determine if we are in netpoll
2827 *
2828 * returns true if ring is completely cleaned
2829 */
2830static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
2831{
2832 struct igc_adapter *adapter = q_vector->adapter;
2833 unsigned int total_bytes = 0, total_packets = 0;
2834 unsigned int budget = q_vector->tx.work_limit;
2835 struct igc_ring *tx_ring = q_vector->tx.ring;
2836 unsigned int i = tx_ring->next_to_clean;
2837 struct igc_tx_buffer *tx_buffer;
2838 union igc_adv_tx_desc *tx_desc;
2839 u32 xsk_frames = 0;
2840
2841 if (test_bit(__IGC_DOWN, &adapter->state))
2842 return true;
2843
2844 tx_buffer = &tx_ring->tx_buffer_info[i];
2845 tx_desc = IGC_TX_DESC(tx_ring, i);
2846 i -= tx_ring->count;
2847
2848 do {
2849 union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
2850
2851 /* if next_to_watch is not set then there is no work pending */
2852 if (!eop_desc)
2853 break;
2854
2855 /* prevent any other reads prior to eop_desc */
2856 smp_rmb();
2857
2858 /* if DD is not set pending work has not been completed */
2859 if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD)))
2860 break;
2861
2862 /* clear next_to_watch to prevent false hangs */
2863 tx_buffer->next_to_watch = NULL;
2864
2865 /* update the statistics for this packet */
2866 total_bytes += tx_buffer->bytecount;
2867 total_packets += tx_buffer->gso_segs;
2868
2869 switch (tx_buffer->type) {
2870 case IGC_TX_BUFFER_TYPE_XSK:
2871 xsk_frames++;
2872 break;
2873 case IGC_TX_BUFFER_TYPE_XDP:
2874 xdp_return_frame(tx_buffer->xdpf);
2875 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
2876 break;
2877 case IGC_TX_BUFFER_TYPE_SKB:
2878 napi_consume_skb(tx_buffer->skb, napi_budget);
2879 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
2880 break;
2881 default:
2882 netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n");
2883 break;
2884 }
2885
2886 /* clear last DMA location and unmap remaining buffers */
2887 while (tx_desc != eop_desc) {
2888 tx_buffer++;
2889 tx_desc++;
2890 i++;
2891 if (unlikely(!i)) {
2892 i -= tx_ring->count;
2893 tx_buffer = tx_ring->tx_buffer_info;
2894 tx_desc = IGC_TX_DESC(tx_ring, 0);
2895 }
2896
2897 /* unmap any remaining paged data */
2898 if (dma_unmap_len(tx_buffer, len))
2899 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
2900 }
2901
2902 /* move us one more past the eop_desc for start of next pkt */
2903 tx_buffer++;
2904 tx_desc++;
2905 i++;
2906 if (unlikely(!i)) {
2907 i -= tx_ring->count;
2908 tx_buffer = tx_ring->tx_buffer_info;
2909 tx_desc = IGC_TX_DESC(tx_ring, 0);
2910 }
2911
2912 /* issue prefetch for next Tx descriptor */
2913 prefetch(tx_desc);
2914
2915 /* update budget accounting */
2916 budget--;
2917 } while (likely(budget));
2918
2919 netdev_tx_completed_queue(txring_txq(tx_ring),
2920 total_packets, total_bytes);
2921
2922 i += tx_ring->count;
2923 tx_ring->next_to_clean = i;
2924
2925 igc_update_tx_stats(q_vector, total_packets, total_bytes);
2926
2927 if (tx_ring->xsk_pool) {
2928 if (xsk_frames)
2929 xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);
2930 if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
2931 xsk_set_tx_need_wakeup(tx_ring->xsk_pool);
2932 igc_xdp_xmit_zc(tx_ring);
2933 }
2934
2935 if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
2936 struct igc_hw *hw = &adapter->hw;
2937
2938		/* Detect a transmit hang in hardware; this serializes the
2939		 * check with the clearing of time_stamp and the movement of i
2940 */
2941 clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
2942 if (tx_buffer->next_to_watch &&
2943 time_after(jiffies, tx_buffer->time_stamp +
2944 (adapter->tx_timeout_factor * HZ)) &&
2945 !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF) &&
2946 (rd32(IGC_TDH(tx_ring->reg_idx)) !=
2947 readl(tx_ring->tail))) {
2948 /* detected Tx unit hang */
2949 netdev_err(tx_ring->netdev,
2950 "Detected Tx Unit Hang\n"
2951 " Tx Queue <%d>\n"
2952 " TDH <%x>\n"
2953 " TDT <%x>\n"
2954 " next_to_use <%x>\n"
2955 " next_to_clean <%x>\n"
2956 "buffer_info[next_to_clean]\n"
2957 " time_stamp <%lx>\n"
2958 " next_to_watch <%p>\n"
2959 " jiffies <%lx>\n"
2960 " desc.status <%x>\n",
2961 tx_ring->queue_index,
2962 rd32(IGC_TDH(tx_ring->reg_idx)),
2963 readl(tx_ring->tail),
2964 tx_ring->next_to_use,
2965 tx_ring->next_to_clean,
2966 tx_buffer->time_stamp,
2967 tx_buffer->next_to_watch,
2968 jiffies,
2969 tx_buffer->next_to_watch->wb.status);
2970 netif_stop_subqueue(tx_ring->netdev,
2971 tx_ring->queue_index);
2972
2973 /* we are about to reset, no point in enabling stuff */
2974 return true;
2975 }
2976 }
2977
2978#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
2979 if (unlikely(total_packets &&
2980 netif_carrier_ok(tx_ring->netdev) &&
2981 igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
2982 /* Make sure that anybody stopping the queue after this
2983 * sees the new next_to_clean.
2984 */
2985 smp_mb();
2986 if (__netif_subqueue_stopped(tx_ring->netdev,
2987 tx_ring->queue_index) &&
2988 !(test_bit(__IGC_DOWN, &adapter->state))) {
2989 netif_wake_subqueue(tx_ring->netdev,
2990 tx_ring->queue_index);
2991
2992 u64_stats_update_begin(&tx_ring->tx_syncp);
2993 tx_ring->tx_stats.restart_queue++;
2994 u64_stats_update_end(&tx_ring->tx_syncp);
2995 }
2996 }
2997
2998 return !!budget;
2999}
3000
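/**
 * igc_find_mac_filter - Look up a MAC address filter in hardware
 * @adapter: Pointer to adapter
 * @type: MAC address filter type (source or destination)
 * @addr: MAC address to look for
 *
 * Scan the RAL/RAH register pairs for an enabled entry matching both
 * the address and the filter type.
 *
 * Return: filter index on match, -1 if not found.
 */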
3001static int igc_find_mac_filter(struct igc_adapter *adapter,
3002 enum igc_mac_filter_type type, const u8 *addr)
3003{
3004 struct igc_hw *hw = &adapter->hw;
3005 int max_entries = hw->mac.rar_entry_count;
3006 u32 ral, rah;
3007 int i;
3008
3009 for (i = 0; i < max_entries; i++) {
3010 ral = rd32(IGC_RAL(i));
3011 rah = rd32(IGC_RAH(i));
3012
3013 if (!(rah & IGC_RAH_AV))
3014 continue;
3015 if (!!(rah & IGC_RAH_ASEL_SRC_ADDR) != type)
3016 continue;
3017 if ((rah & IGC_RAH_RAH_MASK) !=
3018 le16_to_cpup((__le16 *)(addr + 4)))
3019 continue;
3020 if (ral != le32_to_cpup((__le32 *)(addr)))
3021 continue;
3022
3023 return i;
3024 }
3025
3026 return -1;
3027}
3028
3029static int igc_get_avail_mac_filter_slot(struct igc_adapter *adapter)
3030{
3031 struct igc_hw *hw = &adapter->hw;
3032 int max_entries = hw->mac.rar_entry_count;
3033 u32 rah;
3034 int i;
3035
3036 for (i = 0; i < max_entries; i++) {
3037 rah = rd32(IGC_RAH(i));
3038
3039 if (!(rah & IGC_RAH_AV))
3040 return i;
3041 }
3042
3043 return -1;
3044}
3045
3046/**
3047 * igc_add_mac_filter() - Add MAC address filter
3048 * @adapter: Pointer to adapter where the filter should be added
3049 * @type: MAC address filter type (source or destination)
3050 * @addr: MAC address
3051 * @queue: If non-negative, queue assignment feature is enabled and frames
3052 * matching the filter are enqueued onto 'queue'. Otherwise, queue
3053 * assignment is disabled.
3054 *
3055 * Return: 0 in case of success, negative errno code otherwise.
3056 */
3057static int igc_add_mac_filter(struct igc_adapter *adapter,
3058 enum igc_mac_filter_type type, const u8 *addr,
3059 int queue)
3060{
3061 struct net_device *dev = adapter->netdev;
3062 int index;
3063
3064 index = igc_find_mac_filter(adapter, type, addr);
3065 if (index >= 0)
3066 goto update_filter;
3067
3068 index = igc_get_avail_mac_filter_slot(adapter);
3069 if (index < 0)
3070 return -ENOSPC;
3071
3072 netdev_dbg(dev, "Add MAC address filter: index %d type %s address %pM queue %d\n",
3073 index, type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src",
3074 addr, queue);
3075
3076update_filter:
3077 igc_set_mac_filter_hw(adapter, index, type, addr, queue);
3078 return 0;
3079}
3080
3081/**
3082 * igc_del_mac_filter() - Delete MAC address filter
3083 * @adapter: Pointer to adapter where the filter should be deleted from
3084 * @type: MAC address filter type (source or destination)
3085 * @addr: MAC address
3086 */
3087static void igc_del_mac_filter(struct igc_adapter *adapter,
3088 enum igc_mac_filter_type type, const u8 *addr)
3089{
3090 struct net_device *dev = adapter->netdev;
3091 int index;
3092
3093 index = igc_find_mac_filter(adapter, type, addr);
3094 if (index < 0)
3095 return;
3096
3097 if (index == 0) {
3098 /* If this is the default filter, we don't actually delete it.
3099 * We just reset to its default value i.e. disable queue
3100 * assignment.
3101 */
3102		netdev_dbg(dev, "Disable default MAC filter queue assignment\n");
3103
3104 igc_set_mac_filter_hw(adapter, 0, type, addr, -1);
3105 } else {
3106 netdev_dbg(dev, "Delete MAC address filter: index %d type %s address %pM\n",
3107 index,
3108 type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src",
3109 addr);
3110
3111 igc_clear_mac_filter_hw(adapter, index);
3112 }
3113}
3114
3115/**
3116 * igc_add_vlan_prio_filter() - Add VLAN priority filter
3117 * @adapter: Pointer to adapter where the filter should be added
3118 * @prio: VLAN priority value
3119 * @queue: Queue number which matching frames are assigned to
3120 *
3121 * Return: 0 in case of success, negative errno code otherwise.
3122 */
3123static int igc_add_vlan_prio_filter(struct igc_adapter *adapter, int prio,
3124 int queue)
3125{
3126 struct net_device *dev = adapter->netdev;
3127 struct igc_hw *hw = &adapter->hw;
3128 u32 vlanpqf;
3129
3130 vlanpqf = rd32(IGC_VLANPQF);
3131
3132 if (vlanpqf & IGC_VLANPQF_VALID(prio)) {
3133 netdev_dbg(dev, "VLAN priority filter already in use\n");
3134 return -EEXIST;
3135 }
3136
3137 vlanpqf |= IGC_VLANPQF_QSEL(prio, queue);
3138 vlanpqf |= IGC_VLANPQF_VALID(prio);
3139
3140 wr32(IGC_VLANPQF, vlanpqf);
3141
3142 netdev_dbg(dev, "Add VLAN priority filter: prio %d queue %d\n",
3143 prio, queue);
3144 return 0;
3145}
3146
3147/**
3148 * igc_del_vlan_prio_filter() - Delete VLAN priority filter
3149 * @adapter: Pointer to adapter where the filter should be deleted from
3150 * @prio: VLAN priority value
3151 */
3152static void igc_del_vlan_prio_filter(struct igc_adapter *adapter, int prio)
3153{
3154 struct igc_hw *hw = &adapter->hw;
3155 u32 vlanpqf;
3156
3157 vlanpqf = rd32(IGC_VLANPQF);
3158
3159 vlanpqf &= ~IGC_VLANPQF_VALID(prio);
3160 vlanpqf &= ~IGC_VLANPQF_QSEL(prio, IGC_VLANPQF_QUEUE_MASK);
3161
3162 wr32(IGC_VLANPQF, vlanpqf);
3163
3164 netdev_dbg(adapter->netdev, "Delete VLAN priority filter: prio %d\n",
3165 prio);
3166}
3167
3168static int igc_get_avail_etype_filter_slot(struct igc_adapter *adapter)
3169{
3170 struct igc_hw *hw = &adapter->hw;
3171 int i;
3172
3173 for (i = 0; i < MAX_ETYPE_FILTER; i++) {
3174 u32 etqf = rd32(IGC_ETQF(i));
3175
3176 if (!(etqf & IGC_ETQF_FILTER_ENABLE))
3177 return i;
3178 }
3179
3180 return -1;
3181}
3182
3183/**
3184 * igc_add_etype_filter() - Add ethertype filter
3185 * @adapter: Pointer to adapter where the filter should be added
3186 * @etype: Ethertype value
3187 * @queue: If non-negative, queue assignment feature is enabled and frames
3188 * matching the filter are enqueued onto 'queue'. Otherwise, queue
3189 * assignment is disabled.
3190 *
3191 * Return: 0 in case of success, negative errno code otherwise.
3192 */
3193static int igc_add_etype_filter(struct igc_adapter *adapter, u16 etype,
3194 int queue)
3195{
3196 struct igc_hw *hw = &adapter->hw;
3197 int index;
3198 u32 etqf;
3199
3200 index = igc_get_avail_etype_filter_slot(adapter);
3201 if (index < 0)
3202 return -ENOSPC;
3203
3204 etqf = rd32(IGC_ETQF(index));
3205
3206 etqf &= ~IGC_ETQF_ETYPE_MASK;
3207 etqf |= etype;
3208
3209 if (queue >= 0) {
3210 etqf &= ~IGC_ETQF_QUEUE_MASK;
3211 etqf |= (queue << IGC_ETQF_QUEUE_SHIFT);
3212 etqf |= IGC_ETQF_QUEUE_ENABLE;
3213 }
3214
3215 etqf |= IGC_ETQF_FILTER_ENABLE;
3216
3217 wr32(IGC_ETQF(index), etqf);
3218
3219 netdev_dbg(adapter->netdev, "Add ethertype filter: etype %04x queue %d\n",
3220 etype, queue);
3221 return 0;
3222}
3223
3224static int igc_find_etype_filter(struct igc_adapter *adapter, u16 etype)
3225{
3226 struct igc_hw *hw = &adapter->hw;
3227 int i;
3228
3229 for (i = 0; i < MAX_ETYPE_FILTER; i++) {
3230 u32 etqf = rd32(IGC_ETQF(i));
3231
3232 if ((etqf & IGC_ETQF_ETYPE_MASK) == etype)
3233 return i;
3234 }
3235
3236 return -1;
3237}
3238
3239/**
3240 * igc_del_etype_filter() - Delete ethertype filter
3241 * @adapter: Pointer to adapter where the filter should be deleted from
3242 * @etype: Ethertype value
3243 */
3244static void igc_del_etype_filter(struct igc_adapter *adapter, u16 etype)
3245{
3246 struct igc_hw *hw = &adapter->hw;
3247 int index;
3248
3249 index = igc_find_etype_filter(adapter, etype);
3250 if (index < 0)
3251 return;
3252
3253 wr32(IGC_ETQF(index), 0);
3254
3255 netdev_dbg(adapter->netdev, "Delete ethertype filter: etype %04x\n",
3256 etype);
3257}
3258
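/**
 * igc_flex_filter_select - Select the host table registers for a flex filter
 * @adapter: Pointer to adapter
 * @input: flex filter to be programmed
 * @fhft: on success, base of the flex host filter table register block
 *
 * The 32 flex filters are spread over four banks of eight; program the
 * FHFTSL indirection register to select the right bank and compute the
 * register base for the filter within it.
 *
 * Return: 0 on success, -EINVAL for an out-of-range filter index.
 */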
3259static int igc_flex_filter_select(struct igc_adapter *adapter,
3260 struct igc_flex_filter *input,
3261 u32 *fhft)
3262{
3263 struct igc_hw *hw = &adapter->hw;
3264 u8 fhft_index;
3265 u32 fhftsl;
3266
3267 if (input->index >= MAX_FLEX_FILTER) {
3268 dev_err(&adapter->pdev->dev, "Wrong Flex Filter index selected!\n");
3269 return -EINVAL;
3270 }
3271
3272 /* Indirect table select register */
3273 fhftsl = rd32(IGC_FHFTSL);
3274 fhftsl &= ~IGC_FHFTSL_FTSL_MASK;
3275 switch (input->index) {
3276 case 0 ... 7:
3277 fhftsl |= 0x00;
3278 break;
3279 case 8 ... 15:
3280 fhftsl |= 0x01;
3281 break;
3282 case 16 ... 23:
3283 fhftsl |= 0x02;
3284 break;
3285 case 24 ... 31:
3286 fhftsl |= 0x03;
3287 break;
3288 }
3289 wr32(IGC_FHFTSL, fhftsl);
3290
3291 /* Normalize index down to host table register */
3292 fhft_index = input->index % 8;
3293
3294 *fhft = (fhft_index < 4) ? IGC_FHFT(fhft_index) :
3295 IGC_FHFT_EXT(fhft_index - 4);
3296
3297 return 0;
3298}
3299
3300static int igc_write_flex_filter_ll(struct igc_adapter *adapter,
3301 struct igc_flex_filter *input)
3302{
3303 struct device *dev = &adapter->pdev->dev;
3304 struct igc_hw *hw = &adapter->hw;
3305 u8 *data = input->data;
3306 u8 *mask = input->mask;
3307 u32 queuing;
3308 u32 fhft;
3309 u32 wufc;
3310 int ret;
3311 int i;
3312
3313	/* The length has to be a multiple of 8; otherwise the filter will
3314	 * fail, so bail out early to avoid surprises later.
3315 */
3316 if (input->length % 8 != 0) {
3317		dev_err(dev, "The length of a flex filter has to be 8-byte aligned!\n");
3318 return -EINVAL;
3319 }
3320
3321 /* Select corresponding flex filter register and get base for host table. */
3322 ret = igc_flex_filter_select(adapter, input, &fhft);
3323 if (ret)
3324 return ret;
3325
3326	/* When adding a filter, globally disable the flex filter feature
3327	 * first, as recommended by the datasheet.
3328 */
3329 wufc = rd32(IGC_WUFC);
3330 wufc &= ~IGC_WUFC_FLEX_HQ;
3331 wr32(IGC_WUFC, wufc);
3332
3333 /* Configure filter */
3334 queuing = input->length & IGC_FHFT_LENGTH_MASK;
3335 queuing |= (input->rx_queue << IGC_FHFT_QUEUE_SHIFT) & IGC_FHFT_QUEUE_MASK;
3336 queuing |= (input->prio << IGC_FHFT_PRIO_SHIFT) & IGC_FHFT_PRIO_MASK;
3337
3338 if (input->immediate_irq)
3339 queuing |= IGC_FHFT_IMM_INT;
3340
3341 if (input->drop)
3342 queuing |= IGC_FHFT_DROP;
3343
3344 wr32(fhft + 0xFC, queuing);
3345
3346 /* Write data (128 byte) and mask (128 bit) */
3347 for (i = 0; i < 16; ++i) {
3348 const size_t data_idx = i * 8;
3349 const size_t row_idx = i * 16;
3350 u32 dw0 =
3351 (data[data_idx + 0] << 0) |
3352 (data[data_idx + 1] << 8) |
3353 (data[data_idx + 2] << 16) |
3354 (data[data_idx + 3] << 24);
3355 u32 dw1 =
3356 (data[data_idx + 4] << 0) |
3357 (data[data_idx + 5] << 8) |
3358 (data[data_idx + 6] << 16) |
3359 (data[data_idx + 7] << 24);
3360 u32 tmp;
3361
3362 /* Write row: dw0, dw1 and mask */
3363 wr32(fhft + row_idx, dw0);
3364 wr32(fhft + row_idx + 4, dw1);
3365
3366 /* mask is only valid for MASK(7, 0) */
3367 tmp = rd32(fhft + row_idx + 8);
3368 tmp &= ~GENMASK(7, 0);
3369 tmp |= mask[i];
3370 wr32(fhft + row_idx + 8, tmp);
3371 }
3372
3373 /* Enable filter. */
3374 wufc |= IGC_WUFC_FLEX_HQ;
3375	if (input->index >= 8) {
3376		/* Filters 0-7 are enabled via WUFC; filters 8-31 via WUFC_EXT. */
3377 u32 wufc_ext = rd32(IGC_WUFC_EXT);
3378
3379 wufc_ext |= (IGC_WUFC_EXT_FLX8 << (input->index - 8));
3380
3381 wr32(IGC_WUFC_EXT, wufc_ext);
3382 } else {
3383 wufc |= (IGC_WUFC_FLX0 << input->index);
3384 }
3385 wr32(IGC_WUFC, wufc);
3386
3387 dev_dbg(&adapter->pdev->dev, "Added flex filter %u to HW.\n",
3388 input->index);
3389
3390 return 0;
3391}
3392
3393static void igc_flex_filter_add_field(struct igc_flex_filter *flex,
3394 const void *src, unsigned int offset,
3395 size_t len, const void *mask)
3396{
3397 int i;
3398
3399 /* data */
3400 memcpy(&flex->data[offset], src, len);
3401
3402 /* mask */
3403 for (i = 0; i < len; ++i) {
3404 const unsigned int idx = i + offset;
3405 const u8 *ptr = mask;
3406
3407 if (mask) {
3408 if (ptr[i] & 0xff)
3409 flex->mask[idx / 8] |= BIT(idx % 8);
3410
3411 continue;
3412 }
3413
3414 flex->mask[idx / 8] |= BIT(idx % 8);
3415 }
3416}
3417
3418static int igc_find_avail_flex_filter_slot(struct igc_adapter *adapter)
3419{
3420 struct igc_hw *hw = &adapter->hw;
3421 u32 wufc, wufc_ext;
3422 int i;
3423
3424 wufc = rd32(IGC_WUFC);
3425 wufc_ext = rd32(IGC_WUFC_EXT);
3426
3427 for (i = 0; i < MAX_FLEX_FILTER; i++) {
3428 if (i < 8) {
3429 if (!(wufc & (IGC_WUFC_FLX0 << i)))
3430 return i;
3431 } else {
3432 if (!(wufc_ext & (IGC_WUFC_EXT_FLX8 << (i - 8))))
3433 return i;
3434 }
3435 }
3436
3437 return -ENOSPC;
3438}
3439
3440static bool igc_flex_filter_in_use(struct igc_adapter *adapter)
3441{
3442 struct igc_hw *hw = &adapter->hw;
3443 u32 wufc, wufc_ext;
3444
3445 wufc = rd32(IGC_WUFC);
3446 wufc_ext = rd32(IGC_WUFC_EXT);
3447
3448 if (wufc & IGC_WUFC_FILTER_MASK)
3449 return true;
3450
3451 if (wufc_ext & IGC_WUFC_EXT_FILTER_MASK)
3452 return true;
3453
3454 return false;
3455}
3456
3457static int igc_add_flex_filter(struct igc_adapter *adapter,
3458 struct igc_nfc_rule *rule)
3459{
3460 struct igc_flex_filter flex = { };
3461 struct igc_nfc_filter *filter = &rule->filter;
3462 unsigned int eth_offset, user_offset;
3463 int ret, index;
3464 bool vlan;
3465
3466 index = igc_find_avail_flex_filter_slot(adapter);
3467 if (index < 0)
3468 return -ENOSPC;
3469
3470 /* Construct the flex filter:
3471 * -> dest_mac [6]
3472 * -> src_mac [6]
3473 * -> tpid [2]
3474 * -> vlan tci [2]
3475 * -> ether type [2]
3476 * -> user data [8]
3477 * -> = 26 bytes => 32 length
3478 */
3479 flex.index = index;
3480 flex.length = 32;
3481 flex.rx_queue = rule->action;
3482
3483 vlan = rule->filter.vlan_tci || rule->filter.vlan_etype;
3484 eth_offset = vlan ? 16 : 12;
3485 user_offset = vlan ? 18 : 14;
3486
3487 /* Add destination MAC */
3488 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
3489 igc_flex_filter_add_field(&flex, &filter->dst_addr, 0,
3490 ETH_ALEN, NULL);
3491
3492 /* Add source MAC */
3493 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR)
3494 igc_flex_filter_add_field(&flex, &filter->src_addr, 6,
3495 ETH_ALEN, NULL);
3496
3497 /* Add VLAN etype */
3498 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE)
3499 igc_flex_filter_add_field(&flex, &filter->vlan_etype, 12,
3500 sizeof(filter->vlan_etype),
3501 NULL);
3502
3503 /* Add VLAN TCI */
3504 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI)
3505 igc_flex_filter_add_field(&flex, &filter->vlan_tci, 14,
3506 sizeof(filter->vlan_tci), NULL);
3507
3508 /* Add Ether type */
3509 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
3510 __be16 etype = cpu_to_be16(filter->etype);
3511
3512 igc_flex_filter_add_field(&flex, &etype, eth_offset,
3513 sizeof(etype), NULL);
3514 }
3515
3516 /* Add user data */
3517 if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA)
3518 igc_flex_filter_add_field(&flex, &filter->user_data,
3519 user_offset,
3520 sizeof(filter->user_data),
3521 filter->user_mask);
3522
3523 /* Add it down to the hardware and enable it. */
3524 ret = igc_write_flex_filter_ll(adapter, &flex);
3525 if (ret)
3526 return ret;
3527
3528 filter->flex_index = index;
3529
3530 return 0;
3531}
3532
3533static void igc_del_flex_filter(struct igc_adapter *adapter,
3534 u16 reg_index)
3535{
3536 struct igc_hw *hw = &adapter->hw;
3537 u32 wufc;
3538
3539	/* Just disable the filter; the filter table itself is kept intact.
3540	 * A subsequent igc_add_flex_filter() will simply overwrite the
3541	 * old data.
3542 */
3543	if (reg_index >= 8) {
3544 u32 wufc_ext = rd32(IGC_WUFC_EXT);
3545
3546 wufc_ext &= ~(IGC_WUFC_EXT_FLX8 << (reg_index - 8));
3547 wr32(IGC_WUFC_EXT, wufc_ext);
3548 } else {
3549 wufc = rd32(IGC_WUFC);
3550
3551 wufc &= ~(IGC_WUFC_FLX0 << reg_index);
3552 wr32(IGC_WUFC, wufc);
3553 }
3554
3555 if (igc_flex_filter_in_use(adapter))
3556 return;
3557
3558 /* No filters are in use, we may disable flex filters */
3559 wufc = rd32(IGC_WUFC);
3560 wufc &= ~IGC_WUFC_FLEX_HQ;
3561 wr32(IGC_WUFC, wufc);
3562}
3563
3564static int igc_enable_nfc_rule(struct igc_adapter *adapter,
3565 struct igc_nfc_rule *rule)
3566{
3567 int err;
3568
3569	if (rule->flex)
3570		return igc_add_flex_filter(adapter, rule);
3572
3573 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
3574 err = igc_add_etype_filter(adapter, rule->filter.etype,
3575 rule->action);
3576 if (err)
3577 return err;
3578 }
3579
3580 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) {
3581 err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC,
3582 rule->filter.src_addr, rule->action);
3583 if (err)
3584 return err;
3585 }
3586
3587 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) {
3588 err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
3589 rule->filter.dst_addr, rule->action);
3590 if (err)
3591 return err;
3592 }
3593
3594 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
3595 int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >>
3596 VLAN_PRIO_SHIFT;
3597
3598 err = igc_add_vlan_prio_filter(adapter, prio, rule->action);
3599 if (err)
3600 return err;
3601 }
3602
3603 return 0;
3604}
3605
3606static void igc_disable_nfc_rule(struct igc_adapter *adapter,
3607 const struct igc_nfc_rule *rule)
3608{
3609 if (rule->flex) {
3610 igc_del_flex_filter(adapter, rule->filter.flex_index);
3611 return;
3612 }
3613
3614 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE)
3615 igc_del_etype_filter(adapter, rule->filter.etype);
3616
3617 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
3618 int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >>
3619 VLAN_PRIO_SHIFT;
3620
3621 igc_del_vlan_prio_filter(adapter, prio);
3622 }
3623
3624 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR)
3625 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC,
3626 rule->filter.src_addr);
3627
3628 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
3629 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
3630 rule->filter.dst_addr);
3631}
3632
3633/**
3634 * igc_get_nfc_rule() - Get NFC rule
3635 * @adapter: Pointer to adapter
3636 * @location: Rule location
3637 *
3638 * Context: Expects adapter->nfc_rule_lock to be held by caller.
3639 *
3640 * Return: Pointer to NFC rule at @location. If not found, NULL.
3641 */
3642struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter,
3643 u32 location)
3644{
3645 struct igc_nfc_rule *rule;
3646
3647 list_for_each_entry(rule, &adapter->nfc_rule_list, list) {
3648 if (rule->location == location)
3649 return rule;
3650 if (rule->location > location)
3651 break;
3652 }
3653
3654 return NULL;
3655}
3656
3657/**
3658 * igc_del_nfc_rule() - Delete NFC rule
3659 * @adapter: Pointer to adapter
3660 * @rule: Pointer to rule to be deleted
3661 *
3662 * Disable NFC rule in hardware and delete it from adapter.
3663 *
3664 * Context: Expects adapter->nfc_rule_lock to be held by caller.
3665 */
3666void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule)
3667{
3668 igc_disable_nfc_rule(adapter, rule);
3669
3670 list_del(&rule->list);
3671 adapter->nfc_rule_count--;
3672
3673 kfree(rule);
3674}
3675
3676static void igc_flush_nfc_rules(struct igc_adapter *adapter)
3677{
3678 struct igc_nfc_rule *rule, *tmp;
3679
3680 mutex_lock(&adapter->nfc_rule_lock);
3681
3682 list_for_each_entry_safe(rule, tmp, &adapter->nfc_rule_list, list)
3683 igc_del_nfc_rule(adapter, rule);
3684
3685 mutex_unlock(&adapter->nfc_rule_lock);
3686}
3687
3688/**
3689 * igc_add_nfc_rule() - Add NFC rule
3690 * @adapter: Pointer to adapter
3691 * @rule: Pointer to rule to be added
3692 *
3693 * Enable NFC rule in hardware and add it to adapter.
3694 *
3695 * Context: Expects adapter->nfc_rule_lock to be held by caller.
3696 *
3697 * Return: 0 on success, negative errno on failure.
3698 */
3699int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule)
3700{
3701 struct igc_nfc_rule *pred, *cur;
3702 int err;
3703
3704 err = igc_enable_nfc_rule(adapter, rule);
3705 if (err)
3706 return err;
3707
3708 pred = NULL;
3709 list_for_each_entry(cur, &adapter->nfc_rule_list, list) {
3710 if (cur->location >= rule->location)
3711 break;
3712 pred = cur;
3713 }
3714
3715 list_add(&rule->list, pred ? &pred->list : &adapter->nfc_rule_list);
3716 adapter->nfc_rule_count++;
3717 return 0;
3718}
3719
3720static void igc_restore_nfc_rules(struct igc_adapter *adapter)
3721{
3722 struct igc_nfc_rule *rule;
3723
3724 mutex_lock(&adapter->nfc_rule_lock);
3725
3726 list_for_each_entry_reverse(rule, &adapter->nfc_rule_list, list)
3727 igc_enable_nfc_rule(adapter, rule);
3728
3729 mutex_unlock(&adapter->nfc_rule_lock);
3730}
3731
3732static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr)
3733{
3734 struct igc_adapter *adapter = netdev_priv(netdev);
3735
3736 return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1);
3737}
3738
3739static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr)
3740{
3741 struct igc_adapter *adapter = netdev_priv(netdev);
3742
3743 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr);
3744 return 0;
3745}
3746
3747/**
3748 * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
3749 * @netdev: network interface device structure
3750 *
3751 * The set_rx_mode entry point is called whenever the unicast or multicast
3752 * address lists or the network interface flags are updated. This routine is
3753 * responsible for configuring the hardware for proper unicast, multicast,
3754 * promiscuous mode, and all-multi behavior.
3755 */
3756static void igc_set_rx_mode(struct net_device *netdev)
3757{
3758 struct igc_adapter *adapter = netdev_priv(netdev);
3759 struct igc_hw *hw = &adapter->hw;
3760 u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
3761 int count;
3762
3763 /* Check for Promiscuous and All Multicast modes */
3764 if (netdev->flags & IFF_PROMISC) {
3765 rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE;
3766 } else {
3767 if (netdev->flags & IFF_ALLMULTI) {
3768 rctl |= IGC_RCTL_MPE;
3769 } else {
3770 /* Write addresses to the MTA, if the attempt fails
3771 * then we should just turn on promiscuous mode so
3772 * that we can at least receive multicast traffic
3773 */
3774 count = igc_write_mc_addr_list(netdev);
3775 if (count < 0)
3776 rctl |= IGC_RCTL_MPE;
3777 }
3778 }
3779
3780 /* Write addresses to available RAR registers, if there is not
3781 * sufficient space to store all the addresses then enable
3782 * unicast promiscuous mode
3783 */
3784 if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync))
3785 rctl |= IGC_RCTL_UPE;
3786
3787 /* update state of unicast and multicast */
3788 rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE);
3789 wr32(IGC_RCTL, rctl);
3790
3791#if (PAGE_SIZE < 8192)
3792 if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB)
3793 rlpml = IGC_MAX_FRAME_BUILD_SKB;
3794#endif
3795 wr32(IGC_RLPML, rlpml);
3796}
3797
3798/**
3799 * igc_configure - configure the hardware for RX and TX
3800 * @adapter: private board structure
3801 */
3802static void igc_configure(struct igc_adapter *adapter)
3803{
3804 struct net_device *netdev = adapter->netdev;
3805 int i = 0;
3806
3807 igc_get_hw_control(adapter);
3808 igc_set_rx_mode(netdev);
3809
3810 igc_restore_vlan(adapter);
3811
3812 igc_setup_tctl(adapter);
3813 igc_setup_mrqc(adapter);
3814 igc_setup_rctl(adapter);
3815
3816 igc_set_default_mac_filter(adapter);
3817 igc_restore_nfc_rules(adapter);
3818
3819 igc_configure_tx(adapter);
3820 igc_configure_rx(adapter);
3821
3822 igc_rx_fifo_flush_base(&adapter->hw);
3823
3824 /* call igc_desc_unused which always leaves
3825 * at least 1 descriptor unused to make sure
3826 * next_to_use != next_to_clean
3827 */
3828 for (i = 0; i < adapter->num_rx_queues; i++) {
3829 struct igc_ring *ring = adapter->rx_ring[i];
3830
3831 if (ring->xsk_pool)
3832 igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring));
3833 else
3834 igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
3835 }
3836}
3837
3838/**
3839 * igc_write_ivar - configure ivar for given MSI-X vector
3840 * @hw: pointer to the HW structure
3841 * @msix_vector: vector number we are allocating to a given ring
3842 * @index: row index of IVAR register to write within IVAR table
3843 * @offset: column offset in IVAR, should be a multiple of 8
3844 *
3845 * The IVAR table consists of 2 columns,
3846 * each containing a cause allocation for an Rx and Tx ring, and a
3847 * variable number of rows depending on the number of queues supported.
3848 */
3849static void igc_write_ivar(struct igc_hw *hw, int msix_vector,
3850 int index, int offset)
3851{
3852 u32 ivar = array_rd32(IGC_IVAR0, index);
3853
3854 /* clear any bits that are currently set */
3855 ivar &= ~((u32)0xFF << offset);
3856
3857 /* write vector and valid bit */
3858 ivar |= (msix_vector | IGC_IVAR_VALID) << offset;
3859
3860 array_wr32(IGC_IVAR0, index, ivar);
3861}
3862
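/**
 * igc_assign_vector - Map a q_vector's rings to an MSI-X vector
 * @q_vector: queue vector to map
 * @msix_vector: MSI-X vector number to assign
 *
 * Program the IVAR table so interrupts from the vector's Rx and Tx rings
 * are routed to @msix_vector, and record the vector's EIMS bit.
 */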
3863static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector)
3864{
3865 struct igc_adapter *adapter = q_vector->adapter;
3866 struct igc_hw *hw = &adapter->hw;
3867 int rx_queue = IGC_N0_QUEUE;
3868 int tx_queue = IGC_N0_QUEUE;
3869
3870 if (q_vector->rx.ring)
3871 rx_queue = q_vector->rx.ring->reg_idx;
3872 if (q_vector->tx.ring)
3873 tx_queue = q_vector->tx.ring->reg_idx;
3874
	switch (hw->mac.type) {
	case igc_i225:
		if (rx_queue > IGC_N0_QUEUE)
			igc_write_ivar(hw, msix_vector,
				       rx_queue >> 1,
				       (rx_queue & 0x1) << 4);
		if (tx_queue > IGC_N0_QUEUE)
			igc_write_ivar(hw, msix_vector,
				       tx_queue >> 1,
				       ((tx_queue & 0x1) << 4) + 8);
		q_vector->eims_value = BIT(msix_vector);
		break;
	default:
		WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n");
		break;
	}

	/* add q_vector eims value to global eims_enable_mask */
	adapter->eims_enable_mask |= q_vector->eims_value;

	/* configure q_vector to set itr on first interrupt */
	q_vector->set_itr = 1;
}

/**
 * igc_configure_msix - Configure MSI-X hardware
 * @adapter: Pointer to adapter structure
 *
 * igc_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 */
static void igc_configure_msix(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	int i, vector = 0;
	u32 tmp;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case igc_i225:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick. And it will take days to debug.
		 */
		wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE |
		     IGC_GPIE_PBA | IGC_GPIE_EIAME |
		     IGC_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = BIT(vector);
		tmp = (vector++ | IGC_IVAR_VALID) << 8;

		wr32(IGC_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++)
		igc_assign_vector(adapter->q_vector[i], vector++);

	wrfl();
}

/**
 * igc_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 */
static void igc_irq_enable(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA;
		u32 regval = rd32(IGC_EIAC);

		wr32(IGC_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(IGC_EIAM);
		wr32(IGC_EIAM, regval | adapter->eims_enable_mask);
		wr32(IGC_EIMS, adapter->eims_enable_mask);
		wr32(IGC_IMS, ims);
	} else {
		wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
		wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
	}
}

/**
 * igc_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 */
static void igc_irq_disable(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 regval = rd32(IGC_EIAM);

		wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(IGC_EIMC, adapter->eims_enable_mask);
		regval = rd32(IGC_EIAC);
		wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask);
	}

	wr32(IGC_IAM, 0);
	wr32(IGC_IMC, ~0);
	wrfl();

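	/* After masking, wait out any handler still running on each
	 * vector: the "other" vector first, then one per queue vector.
	 */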
	if (adapter->msix_entries) {
		int vector = 0, i;

		synchronize_irq(adapter->msix_entries[vector++].vector);

		for (i = 0; i < adapter->num_q_vectors; i++)
			synchronize_irq(adapter->msix_entries[vector++].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}

void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
			      const u32 max_rss_queues)
{
	/* Determine if we need to pair queues: if rss_queues > half of
	 * max_rss_queues, pair the queues in order to conserve
	 * interrupts due to limited supply.
	 */
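	/* e.g. with a max of 4 RSS queues, requesting 3 or 4 queues
	 * shares one vector per Tx/Rx pair, while 1 or 2 queues get
	 * separate Tx and Rx vectors.
	 */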
	if (adapter->rss_queues > (max_rss_queues / 2))
		adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
	else
		adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS;
}

unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter)
{
	return IGC_MAX_RX_QUEUES;
}

static void igc_init_queue_configuration(struct igc_adapter *adapter)
{
	u32 max_rss_queues;

	max_rss_queues = igc_get_max_rss_queues(adapter);
	adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());

	igc_set_flag_queue_pairs(adapter, max_rss_queues);
}

/**
 * igc_reset_q_vector - Reset config for interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be reset
 *
 * If NAPI is enabled it will delete any references to the
 * NAPI struct. This is preparation for igc_free_q_vector.
 */
static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx)
{
	struct igc_q_vector *q_vector = adapter->q_vector[v_idx];

	/* if we're coming from igc_set_interrupt_capability, the vectors are
	 * not yet allocated
	 */
	if (!q_vector)
		return;

	if (q_vector->tx.ring)
		adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;

	if (q_vector->rx.ring)
		adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;

	netif_napi_del(&q_vector->napi);
}

/**
 * igc_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.
 */
static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx)
{
	struct igc_q_vector *q_vector = adapter->q_vector[v_idx];

	adapter->q_vector[v_idx] = NULL;

	/* igc_get_stats64() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
	if (q_vector)
		kfree_rcu(q_vector, rcu);
}

/**
 * igc_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 */
static void igc_free_q_vectors(struct igc_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--) {
		igc_reset_q_vector(adapter, v_idx);
		igc_free_q_vector(adapter, v_idx);
	}
}

/**
 * igc_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: pointer to q_vector
 * @ring_container: ring info to update the itr for
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt. The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern. Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 * NOTE: These calculations are only valid when operating in a single-
 * queue environment.
 */
static void igc_update_itr(struct igc_q_vector *q_vector,
			   struct igc_ring_container *ring_container)
{
	unsigned int packets = ring_container->total_packets;
	unsigned int bytes = ring_container->total_bytes;
	u8 itrval = ring_container->itr;

	/* no packets, exit with status unchanged */
	if (packets == 0)
		return;

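	/* Worked example: an interrupt window with 40 packets totalling
	 * 60000 bytes averages 1500 bytes/packet; starting from
	 * low_latency, bytes > 10000 and bytes/packets > 1200, so the
	 * ring steps to bulk_latency (fewer interrupts per second).
	 */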
	switch (itrval) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes / packets > 8000)
			itrval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			itrval = low_latency;
		break;
	case low_latency: /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes / packets > 8000)
				itrval = bulk_latency;
			else if ((packets < 10) || ((bytes / packets) > 1200))
				itrval = bulk_latency;
			else if (packets > 35)
				itrval = lowest_latency;
		} else if (bytes / packets > 2000) {
			itrval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			itrval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				itrval = low_latency;
		} else if (bytes < 1500) {
			itrval = low_latency;
		}
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itrval;
}

static void igc_set_itr(struct igc_q_vector *q_vector)
{
	struct igc_adapter *adapter = q_vector->adapter;
	u32 new_itr = q_vector->itr_val;
	u8 current_itr = 0;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	switch (adapter->link_speed) {
	case SPEED_10:
	case SPEED_100:
		current_itr = 0;
		new_itr = IGC_4K_ITR;
		goto set_itr_now;
	default:
		break;
	}

	igc_update_itr(q_vector, &q_vector->tx);
	igc_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (current_itr == lowest_latency &&
	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
		current_itr = low_latency;

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IGC_70K_ITR; /* 70,000 ints/sec */
		break;
	case low_latency:
		new_itr = IGC_20K_ITR; /* 20,000 ints/sec */
		break;
	case bulk_latency:
		new_itr = IGC_4K_ITR; /* 4,000 ints/sec */
		break;
	default:
		break;
	}

set_itr_now:
	if (new_itr != q_vector->itr_val) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing
		 */
		new_itr = new_itr > q_vector->itr_val ?
			  max((new_itr * q_vector->itr_val) /
			  (new_itr + (q_vector->itr_val >> 2)),
			  new_itr) : new_itr;
		/* Don't write the value here; it resets the adapter's
		 * internal timer, and causes us to delay far longer than
		 * we should between interrupts. Instead, we write the ITR
		 * value at the beginning of the next interrupt so the timing
		 * ends up being correct.
		 */
		q_vector->itr_val = new_itr;
		q_vector->set_itr = 1;
	}
}

static void igc_reset_interrupt_capability(struct igc_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGC_FLAG_HAS_MSI) {
		pci_disable_msi(adapter->pdev);
	}

	while (v_idx--)
		igc_reset_q_vector(adapter, v_idx);
}

/**
 * igc_set_interrupt_capability - set MSI or MSI-X if supported
 * @adapter: Pointer to adapter structure
 * @msix: boolean value for MSI-X capability
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 */
static void igc_set_interrupt_capability(struct igc_adapter *adapter,
					 bool msix)
{
	int numvecs, i;
	int err;

	if (!msix)
		goto msi_only;
	adapter->flags |= IGC_FLAG_HAS_MSIX;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;

	adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every Rx queue */
	numvecs = adapter->num_rx_queues;

	/* if Tx handler is separate add 1 for every Tx queue */
	if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;

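	/* Example: 4 RSS queues with queue pairing enabled request 4
	 * queue vectors plus the link/other vector, i.e. 5 MSI-X
	 * entries; without pairing it would be 8 + 1.
	 */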
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);

	if (!adapter->msix_entries)
		return;

	/* populate entry values */
	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix_range(adapter->pdev,
				    adapter->msix_entries,
				    numvecs,
				    numvecs);
	if (err > 0)
		return;

	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;

	igc_reset_interrupt_capability(adapter);

msi_only:
	adapter->flags &= ~IGC_FLAG_HAS_MSIX;

	adapter->rss_queues = 1;
	adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGC_FLAG_HAS_MSI;
}

/**
 * igc_update_ring_itr - update the dynamic ITR value based on packet size
 * @q_vector: pointer to q_vector
 *
 * Stores a new ITR value based strictly on packet size. This
 * algorithm is less sophisticated than that used in igc_update_itr,
 * due to the difficulty of synchronizing statistics across multiple
 * receive rings. The divisors and thresholds used by this function
 * were determined based on theoretical maximum wire speed and testing
 * data, in order to minimize response time while increasing bulk
 * throughput.
 * NOTE: This function is called only when operating in a multiqueue
 * receive environment.
 */
static void igc_update_ring_itr(struct igc_q_vector *q_vector)
{
	struct igc_adapter *adapter = q_vector->adapter;
	int new_val = q_vector->itr_val;
	int avg_wire_size = 0;
	unsigned int packets;

	/* For non-gigabit speeds, just fix the interrupt rate at 4000
	 * ints/sec - ITR timer value of 120 ticks.
	 */
	switch (adapter->link_speed) {
	case SPEED_10:
	case SPEED_100:
		new_val = IGC_4K_ITR;
		goto set_itr_val;
	default:
		break;
	}

	packets = q_vector->rx.total_packets;
	if (packets)
		avg_wire_size = q_vector->rx.total_bytes / packets;

	packets = q_vector->tx.total_packets;
	if (packets)
		avg_wire_size = max_t(u32, avg_wire_size,
				      q_vector->tx.total_bytes / packets);

	/* if avg_wire_size isn't set no work was done */
	if (!avg_wire_size)
		goto clear_counts;

	/* Add 24 bytes to size to account for CRC, preamble, and gap */
	avg_wire_size += 24;

	/* Don't starve jumbo frames */
	avg_wire_size = min(avg_wire_size, 3000);

	/* Give a little boost to mid-size frames */
	if (avg_wire_size > 300 && avg_wire_size < 1200)
		new_val = avg_wire_size / 3;
	else
		new_val = avg_wire_size / 2;

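	/* e.g. full-size 1500-byte frames give avg_wire_size = 1524
	 * after the +24 adjustment, outside the 300..1200 boost window,
	 * so new_val = 1524 / 2 = 762.
	 */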
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (new_val < IGC_20K_ITR &&
	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
		new_val = IGC_20K_ITR;

set_itr_val:
	if (new_val != q_vector->itr_val) {
		q_vector->itr_val = new_val;
		q_vector->set_itr = 1;
	}
clear_counts:
	q_vector->rx.total_bytes = 0;
	q_vector->rx.total_packets = 0;
	q_vector->tx.total_bytes = 0;
	q_vector->tx.total_packets = 0;
}

static void igc_ring_irq_enable(struct igc_q_vector *q_vector)
{
	struct igc_adapter *adapter = q_vector->adapter;
	struct igc_hw *hw = &adapter->hw;

	if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
	    (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
		if (adapter->num_q_vectors == 1)
			igc_set_itr(q_vector);
		else
			igc_update_ring_itr(q_vector);
	}

	if (!test_bit(__IGC_DOWN, &adapter->state)) {
		if (adapter->msix_entries)
			wr32(IGC_EIMS, q_vector->eims_value);
		else
			igc_irq_enable(adapter);
	}
}

static void igc_add_ring(struct igc_ring *ring,
			 struct igc_ring_container *head)
{
	head->ring = ring;
	head->count++;
}

/**
 * igc_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 */
static void igc_cache_ring_register(struct igc_adapter *adapter)
{
	int i = 0, j = 0;

	switch (adapter->hw.mac.type) {
	case igc_i225:
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = j;
		break;
	}
}

/**
 * igc_poll - NAPI Rx polling callback
 * @napi: napi polling structure
 * @budget: count of how many packets we should handle
 */
static int igc_poll(struct napi_struct *napi, int budget)
{
	struct igc_q_vector *q_vector = container_of(napi,
						     struct igc_q_vector,
						     napi);
	struct igc_ring *rx_ring = q_vector->rx.ring;
	bool clean_complete = true;
	int work_done = 0;

	if (q_vector->tx.ring)
		clean_complete = igc_clean_tx_irq(q_vector, budget);

	if (rx_ring) {
		int cleaned = rx_ring->xsk_pool ?
			      igc_clean_rx_irq_zc(q_vector, budget) :
			      igc_clean_rx_irq(q_vector, budget);

		work_done += cleaned;
		if (cleaned >= budget)
			clean_complete = false;
	}

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;

	/* Exit the polling mode, but don't re-enable interrupts if stack might
	 * poll us due to busy-polling
	 */
	if (likely(napi_complete_done(napi, work_done)))
		igc_ring_irq_enable(q_vector);

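	/* NAPI treats a return value equal to budget as "not done yet",
	 * so cap at budget - 1 now that napi_complete_done() has run.
	 */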
	return min(work_done, budget - 1);
}

/**
 * igc_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector. If allocation fails we return -ENOMEM.
 */
static int igc_alloc_q_vector(struct igc_adapter *adapter,
			      unsigned int v_count, unsigned int v_idx,
			      unsigned int txr_count, unsigned int txr_idx,
			      unsigned int rxr_count, unsigned int rxr_idx)
{
	struct igc_q_vector *q_vector;
	struct igc_ring *ring;
	int ring_count;

	/* igc only supports 1 Tx and/or 1 Rx queue per vector */
	if (txr_count > 1 || rxr_count > 1)
		return -ENOMEM;

	ring_count = txr_count + rxr_count;

	/* allocate q_vector and rings */
	q_vector = adapter->q_vector[v_idx];
	if (!q_vector)
		q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
				   GFP_KERNEL);
	else
		memset(q_vector, 0, struct_size(q_vector, ring, ring_count));
	if (!q_vector)
		return -ENOMEM;

	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi, igc_poll);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* initialize ITR configuration */
	q_vector->itr_register = adapter->io_addr + IGC_EITR(0);
	q_vector->itr_val = IGC_START_ITR;

	/* initialize pointer to rings */
	ring = q_vector->ring;

	/* initialize ITR */
	if (rxr_count) {
		/* rx or rx/tx vector */
		if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
			q_vector->itr_val = adapter->rx_itr_setting;
	} else {
		/* tx only vector */
		if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
			q_vector->itr_val = adapter->tx_itr_setting;
	}

	if (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		igc_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = txr_idx;

		/* assign ring to adapter */
		adapter->tx_ring[txr_idx] = ring;

		/* push pointer to next ring */
		ring++;
	}

	if (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		igc_add_ring(ring, &q_vector->rx);

		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		ring->queue_index = rxr_idx;

		/* assign ring to adapter */
		adapter->rx_ring[rxr_idx] = ring;
	}

	return 0;
}

/**
 * igc_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 */
static int igc_alloc_q_vectors(struct igc_adapter *adapter)
{
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
	int q_vectors = adapter->num_q_vectors;
	int err;

	if (q_vectors >= (rxr_remaining + txr_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
						 0, 0, 1, rxr_idx);

			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);

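		/* Spread the remaining rings evenly over the remaining
		 * vectors, e.g. 4 Rx + 4 Tx rings over 4 paired vectors
		 * puts one Rx and one Tx ring on each vector.
		 */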
		err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
					 tqpv, txr_idx, rqpv, rxr_idx);

		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		rxr_idx++;
		txr_idx++;
	}

	return 0;

err_out:
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		igc_free_q_vector(adapter, v_idx);

	return -ENOMEM;
}

/**
 * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 * @adapter: Pointer to adapter structure
 * @msix: boolean for MSI-X capability
 *
 * This function initializes the interrupts and allocates all of the queues.
 */
static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix)
{
	struct net_device *dev = adapter->netdev;
	int err = 0;

	igc_set_interrupt_capability(adapter, msix);

	err = igc_alloc_q_vectors(adapter);
	if (err) {
		netdev_err(dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	igc_cache_ring_register(adapter);

	return 0;

err_alloc_q_vectors:
	igc_reset_interrupt_capability(adapter);
	return err;
}

/**
 * igc_sw_init - Initialize general software structures (struct igc_adapter)
 * @adapter: board private structure to initialize
 *
 * igc_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 */
static int igc_sw_init(struct igc_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct igc_hw *hw = &adapter->hw;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);

	/* set default ring sizes */
	adapter->tx_ring_count = IGC_DEFAULT_TXD;
	adapter->rx_ring_count = IGC_DEFAULT_RXD;

	/* set default ITR values */
	adapter->rx_itr_setting = IGC_DEFAULT_ITR;
	adapter->tx_itr_setting = IGC_DEFAULT_ITR;

	/* set default work limits */
	adapter->tx_work_limit = IGC_DEFAULT_TX_WORK;

	/* adjust max frame to be at least the size of a standard frame */
	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
				  VLAN_HLEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	mutex_init(&adapter->nfc_rule_lock);
	INIT_LIST_HEAD(&adapter->nfc_rule_list);
	adapter->nfc_rule_count = 0;

	spin_lock_init(&adapter->stats64_lock);
	/* Assume MSI-X interrupts, will be checked during IRQ allocation */
	adapter->flags |= IGC_FLAG_HAS_MSIX;

	igc_init_queue_configuration(adapter);

	/* This call may decrease the number of queues */
	if (igc_init_interrupt_scheme(adapter, true)) {
		netdev_err(netdev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* Explicitly disable IRQ since the NIC can be in any state. */
	igc_irq_disable(adapter);

	set_bit(__IGC_DOWN, &adapter->state);

	return 0;
}

/**
 * igc_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 */
void igc_up(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	int i = 0;

	/* hardware has been reset, we need to reload some things */
	igc_configure(adapter);

	clear_bit(__IGC_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++)
		napi_enable(&adapter->q_vector[i]->napi);

	if (adapter->msix_entries)
		igc_configure_msix(adapter);
	else
		igc_assign_vector(adapter->q_vector[0], 0);

	/* Clear any pending interrupts. */
	rd32(IGC_ICR);
	igc_irq_enable(adapter);

	netif_tx_start_all_queues(adapter->netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = true;
	schedule_work(&adapter->watchdog_task);
}

/**
 * igc_update_stats - Update the board statistics counters
 * @adapter: board private structure
 */
void igc_update_stats(struct igc_adapter *adapter)
{
	struct rtnl_link_stats64 *net_stats = &adapter->stats64;
	struct pci_dev *pdev = adapter->pdev;
	struct igc_hw *hw = &adapter->hw;
	u64 _bytes, _packets;
	u64 bytes, packets;
	unsigned int start;
	u32 mpc;
	int i;

	/* Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	packets = 0;
	bytes = 0;

	rcu_read_lock();
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igc_ring *ring = adapter->rx_ring[i];
		u32 rqdpc = rd32(IGC_RQDPC(i));

		if (hw->mac.type >= igc_i225)
			wr32(IGC_RQDPC(i), 0);

		if (rqdpc) {
			ring->rx_stats.drops += rqdpc;
			net_stats->rx_fifo_errors += rqdpc;
		}

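		/* Seqcount retry loop: re-read if a writer updated the
		 * 64-bit counters mid-read (matters on 32-bit hosts).
		 */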
		do {
			start = u64_stats_fetch_begin(&ring->rx_syncp);
			_bytes = ring->rx_stats.bytes;
			_packets = ring->rx_stats.packets;
		} while (u64_stats_fetch_retry(&ring->rx_syncp, start));
		bytes += _bytes;
		packets += _packets;
	}

	net_stats->rx_bytes = bytes;
	net_stats->rx_packets = packets;

	packets = 0;
	bytes = 0;
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igc_ring *ring = adapter->tx_ring[i];

		do {
			start = u64_stats_fetch_begin(&ring->tx_syncp);
			_bytes = ring->tx_stats.bytes;
			_packets = ring->tx_stats.packets;
		} while (u64_stats_fetch_retry(&ring->tx_syncp, start));
		bytes += _bytes;
		packets += _packets;
	}
	net_stats->tx_bytes = bytes;
	net_stats->tx_packets = packets;
	rcu_read_unlock();

	/* read stats registers */
	adapter->stats.crcerrs += rd32(IGC_CRCERRS);
	adapter->stats.gprc += rd32(IGC_GPRC);
	adapter->stats.gorc += rd32(IGC_GORCL);
	rd32(IGC_GORCH); /* clear GORCL */
	adapter->stats.bprc += rd32(IGC_BPRC);
	adapter->stats.mprc += rd32(IGC_MPRC);
	adapter->stats.roc += rd32(IGC_ROC);

	adapter->stats.prc64 += rd32(IGC_PRC64);
	adapter->stats.prc127 += rd32(IGC_PRC127);
	adapter->stats.prc255 += rd32(IGC_PRC255);
	adapter->stats.prc511 += rd32(IGC_PRC511);
	adapter->stats.prc1023 += rd32(IGC_PRC1023);
	adapter->stats.prc1522 += rd32(IGC_PRC1522);
	adapter->stats.tlpic += rd32(IGC_TLPIC);
	adapter->stats.rlpic += rd32(IGC_RLPIC);
	adapter->stats.hgptc += rd32(IGC_HGPTC);

	mpc = rd32(IGC_MPC);
	adapter->stats.mpc += mpc;
	net_stats->rx_fifo_errors += mpc;
	adapter->stats.scc += rd32(IGC_SCC);
	adapter->stats.ecol += rd32(IGC_ECOL);
	adapter->stats.mcc += rd32(IGC_MCC);
	adapter->stats.latecol += rd32(IGC_LATECOL);
	adapter->stats.dc += rd32(IGC_DC);
	adapter->stats.rlec += rd32(IGC_RLEC);
	adapter->stats.xonrxc += rd32(IGC_XONRXC);
	adapter->stats.xontxc += rd32(IGC_XONTXC);
	adapter->stats.xoffrxc += rd32(IGC_XOFFRXC);
	adapter->stats.xofftxc += rd32(IGC_XOFFTXC);
	adapter->stats.fcruc += rd32(IGC_FCRUC);
	adapter->stats.gptc += rd32(IGC_GPTC);
	adapter->stats.gotc += rd32(IGC_GOTCL);
	rd32(IGC_GOTCH); /* clear GOTCL */
	adapter->stats.rnbc += rd32(IGC_RNBC);
	adapter->stats.ruc += rd32(IGC_RUC);
	adapter->stats.rfc += rd32(IGC_RFC);
	adapter->stats.rjc += rd32(IGC_RJC);
	adapter->stats.tor += rd32(IGC_TORH);
	adapter->stats.tot += rd32(IGC_TOTH);
	adapter->stats.tpr += rd32(IGC_TPR);

	adapter->stats.ptc64 += rd32(IGC_PTC64);
	adapter->stats.ptc127 += rd32(IGC_PTC127);
	adapter->stats.ptc255 += rd32(IGC_PTC255);
	adapter->stats.ptc511 += rd32(IGC_PTC511);
	adapter->stats.ptc1023 += rd32(IGC_PTC1023);
	adapter->stats.ptc1522 += rd32(IGC_PTC1522);

	adapter->stats.mptc += rd32(IGC_MPTC);
	adapter->stats.bptc += rd32(IGC_BPTC);

	adapter->stats.tpt += rd32(IGC_TPT);
	adapter->stats.colc += rd32(IGC_COLC);
	adapter->stats.colc += rd32(IGC_RERC);

	adapter->stats.algnerrc += rd32(IGC_ALGNERRC);

	adapter->stats.tsctc += rd32(IGC_TSCTC);

	adapter->stats.iac += rd32(IGC_IAC);

	/* Fill out the OS statistics structure */
	net_stats->multicast = adapter->stats.mprc;
	net_stats->collisions = adapter->stats.colc;

	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC
	 */
	net_stats->rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	net_stats->rx_length_errors = adapter->stats.ruc +
		adapter->stats.roc;
	net_stats->rx_crc_errors = adapter->stats.crcerrs;
	net_stats->rx_frame_errors = adapter->stats.algnerrc;
	net_stats->rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	net_stats->tx_errors = adapter->stats.ecol +
		adapter->stats.latecol;
	net_stats->tx_aborted_errors = adapter->stats.ecol;
	net_stats->tx_window_errors = adapter->stats.latecol;
	net_stats->tx_carrier_errors = adapter->stats.tncrs;

	/* Tx Dropped needs to be maintained elsewhere */

	/* Management Stats */
	adapter->stats.mgptc += rd32(IGC_MGTPTC);
	adapter->stats.mgprc += rd32(IGC_MGTPRC);
	adapter->stats.mgpdc += rd32(IGC_MGTPDC);
}

/**
 * igc_down - Close the interface
 * @adapter: board private structure
 */
void igc_down(struct igc_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;
	u32 tctl, rctl;
	int i = 0;

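	/* Teardown proceeds in stages: Rx is disabled first so no new
	 * frames arrive, the stack is quiesced, then Tx, IRQs and NAPI
	 * are stopped before the final reset and ring cleanup.
	 */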
	set_bit(__IGC_DOWN, &adapter->state);

	igc_ptp_suspend(adapter);

	if (pci_device_is_present(adapter->pdev)) {
		/* disable receives in the hardware */
		rctl = rd32(IGC_RCTL);
		wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN);
		/* flush and sleep below */
	}
	/* set trans_start so we don't get spurious watchdogs during reset */
	netif_trans_update(netdev);

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	if (pci_device_is_present(adapter->pdev)) {
		/* disable transmits in the hardware */
		tctl = rd32(IGC_TCTL);
		tctl &= ~IGC_TCTL_EN;
		wr32(IGC_TCTL, tctl);
		/* flush both disables and wait for them to finish */
		wrfl();
		usleep_range(10000, 20000);

		igc_irq_disable(adapter);
	}

	adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		if (adapter->q_vector[i]) {
			napi_synchronize(&adapter->q_vector[i]->napi);
			napi_disable(&adapter->q_vector[i]->napi);
		}
	}

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	/* record the stats before reset */
	spin_lock(&adapter->stats64_lock);
	igc_update_stats(adapter);
	spin_unlock(&adapter->stats64_lock);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igc_reset(adapter);

	/* clear VLAN promisc flag so VFTA will be updated if necessary */
	adapter->flags &= ~IGC_FLAG_VLAN_PROMISC;

	igc_clean_all_tx_rings(adapter);
	igc_clean_all_rx_rings(adapter);
}

void igc_reinit_locked(struct igc_adapter *adapter)
{
	while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
		usleep_range(1000, 2000);
	igc_down(adapter);
	igc_up(adapter);
	clear_bit(__IGC_RESETTING, &adapter->state);
}

static void igc_reset_task(struct work_struct *work)
{
	struct igc_adapter *adapter;

	adapter = container_of(work, struct igc_adapter, reset_task);

	rtnl_lock();
	/* If we're already down or resetting, just bail */
	if (test_bit(__IGC_DOWN, &adapter->state) ||
	    test_bit(__IGC_RESETTING, &adapter->state)) {
		rtnl_unlock();
		return;
	}

	igc_rings_dump(adapter);
	igc_regs_dump(adapter);
	netdev_err(adapter->netdev, "Reset adapter\n");
	igc_reinit_locked(adapter);
	rtnl_unlock();
}

/**
 * igc_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 */
static int igc_change_mtu(struct net_device *netdev, int new_mtu)
{
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	struct igc_adapter *adapter = netdev_priv(netdev);

	if (igc_xdp_is_enabled(adapter) && new_mtu > ETH_DATA_LEN) {
		netdev_dbg(netdev, "Jumbo frames not supported with XDP");
		return -EINVAL;
	}

	/* adjust max frame to be at least the size of a standard frame */
	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
		max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;

	while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	/* igc_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;

	if (netif_running(netdev))
		igc_down(adapter);

	netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		igc_up(adapter);
	else
		igc_reset(adapter);

	clear_bit(__IGC_RESETTING, &adapter->state);

	return 0;
}

/**
 * igc_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: queue number that timed out
 **/
static void igc_tx_timeout(struct net_device *netdev,
			   unsigned int __always_unused txqueue)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;
	schedule_work(&adapter->reset_task);
	wr32(IGC_EICS,
	     (adapter->eims_enable_mask & ~adapter->eims_other));
}

/**
 * igc_get_stats64 - Get System Network Statistics
 * @netdev: network interface device structure
 * @stats: rtnl_link_stats64 pointer
 *
 * Returns the address of the device statistics structure.
 * The statistics are updated here and also from the timer callback.
 */
static void igc_get_stats64(struct net_device *netdev,
			    struct rtnl_link_stats64 *stats)
{
	struct igc_adapter *adapter = netdev_priv(netdev);

	spin_lock(&adapter->stats64_lock);
	if (!test_bit(__IGC_RESETTING, &adapter->state))
		igc_update_stats(adapter);
	memcpy(stats, &adapter->stats64, sizeof(*stats));
	spin_unlock(&adapter->stats64_lock);
}

static netdev_features_t igc_fix_features(struct net_device *netdev,
					  netdev_features_t features)
{
	/* Since there is no support for separate Rx/Tx vlan accel
	 * enable/disable, make sure the Tx flag is always in the same
	 * state as the Rx flag.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}

static int igc_set_features(struct net_device *netdev,
			    netdev_features_t features)
{
	netdev_features_t changed = netdev->features ^ features;
	struct igc_adapter *adapter = netdev_priv(netdev);

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		igc_vlan_mode(netdev, features);

	/* Add VLAN support */
	if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
		return 0;

	if (!(features & NETIF_F_NTUPLE))
		igc_flush_nfc_rules(adapter);

	netdev->features = features;

	if (netif_running(netdev))
		igc_reinit_locked(adapter);
	else
		igc_reset(adapter);

	return 1;
}

static netdev_features_t
igc_features_check(struct sk_buff *skb, struct net_device *dev,
		   netdev_features_t features)
{
	unsigned int network_hdr_len, mac_hdr_len;

	/* Make certain the headers can be described by a context descriptor */
	mac_hdr_len = skb_network_header(skb) - skb->data;
	if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN))
		return features & ~(NETIF_F_HW_CSUM |
				    NETIF_F_SCTP_CRC |
				    NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_TSO |
				    NETIF_F_TSO6);

	network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
	if (unlikely(network_hdr_len > IGC_MAX_NETWORK_HDR_LEN))
		return features & ~(NETIF_F_HW_CSUM |
				    NETIF_F_SCTP_CRC |
				    NETIF_F_TSO |
				    NETIF_F_TSO6);

	/* We can only support IPv4 TSO in tunnels if we can mangle the
	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
	 */
	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
		features &= ~NETIF_F_TSO;

	return features;
}

static void igc_tsync_interrupt(struct igc_adapter *adapter)
{
	u32 ack, tsauxc, sec, nsec, tsicr;
	struct igc_hw *hw = &adapter->hw;
	struct ptp_clock_event event;
	struct timespec64 ts;

	tsicr = rd32(IGC_TSICR);
	ack = 0;

	if (tsicr & IGC_TSICR_SYS_WRAP) {
		event.type = PTP_CLOCK_PPS;
		if (adapter->ptp_caps.pps)
			ptp_clock_event(adapter->ptp_clock, &event);
		ack |= IGC_TSICR_SYS_WRAP;
	}

	if (tsicr & IGC_TSICR_TXTS) {
		/* retrieve hardware timestamp */
		schedule_work(&adapter->ptp_tx_work);
		ack |= IGC_TSICR_TXTS;
	}

	if (tsicr & IGC_TSICR_TT0) {
		spin_lock(&adapter->tmreg_lock);
		ts = timespec64_add(adapter->perout[0].start,
				    adapter->perout[0].period);
		wr32(IGC_TRGTTIML0, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0);
		wr32(IGC_TRGTTIMH0, (u32)ts.tv_sec);
		tsauxc = rd32(IGC_TSAUXC);
		tsauxc |= IGC_TSAUXC_EN_TT0;
		wr32(IGC_TSAUXC, tsauxc);
		adapter->perout[0].start = ts;
		spin_unlock(&adapter->tmreg_lock);
		ack |= IGC_TSICR_TT0;
	}

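	/* A target-time hit re-arms the next periodic-output edge:
	 * advance start by one period and rewrite the target-time
	 * registers so the output keeps toggling without software
	 * having to schedule every edge itself.
	 */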
	if (tsicr & IGC_TSICR_TT1) {
		spin_lock(&adapter->tmreg_lock);
		ts = timespec64_add(adapter->perout[1].start,
				    adapter->perout[1].period);
		wr32(IGC_TRGTTIML1, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0);
		wr32(IGC_TRGTTIMH1, (u32)ts.tv_sec);
		tsauxc = rd32(IGC_TSAUXC);
		tsauxc |= IGC_TSAUXC_EN_TT1;
		wr32(IGC_TSAUXC, tsauxc);
		adapter->perout[1].start = ts;
		spin_unlock(&adapter->tmreg_lock);
		ack |= IGC_TSICR_TT1;
	}

	if (tsicr & IGC_TSICR_AUTT0) {
		nsec = rd32(IGC_AUXSTMPL0);
		sec = rd32(IGC_AUXSTMPH0);
		event.type = PTP_CLOCK_EXTTS;
		event.index = 0;
		event.timestamp = sec * NSEC_PER_SEC + nsec;
		ptp_clock_event(adapter->ptp_clock, &event);
		ack |= IGC_TSICR_AUTT0;
	}

	if (tsicr & IGC_TSICR_AUTT1) {
		nsec = rd32(IGC_AUXSTMPL1);
		sec = rd32(IGC_AUXSTMPH1);
		event.type = PTP_CLOCK_EXTTS;
		event.index = 1;
		event.timestamp = sec * NSEC_PER_SEC + nsec;
		ptp_clock_event(adapter->ptp_clock, &event);
		ack |= IGC_TSICR_AUTT1;
	}

	/* acknowledge the interrupts */
	wr32(IGC_TSICR, ack);
}

/**
 * igc_msix_other - msix other interrupt handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 */
static irqreturn_t igc_msix_other(int irq, void *data)
{
	struct igc_adapter *adapter = data;
	struct igc_hw *hw = &adapter->hw;
	u32 icr = rd32(IGC_ICR);

	/* reading ICR causes bit 31 of EICR to be cleared */
	if (icr & IGC_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & IGC_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & IGC_ICR_LSC) {
		hw->mac.get_link_status = true;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGC_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (icr & IGC_ICR_TS)
		igc_tsync_interrupt(adapter);

	wr32(IGC_EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

static void igc_write_itr(struct igc_q_vector *q_vector)
{
	u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK;

	if (!q_vector->set_itr)
		return;

	if (!itr_val)
		itr_val = IGC_ITR_VAL_MASK;

	itr_val |= IGC_EITR_CNT_IGNR;

	writel(itr_val, q_vector->itr_register);
	q_vector->set_itr = 0;
}

static irqreturn_t igc_msix_ring(int irq, void *data)
{
	struct igc_q_vector *q_vector = data;

	/* Write the ITR value calculated from the previous interrupt. */
	igc_write_itr(q_vector);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * igc_request_msix - Initialize MSI-X interrupts
 * @adapter: Pointer to adapter structure
 *
 * igc_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 */
static int igc_request_msix(struct igc_adapter *adapter)
{
	unsigned int num_q_vectors = adapter->num_q_vectors;
	int i = 0, err = 0, vector = 0, free_vector = 0;
	struct net_device *netdev = adapter->netdev;

	err = request_irq(adapter->msix_entries[vector].vector,
			  &igc_msix_other, 0, netdev->name, adapter);
	if (err)
		goto err_out;

	if (num_q_vectors > MAX_Q_VECTORS) {
		num_q_vectors = MAX_Q_VECTORS;
		dev_warn(&adapter->pdev->dev,
			 "The number of queue vectors (%d) is higher than max allowed (%d)\n",
			 adapter->num_q_vectors, MAX_Q_VECTORS);
	}
	for (i = 0; i < num_q_vectors; i++) {
		struct igc_q_vector *q_vector = adapter->q_vector[i];

		vector++;

		q_vector->itr_register = adapter->io_addr + IGC_EITR(vector);

		if (q_vector->rx.ring && q_vector->tx.ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else if (q_vector->tx.ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
				q_vector->tx.ring->queue_index);
		else if (q_vector->rx.ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
				  igc_msix_ring, 0, q_vector->name,
				  q_vector);
		if (err)
			goto err_free;
	}

	igc_configure_msix(adapter);
	return 0;

err_free:
	/* free already assigned IRQs */
	free_irq(adapter->msix_entries[free_vector++].vector, adapter);

	vector--;
	for (i = 0; i < vector; i++) {
		free_irq(adapter->msix_entries[free_vector++].vector,
			 adapter->q_vector[i]);
	}
err_out:
	return err;
}

/**
 * igc_clear_interrupt_scheme - reset the device to a state of no interrupts
 * @adapter: Pointer to adapter structure
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igc_clear_interrupt_scheme(struct igc_adapter *adapter)
{
	igc_free_q_vectors(adapter);
	igc_reset_interrupt_capability(adapter);
}

/* Need to wait a few seconds after link up to get diagnostic information from
 * the phy
 */
static void igc_update_phy_info(struct timer_list *t)
{
	struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer);

	igc_get_phy_info(&adapter->hw);
}

/**
 * igc_has_link - check shared code for link and determine up/down
 * @adapter: pointer to driver private info
 */
bool igc_has_link(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	bool link_active = false;

	/* get_link_status is set on LSC (link status) interrupt or
	 * rx sequence error interrupt. get_link_status will stay
	 * true until igc_check_for_link establishes link
	 * for copper adapters ONLY
	 */
	if (!hw->mac.get_link_status)
		return true;
	hw->mac.ops.check_for_link(hw);
	link_active = !hw->mac.get_link_status;

	if (hw->mac.type == igc_i225) {
		if (!netif_carrier_ok(adapter->netdev)) {
			adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
		} else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) {
			adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE;
			adapter->link_check_timeout = jiffies;
		}
	}

	return link_active;
}

/**
 * igc_watchdog - Timer Call-back
 * @t: timer for the watchdog
 */
static void igc_watchdog(struct timer_list *t)
{
	struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer);
	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);
}

static void igc_watchdog_task(struct work_struct *work)
{
	struct igc_adapter *adapter = container_of(work,
						   struct igc_adapter,
						   watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;
	struct igc_phy_info *phy = &hw->phy;
	u16 phy_data, retry_count = 20;
	u32 link;
	int i;

	link = igc_has_link(adapter);

	if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) {
		if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
			adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
		else
			link = false;
	}

	if (link) {
		/* Cancel scheduled suspend requests. */
		pm_runtime_resume(netdev->dev.parent);

		if (!netif_carrier_ok(netdev)) {
			u32 ctrl;

			hw->mac.ops.get_speed_and_duplex(hw,
							 &adapter->link_speed,
							 &adapter->link_duplex);

			ctrl = rd32(IGC_CTRL);
			/* Link status message must follow this format */
			netdev_info(netdev,
				    "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
				    adapter->link_speed,
				    adapter->link_duplex == FULL_DUPLEX ?
				    "Full" : "Half",
				    (ctrl & IGC_CTRL_TFCE) &&
				    (ctrl & IGC_CTRL_RFCE) ? "RX/TX" :
				    (ctrl & IGC_CTRL_RFCE) ? "RX" :
				    (ctrl & IGC_CTRL_TFCE) ? "TX" : "None");

			/* disable EEE if enabled */
			if ((adapter->flags & IGC_FLAG_EEE) &&
			    adapter->link_duplex == HALF_DUPLEX) {
				netdev_info(netdev,
					    "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex\n");
				adapter->hw.dev_spec._base.eee_enable = false;
				adapter->flags &= ~IGC_FLAG_EEE;
			}

			/* check if SmartSpeed worked */
			igc_check_downshift(hw);
			if (phy->speed_downgraded)
				netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");

			/* adjust timeout factor according to speed/duplex */
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				adapter->tx_timeout_factor = 14;
				break;
			case SPEED_100:
			case SPEED_1000:
			case SPEED_2500:
				adapter->tx_timeout_factor = 1;
				break;
			}

			/* Once the launch time has been set on the wire, there
			 * is a delay before the link speed can be determined
			 * based on link-up activity. Write into the register
			 * as soon as we know the correct link speed.
			 */
			igc_tsn_adjust_txtime_offset(adapter);

			if (adapter->link_speed != SPEED_1000)
				goto no_wait;

			/* wait for Remote receiver status OK */
retry_read_status:
			if (!igc_read_phy_reg(hw, PHY_1000T_STATUS,
					      &phy_data)) {
				if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) &&
				    retry_count) {
					msleep(100);
					retry_count--;
					goto retry_read_status;
				} else if (!retry_count) {
					netdev_err(netdev, "exceeded max 2 second wait for Remote receiver status\n");
				}
			} else {
				netdev_err(netdev, "failed to read 1000Base-T Status register\n");
			}
no_wait:
			netif_carrier_on(netdev);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGC_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;

			/* Link status message must follow this format */
			netdev_info(netdev, "NIC Link is Down\n");
			netif_carrier_off(netdev);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGC_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));

			/* link is down, time to check for alternate media */
			if (adapter->flags & IGC_FLAG_MAS_ENABLE) {
				if (adapter->flags & IGC_FLAG_MEDIA_RESET) {
					schedule_work(&adapter->reset_task);
					/* return immediately */
					return;
				}
			}
			pm_schedule_suspend(netdev->dev.parent,
					    MSEC_PER_SEC * 5);

		/* also check for alternate media here */
		} else if (!netif_carrier_ok(netdev) &&
			   (adapter->flags & IGC_FLAG_MAS_ENABLE)) {
			if (adapter->flags & IGC_FLAG_MEDIA_RESET) {
				schedule_work(&adapter->reset_task);
				/* return immediately */
				return;
			}
		}
	}

	spin_lock(&adapter->stats64_lock);
	igc_update_stats(adapter);
	spin_unlock(&adapter->stats64_lock);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igc_ring *tx_ring = adapter->tx_ring[i];

		if (!netif_carrier_ok(netdev)) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) {
				adapter->tx_timeout_count++;
				schedule_work(&adapter->reset_task);
				/* return immediately since reset is imminent */
				return;
			}
		}

		/* Force detection of hung controller every watchdog period */
		set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
	}

	/* Cause software interrupt to ensure Rx ring is cleaned */
	if (adapter->flags & IGC_FLAG_HAS_MSIX) {
		u32 eics = 0;

		for (i = 0; i < adapter->num_q_vectors; i++)
			eics |= adapter->q_vector[i]->eims_value;
		wr32(IGC_EICS, eics);
	} else {
		wr32(IGC_ICS, IGC_ICS_RXDMT0);
	}

	igc_ptp_tx_hang(adapter);

	/* Reset the timer */
	if (!test_bit(__IGC_DOWN, &adapter->state)) {
		if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)
			mod_timer(&adapter->watchdog_timer,
				  round_jiffies(jiffies + HZ));
		else
			mod_timer(&adapter->watchdog_timer,
				  round_jiffies(jiffies + 2 * HZ));
	}
}

/**
 * igc_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 */
static irqreturn_t igc_intr_msi(int irq, void *data)
{
	struct igc_adapter *adapter = data;
	struct igc_q_vector *q_vector = adapter->q_vector[0];
	struct igc_hw *hw = &adapter->hw;
	/* read ICR disables interrupts using IAM */
	u32 icr = rd32(IGC_ICR);

	igc_write_itr(q_vector);

	if (icr & IGC_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & IGC_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
		hw->mac.get_link_status = true;
		if (!test_bit(__IGC_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (icr & IGC_ICR_TS)
		igc_tsync_interrupt(adapter);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * igc_intr - Legacy Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 */
static irqreturn_t igc_intr(int irq, void *data)
{
	struct igc_adapter *adapter = data;
	struct igc_q_vector *q_vector = adapter->q_vector[0];
	struct igc_hw *hw = &adapter->hw;
	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
	 * need for the IMC write
	 */
	u32 icr = rd32(IGC_ICR);

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt
	 */
	if (!(icr & IGC_ICR_INT_ASSERTED))
		return IRQ_NONE;

	igc_write_itr(q_vector);

	if (icr & IGC_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & IGC_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
		hw->mac.get_link_status = true;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGC_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (icr & IGC_ICR_TS)
		igc_tsync_interrupt(adapter);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

static void igc_free_irq(struct igc_adapter *adapter)
{
	if (adapter->msix_entries) {
		int vector = 0, i;

		free_irq(adapter->msix_entries[vector++].vector, adapter);

		for (i = 0; i < adapter->num_q_vectors; i++)
			free_irq(adapter->msix_entries[vector++].vector,
				 adapter->q_vector[i]);
	} else {
		free_irq(adapter->pdev->irq, adapter);
	}
}

/**
 * igc_request_irq - initialize interrupts
 * @adapter: Pointer to adapter structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 */
static int igc_request_irq(struct igc_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err = 0;

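	/* Fallback order: MSI-X (one vector per queue pair plus the
	 * "other" vector) -> MSI (single vector, single queue) ->
	 * legacy INTx. Each step below reallocates queues to match the
	 * reduced vector count.
	 */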
5751 if (adapter->flags & IGC_FLAG_HAS_MSIX) {
5752 err = igc_request_msix(adapter);
5753 if (!err)
5754 goto request_done;
5755 /* fall back to MSI */
5756 igc_free_all_tx_resources(adapter);
5757 igc_free_all_rx_resources(adapter);
5758
5759 igc_clear_interrupt_scheme(adapter);
5760 err = igc_init_interrupt_scheme(adapter, false);
5761 if (err)
5762 goto request_done;
5763 igc_setup_all_tx_resources(adapter);
5764 igc_setup_all_rx_resources(adapter);
5765 igc_configure(adapter);
5766 }
5767
5768 igc_assign_vector(adapter->q_vector[0], 0);
5769
5770 if (adapter->flags & IGC_FLAG_HAS_MSI) {
5771 err = request_irq(pdev->irq, &igc_intr_msi, 0,
5772 netdev->name, adapter);
5773 if (!err)
5774 goto request_done;
5775
5776 /* fall back to legacy interrupts */
5777 igc_reset_interrupt_capability(adapter);
5778 adapter->flags &= ~IGC_FLAG_HAS_MSI;
5779 }
5780
5781 err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED,
5782 netdev->name, adapter);
5783
5784 if (err)
5785 netdev_err(netdev, "Error %d getting interrupt\n", err);
5786
5787request_done:
5788 return err;
5789}
5790
5791/**
5792 * __igc_open - Called when a network interface is made active
5793 * @netdev: network interface device structure
5794 * @resuming: boolean indicating if the device is resuming
5795 *
5796 * Returns 0 on success, negative value on failure
5797 *
5798 * The open entry point is called when a network interface is made
5799 * active by the system (IFF_UP). At this point all resources needed
5800 * for transmit and receive operations are allocated, the interrupt
5801 * handler is registered with the OS, the watchdog timer is started,
5802 * and the stack is notified that the interface is ready.
5803 */
5804static int __igc_open(struct net_device *netdev, bool resuming)
5805{
5806 struct igc_adapter *adapter = netdev_priv(netdev);
5807 struct pci_dev *pdev = adapter->pdev;
5808 struct igc_hw *hw = &adapter->hw;
5809 int err = 0;
5810 int i = 0;
5811
5812 /* disallow open during test */
5813
5814 if (test_bit(__IGC_TESTING, &adapter->state)) {
5815 WARN_ON(resuming);
5816 return -EBUSY;
5817 }
5818
5819 if (!resuming)
5820 pm_runtime_get_sync(&pdev->dev);
5821
5822 netif_carrier_off(netdev);
5823
5824 /* allocate transmit descriptors */
5825 err = igc_setup_all_tx_resources(adapter);
5826 if (err)
5827 goto err_setup_tx;
5828
5829 /* allocate receive descriptors */
5830 err = igc_setup_all_rx_resources(adapter);
5831 if (err)
5832 goto err_setup_rx;
5833
5834 igc_power_up_link(adapter);
5835
5836 igc_configure(adapter);
5837
5838 err = igc_request_irq(adapter);
5839 if (err)
5840 goto err_req_irq;
5841
5842 /* Notify the stack of the actual queue counts. */
5843 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
5844 if (err)
5845 goto err_set_queues;
5846
5847 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
5848 if (err)
5849 goto err_set_queues;
5850
5851 clear_bit(__IGC_DOWN, &adapter->state);
5852
5853 for (i = 0; i < adapter->num_q_vectors; i++)
5854 napi_enable(&adapter->q_vector[i]->napi);
5855
5856 /* Clear any pending interrupts. */
5857 rd32(IGC_ICR);
5858 igc_irq_enable(adapter);
5859
5860 if (!resuming)
5861 pm_runtime_put(&pdev->dev);
5862
5863 netif_tx_start_all_queues(netdev);
5864
5865 /* start the watchdog. */
5866 hw->mac.get_link_status = true;
5867 schedule_work(&adapter->watchdog_task);
5868
5869 return IGC_SUCCESS;
5870
5871err_set_queues:
5872 igc_free_irq(adapter);
5873err_req_irq:
5874 igc_release_hw_control(adapter);
5875 igc_power_down_phy_copper_base(&adapter->hw);
5876 igc_free_all_rx_resources(adapter);
5877err_setup_rx:
5878 igc_free_all_tx_resources(adapter);
5879err_setup_tx:
5880 igc_reset(adapter);
5881 if (!resuming)
5882 pm_runtime_put(&pdev->dev);
5883
5884 return err;
5885}
5886
5887int igc_open(struct net_device *netdev)
5888{
5889 return __igc_open(netdev, false);
5890}
5891
5892/**
5893 * __igc_close - Disables a network interface
5894 * @netdev: network interface device structure
5895 * @suspending: boolean indicating the device is suspending
5896 *
5897 * Returns 0, this is not allowed to fail
5898 *
5899 * The close entry point is called when an interface is de-activated
5900 * by the OS. The hardware is still under the driver's control, but
5901 * needs to be disabled. A global MAC reset is issued to stop the
5902 * hardware, and all transmit and receive resources are freed.
5903 */
5904static int __igc_close(struct net_device *netdev, bool suspending)
5905{
5906 struct igc_adapter *adapter = netdev_priv(netdev);
5907 struct pci_dev *pdev = adapter->pdev;
5908
5909 WARN_ON(test_bit(__IGC_RESETTING, &adapter->state));
5910
5911 if (!suspending)
5912 pm_runtime_get_sync(&pdev->dev);
5913
5914 igc_down(adapter);
5915
5916 igc_release_hw_control(adapter);
5917
5918 igc_free_irq(adapter);
5919
5920 igc_free_all_tx_resources(adapter);
5921 igc_free_all_rx_resources(adapter);
5922
5923 if (!suspending)
5924 pm_runtime_put_sync(&pdev->dev);
5925
5926 return 0;
5927}
5928
5929int igc_close(struct net_device *netdev)
5930{
5931 if (netif_device_present(netdev) || netdev->dismantle)
5932 return __igc_close(netdev, false);
5933 return 0;
5934}
5935
5936/**
5937 * igc_ioctl - Access the hwtstamp interface
5938 * @netdev: network interface device structure
5939 * @ifr: interface request data
5940 * @cmd: ioctl command
 */
5942static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
5943{
5944 switch (cmd) {
5945 case SIOCGHWTSTAMP:
5946 return igc_ptp_get_ts_config(netdev, ifr);
5947 case SIOCSHWTSTAMP:
5948 return igc_ptp_set_ts_config(netdev, ifr);
5949 default:
5950 return -EOPNOTSUPP;
5951 }
5952}
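
/* Userspace reaches the handlers above through the standard hwtstamp
 * ioctls. A minimal, hedged sketch (interface name and filter choice are
 * illustrative; error handling omitted):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr = { 0 };
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * The SIOCSHWTSTAMP call is routed to igc_ptp_set_ts_config() above;
 * SIOCGHWTSTAMP reads the current configuration back the same way.
 */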
5953
5954static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue,
5955 bool enable)
5956{
5957 struct igc_ring *ring;
5958
5959 if (queue < 0 || queue >= adapter->num_tx_queues)
5960 return -EINVAL;
5961
5962 ring = adapter->tx_ring[queue];
5963 ring->launchtime_enable = enable;
5964
5965 return 0;
5966}
5967
5968static bool is_base_time_past(ktime_t base_time, const struct timespec64 *now)
5969{
5970 struct timespec64 b;
5971
5972 b = ktime_to_timespec64(base_time);
5973
5974 return timespec64_compare(now, &b) > 0;
5975}
5976
5977static bool validate_schedule(struct igc_adapter *adapter,
5978 const struct tc_taprio_qopt_offload *qopt)
5979{
5980 int queue_uses[IGC_MAX_TX_QUEUES] = { };
5981 struct timespec64 now;
5982 size_t n;
5983
5984 if (qopt->cycle_time_extension)
5985 return false;
5986
5987 igc_ptp_read(adapter, &now);
5988
	/* If we program the controller's BASET registers with a time
	 * in the future, it will hold all packets until that time,
	 * causing a lot of Tx hangs. To avoid that, reject schedules
	 * that would start in the future.
	 */
5994 if (!is_base_time_past(qopt->base_time, &now))
5995 return false;
5996
5997 for (n = 0; n < qopt->num_entries; n++) {
5998 const struct tc_taprio_sched_entry *e, *prev;
5999 int i;
6000
6001 prev = n ? &qopt->entries[n - 1] : NULL;
6002 e = &qopt->entries[n];
6003
6004 /* i225 only supports "global" frame preemption
6005 * settings.
6006 */
6007 if (e->command != TC_TAPRIO_CMD_SET_GATES)
6008 return false;
6009
6010 for (i = 0; i < adapter->num_tx_queues; i++) {
6011 if (e->gate_mask & BIT(i))
6012 queue_uses[i]++;
6013
			/* Hardware limitation: a queue's gate cannot be
			 * opened and closed more than once per cycle
			 * unless consecutive entries keep it open (note
			 * that queue_uses[i] > 1 implies n >= 1, so prev
			 * is non-NULL here). Reject schedules that
			 * violate this.
			 */
6018 if (queue_uses[i] > 1 &&
6019 !(prev->gate_mask & BIT(i)))
6020 return false;
6021 }
6022 }
6023
6024 return true;
6025}
6026
6027static int igc_tsn_enable_launchtime(struct igc_adapter *adapter,
6028 struct tc_etf_qopt_offload *qopt)
6029{
6030 struct igc_hw *hw = &adapter->hw;
6031 int err;
6032
6033 if (hw->mac.type != igc_i225)
6034 return -EOPNOTSUPP;
6035
6036 err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable);
6037 if (err)
6038 return err;
6039
6040 return igc_tsn_offload_apply(adapter);
6041}
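
/* Launch time offload is normally enabled from userspace through the
 * etf qdisc. A hedged example (device name, handles and delta are
 * illustrative):
 *
 *	tc qdisc replace dev eth0 handle 100: parent root mqprio \
 *		num_tc 3 map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
 *		queues 1@0 1@1 2@2 hw 0
 *	tc qdisc replace dev eth0 parent 100:1 etf \
 *		clockid CLOCK_TAI delta 500000 offload
 *
 * The "offload" flag makes the etf qdisc call ndo_setup_tc() with
 * TC_SETUP_QDISC_ETF, which lands in igc_tsn_enable_launchtime().
 */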
6042
6043static int igc_tsn_clear_schedule(struct igc_adapter *adapter)
6044{
6045 int i;
6046
6047 adapter->base_time = 0;
6048 adapter->cycle_time = NSEC_PER_SEC;
6049
6050 for (i = 0; i < adapter->num_tx_queues; i++) {
6051 struct igc_ring *ring = adapter->tx_ring[i];
6052
6053 ring->start_time = 0;
6054 ring->end_time = NSEC_PER_SEC;
6055 }
6056
6057 return 0;
6058}
6059
6060static int igc_save_qbv_schedule(struct igc_adapter *adapter,
6061 struct tc_taprio_qopt_offload *qopt)
6062{
6063 bool queue_configured[IGC_MAX_TX_QUEUES] = { };
6064 u32 start_time = 0, end_time = 0;
6065 size_t n;
6066 int i;
6067
6068 adapter->qbv_enable = qopt->enable;
6069
6070 if (!qopt->enable)
6071 return igc_tsn_clear_schedule(adapter);
6072
6073 if (qopt->base_time < 0)
6074 return -ERANGE;
6075
6076 if (adapter->base_time)
6077 return -EALREADY;
6078
6079 if (!validate_schedule(adapter, qopt))
6080 return -EINVAL;
6081
6082 adapter->cycle_time = qopt->cycle_time;
6083 adapter->base_time = qopt->base_time;
6084
6085 for (n = 0; n < qopt->num_entries; n++) {
6086 struct tc_taprio_sched_entry *e = &qopt->entries[n];
6087
6088 end_time += e->interval;
6089
6090 /* If any of the conditions below are true, we need to manually
6091 * control the end time of the cycle.
6092 * 1. Qbv users can specify a cycle time that is not equal
6093 * to the total GCL intervals. Hence, recalculation is
6094 * necessary here to exclude the time interval that
6095 * exceeds the cycle time.
6096 * 2. According to IEEE Std. 802.1Q-2018 section 8.6.9.2,
6097 * once the end of the list is reached, it will switch
6098 * to the END_OF_CYCLE state and leave the gates in the
6099 * same state until the next cycle is started.
6100 */
6101 if (end_time > adapter->cycle_time ||
6102 n + 1 == qopt->num_entries)
6103 end_time = adapter->cycle_time;
6104
6105 for (i = 0; i < adapter->num_tx_queues; i++) {
6106 struct igc_ring *ring = adapter->tx_ring[i];
6107
6108 if (!(e->gate_mask & BIT(i)))
6109 continue;
6110
6111 /* Check whether a queue stays open for more than one
6112 * entry. If so, keep the start and advance the end
6113 * time.
6114 */
6115 if (!queue_configured[i])
6116 ring->start_time = start_time;
6117 ring->end_time = end_time;
6118
6119 queue_configured[i] = true;
6120 }
6121
6122 start_time += e->interval;
6123 }
6124
	/* If a queue was not referenced by any entry, set both its
	 * start and end time to the cycle end time (a zero-length
	 * window).
	 */
6128 for (i = 0; i < adapter->num_tx_queues; i++) {
6129 if (!queue_configured[i]) {
6130 struct igc_ring *ring = adapter->tx_ring[i];
6131
6132 ring->start_time = end_time;
6133 ring->end_time = end_time;
6134 }
6135 }
6136
6137 return 0;
6138}
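
/* Worked example with illustrative numbers: given cycle_time = 1000000
 * and two entries { gate_mask 0x1, interval 300000 } and
 * { gate_mask 0xe, interval 700000 }, the loop above programs queue 0
 * with start 0 / end 300000, and queues 1-3 with start 300000 /
 * end 1000000 (the last entry's end time is clamped to the cycle time).
 */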
6139
6140static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter,
6141 struct tc_taprio_qopt_offload *qopt)
6142{
6143 struct igc_hw *hw = &adapter->hw;
6144 int err;
6145
6146 if (hw->mac.type != igc_i225)
6147 return -EOPNOTSUPP;
6148
6149 err = igc_save_qbv_schedule(adapter, qopt);
6150 if (err)
6151 return err;
6152
6153 return igc_tsn_offload_apply(adapter);
6154}
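
/* A matching userspace configuration is a fully offloaded taprio
 * schedule, e.g. (all values illustrative; note that
 * validate_schedule() requires the base time to be in the past):
 *
 *	tc qdisc replace dev eth0 parent root taprio num_tc 4 \
 *		map 3 2 1 0 3 3 3 3 3 3 3 3 3 3 3 3 \
 *		queues 1@0 1@1 1@2 1@3 base-time 1000 \
 *		sched-entry S 01 300000 sched-entry S 0e 700000 \
 *		flags 0x2
 *
 * flags 0x2 requests full offload, reaching igc_setup_tc() with
 * TC_SETUP_QDISC_TAPRIO.
 */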
6155
6156static int igc_save_cbs_params(struct igc_adapter *adapter, int queue,
6157 bool enable, int idleslope, int sendslope,
6158 int hicredit, int locredit)
6159{
6160 bool cbs_status[IGC_MAX_SR_QUEUES] = { false };
6161 struct net_device *netdev = adapter->netdev;
6162 struct igc_ring *ring;
6163 int i;
6164
	/* i225 has only two sets of credit-based shaper logic, so CBS
	 * is supported only on the two highest priority queues.
	 */
6168 if (queue < 0 || queue > 1)
6169 return -EINVAL;
6170
6171 ring = adapter->tx_ring[queue];
6172
6173 for (i = 0; i < IGC_MAX_SR_QUEUES; i++)
6174 if (adapter->tx_ring[i])
6175 cbs_status[i] = adapter->tx_ring[i]->cbs_enable;
6176
6177 /* CBS should be enabled on the highest priority queue first in order
6178 * for the CBS algorithm to operate as intended.
6179 */
6180 if (enable) {
6181 if (queue == 1 && !cbs_status[0]) {
6182 netdev_err(netdev,
6183 "Enabling CBS on queue1 before queue0\n");
6184 return -EINVAL;
6185 }
6186 } else {
6187 if (queue == 0 && cbs_status[1]) {
6188 netdev_err(netdev,
6189 "Disabling CBS on queue0 before queue1\n");
6190 return -EINVAL;
6191 }
6192 }
6193
6194 ring->cbs_enable = enable;
6195 ring->idleslope = idleslope;
6196 ring->sendslope = sendslope;
6197 ring->hicredit = hicredit;
6198 ring->locredit = locredit;
6199
6200 return 0;
6201}
6202
6203static int igc_tsn_enable_cbs(struct igc_adapter *adapter,
6204 struct tc_cbs_qopt_offload *qopt)
6205{
6206 struct igc_hw *hw = &adapter->hw;
6207 int err;
6208
6209 if (hw->mac.type != igc_i225)
6210 return -EOPNOTSUPP;
6211
6212 if (qopt->queue < 0 || qopt->queue > 1)
6213 return -EINVAL;
6214
6215 err = igc_save_cbs_params(adapter, qopt->queue, qopt->enable,
6216 qopt->idleslope, qopt->sendslope,
6217 qopt->hicredit, qopt->locredit);
6218 if (err)
6219 return err;
6220
6221 return igc_tsn_offload_apply(adapter);
6222}
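
/* Example CBS setup from userspace (all values illustrative; queue 0
 * must be enabled before queue 1, as enforced in igc_save_cbs_params()):
 *
 *	tc qdisc replace dev eth0 parent 100:1 cbs \
 *		idleslope 20000 sendslope -980000 \
 *		hicredit 30 locredit -1470 offload 1
 *
 * With "offload 1" the cbs qdisc calls ndo_setup_tc() with
 * TC_SETUP_QDISC_CBS instead of shaping in software.
 */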
6223
6224static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
6225 void *type_data)
6226{
6227 struct igc_adapter *adapter = netdev_priv(dev);
6228
6229 switch (type) {
6230 case TC_SETUP_QDISC_TAPRIO:
6231 return igc_tsn_enable_qbv_scheduling(adapter, type_data);
6232
6233 case TC_SETUP_QDISC_ETF:
6234 return igc_tsn_enable_launchtime(adapter, type_data);
6235
6236 case TC_SETUP_QDISC_CBS:
6237 return igc_tsn_enable_cbs(adapter, type_data);
6238
6239 default:
6240 return -EOPNOTSUPP;
6241 }
6242}
6243
6244static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6245{
6246 struct igc_adapter *adapter = netdev_priv(dev);
6247
6248 switch (bpf->command) {
6249 case XDP_SETUP_PROG:
6250 return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack);
6251 case XDP_SETUP_XSK_POOL:
6252 return igc_xdp_setup_pool(adapter, bpf->xsk.pool,
6253 bpf->xsk.queue_id);
6254 default:
6255 return -EOPNOTSUPP;
6256 }
6257}
6258
6259static int igc_xdp_xmit(struct net_device *dev, int num_frames,
6260 struct xdp_frame **frames, u32 flags)
6261{
6262 struct igc_adapter *adapter = netdev_priv(dev);
6263 int cpu = smp_processor_id();
6264 struct netdev_queue *nq;
6265 struct igc_ring *ring;
6266 int i, drops;
6267
6268 if (unlikely(test_bit(__IGC_DOWN, &adapter->state)))
6269 return -ENETDOWN;
6270
6271 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6272 return -EINVAL;
6273
6274 ring = igc_xdp_get_tx_ring(adapter, cpu);
6275 nq = txring_txq(ring);
6276
6277 __netif_tx_lock(nq, cpu);
6278
6279 drops = 0;
6280 for (i = 0; i < num_frames; i++) {
6281 int err;
6282 struct xdp_frame *xdpf = frames[i];
6283
6284 err = igc_xdp_init_tx_descriptor(ring, xdpf);
6285 if (err) {
6286 xdp_return_frame_rx_napi(xdpf);
6287 drops++;
6288 }
6289 }
6290
6291 if (flags & XDP_XMIT_FLUSH)
6292 igc_flush_tx_descriptors(ring);
6293
6294 __netif_tx_unlock(nq);
6295
6296 return num_frames - drops;
6297}
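
/* Frames reach igc_xdp_xmit() when an XDP program redirects packets
 * toward this device (XDP_REDIRECT, e.g. through a devmap); the core
 * batches frames and typically sets XDP_XMIT_FLUSH on the final batch
 * of a NAPI poll so the tail pointer is only bumped once.
 */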
6298
6299static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter,
6300 struct igc_q_vector *q_vector)
6301{
6302 struct igc_hw *hw = &adapter->hw;
6303 u32 eics = 0;
6304
6305 eics |= q_vector->eims_value;
6306 wr32(IGC_EICS, eics);
6307}
6308
6309int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
6310{
6311 struct igc_adapter *adapter = netdev_priv(dev);
6312 struct igc_q_vector *q_vector;
6313 struct igc_ring *ring;
6314
6315 if (test_bit(__IGC_DOWN, &adapter->state))
6316 return -ENETDOWN;
6317
6318 if (!igc_xdp_is_enabled(adapter))
6319 return -ENXIO;
6320
6321 if (queue_id >= adapter->num_rx_queues)
6322 return -EINVAL;
6323
6324 ring = adapter->rx_ring[queue_id];
6325
6326 if (!ring->xsk_pool)
6327 return -ENXIO;
6328
6329 q_vector = adapter->q_vector[queue_id];
6330 if (!napi_if_scheduled_mark_missed(&q_vector->napi))
6331 igc_trigger_rxtxq_interrupt(adapter, q_vector);
6332
6333 return 0;
6334}
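
/* igc_xsk_wakeup() is the driver half of the AF_XDP need-wakeup
 * protocol. A hedged userspace sketch (libxdp helpers; socket setup
 * assumed elsewhere):
 *
 *	if (xsk_ring_prod__needs_wakeup(&xsk->tx))
 *		sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT,
 *		       NULL, 0);
 *
 * The sendto() reaches this function via ndo_xsk_wakeup; if NAPI is
 * not already scheduled, a software interrupt is fired for the queue
 * pair.
 */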
6335
6336static const struct net_device_ops igc_netdev_ops = {
6337 .ndo_open = igc_open,
6338 .ndo_stop = igc_close,
6339 .ndo_start_xmit = igc_xmit_frame,
6340 .ndo_set_rx_mode = igc_set_rx_mode,
6341 .ndo_set_mac_address = igc_set_mac,
6342 .ndo_change_mtu = igc_change_mtu,
6343 .ndo_tx_timeout = igc_tx_timeout,
6344 .ndo_get_stats64 = igc_get_stats64,
6345 .ndo_fix_features = igc_fix_features,
6346 .ndo_set_features = igc_set_features,
6347 .ndo_features_check = igc_features_check,
6348 .ndo_eth_ioctl = igc_ioctl,
6349 .ndo_setup_tc = igc_setup_tc,
6350 .ndo_bpf = igc_bpf,
6351 .ndo_xdp_xmit = igc_xdp_xmit,
6352 .ndo_xsk_wakeup = igc_xsk_wakeup,
6353};
6354
6355/* PCIe configuration access */
6356void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
6357{
6358 struct igc_adapter *adapter = hw->back;
6359
6360 pci_read_config_word(adapter->pdev, reg, value);
6361}
6362
6363void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
6364{
6365 struct igc_adapter *adapter = hw->back;
6366
6367 pci_write_config_word(adapter->pdev, reg, *value);
6368}
6369
6370s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
6371{
6372 struct igc_adapter *adapter = hw->back;
6373
6374 if (!pci_is_pcie(adapter->pdev))
6375 return -IGC_ERR_CONFIG;
6376
6377 pcie_capability_read_word(adapter->pdev, reg, value);
6378
6379 return IGC_SUCCESS;
6380}
6381
6382s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
6383{
6384 struct igc_adapter *adapter = hw->back;
6385
6386 if (!pci_is_pcie(adapter->pdev))
6387 return -IGC_ERR_CONFIG;
6388
6389 pcie_capability_write_word(adapter->pdev, reg, *value);
6390
6391 return IGC_SUCCESS;
6392}
6393
6394u32 igc_rd32(struct igc_hw *hw, u32 reg)
6395{
6396 struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw);
6397 u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
6398 u32 value = 0;
6399
6400 if (IGC_REMOVED(hw_addr))
6401 return ~value;
6402
6403 value = readl(&hw_addr[reg]);
6404
6405 /* reads should not return all F's */
6406 if (!(~value) && (!reg || !(~readl(hw_addr)))) {
6407 struct net_device *netdev = igc->netdev;
6408
6409 hw->hw_addr = NULL;
6410 netif_device_detach(netdev);
6411 netdev_err(netdev, "PCIe link lost, device now detached\n");
6412 WARN(pci_device_is_present(igc->pdev),
6413 "igc: Failed to read reg 0x%x!\n", reg);
6414 }
6415
6416 return value;
6417}
6418
6419/**
6420 * igc_probe - Device Initialization Routine
6421 * @pdev: PCI device information struct
6422 * @ent: entry in igc_pci_tbl
6423 *
6424 * Returns 0 on success, negative on failure
6425 *
 * igc_probe initializes an adapter identified by a pci_dev structure.
 * It performs OS initialization, configures the adapter private
 * structure, and resets the hardware.
6429 */
6430static int igc_probe(struct pci_dev *pdev,
6431 const struct pci_device_id *ent)
6432{
6433 struct igc_adapter *adapter;
6434 struct net_device *netdev;
6435 struct igc_hw *hw;
6436 const struct igc_info *ei = igc_info_tbl[ent->driver_data];
6437 int err;
6438
6439 err = pci_enable_device_mem(pdev);
6440 if (err)
6441 return err;
6442
6443 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
6444 if (err) {
6445 dev_err(&pdev->dev,
6446 "No usable DMA configuration, aborting\n");
6447 goto err_dma;
6448 }
6449
6450 err = pci_request_mem_regions(pdev, igc_driver_name);
6451 if (err)
6452 goto err_pci_reg;
6453
6454 pci_enable_pcie_error_reporting(pdev);
6455
6456 err = pci_enable_ptm(pdev, NULL);
6457 if (err < 0)
6458 dev_info(&pdev->dev, "PCIe PTM not supported by PCIe bus/controller\n");
6459
6460 pci_set_master(pdev);
6461
6462 err = -ENOMEM;
6463 netdev = alloc_etherdev_mq(sizeof(struct igc_adapter),
6464 IGC_MAX_TX_QUEUES);
6465
6466 if (!netdev)
6467 goto err_alloc_etherdev;
6468
6469 SET_NETDEV_DEV(netdev, &pdev->dev);
6470
6471 pci_set_drvdata(pdev, netdev);
6472 adapter = netdev_priv(netdev);
6473 adapter->netdev = netdev;
6474 adapter->pdev = pdev;
6475 hw = &adapter->hw;
6476 hw->back = adapter;
6477 adapter->port_num = hw->bus.func;
6478 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
6479
6480 err = pci_save_state(pdev);
6481 if (err)
6482 goto err_ioremap;
6483
6484 err = -EIO;
6485 adapter->io_addr = ioremap(pci_resource_start(pdev, 0),
6486 pci_resource_len(pdev, 0));
6487 if (!adapter->io_addr)
6488 goto err_ioremap;
6489
6490 /* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */
6491 hw->hw_addr = adapter->io_addr;
6492
6493 netdev->netdev_ops = &igc_netdev_ops;
6494 igc_ethtool_set_ops(netdev);
6495 netdev->watchdog_timeo = 5 * HZ;
6496
6497 netdev->mem_start = pci_resource_start(pdev, 0);
6498 netdev->mem_end = pci_resource_end(pdev, 0);
6499
6500 /* PCI config space info */
6501 hw->vendor_id = pdev->vendor;
6502 hw->device_id = pdev->device;
6503 hw->revision_id = pdev->revision;
6504 hw->subsystem_vendor_id = pdev->subsystem_vendor;
6505 hw->subsystem_device_id = pdev->subsystem_device;
6506
6507 /* Copy the default MAC and PHY function pointers */
6508 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
6509 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
6510
6511 /* Initialize skew-specific constants */
6512 err = ei->get_invariants(hw);
6513 if (err)
6514 goto err_sw_init;
6515
	/* Add supported features to the features list */
6517 netdev->features |= NETIF_F_SG;
6518 netdev->features |= NETIF_F_TSO;
6519 netdev->features |= NETIF_F_TSO6;
6520 netdev->features |= NETIF_F_TSO_ECN;
6521 netdev->features |= NETIF_F_RXCSUM;
6522 netdev->features |= NETIF_F_HW_CSUM;
6523 netdev->features |= NETIF_F_SCTP_CRC;
6524 netdev->features |= NETIF_F_HW_TC;
6525
6526#define IGC_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
6527 NETIF_F_GSO_GRE_CSUM | \
6528 NETIF_F_GSO_IPXIP4 | \
6529 NETIF_F_GSO_IPXIP6 | \
6530 NETIF_F_GSO_UDP_TUNNEL | \
6531 NETIF_F_GSO_UDP_TUNNEL_CSUM)
6532
6533 netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES;
6534 netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES;
6535
6536 /* setup the private structure */
6537 err = igc_sw_init(adapter);
6538 if (err)
6539 goto err_sw_init;
6540
6541 /* copy netdev features into list of user selectable features */
6542 netdev->hw_features |= NETIF_F_NTUPLE;
6543 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
6544 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
6545 netdev->hw_features |= netdev->features;
6546
6547 netdev->features |= NETIF_F_HIGHDMA;
6548
6549 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
6550 netdev->mpls_features |= NETIF_F_HW_CSUM;
6551 netdev->hw_enc_features |= netdev->vlan_features;
6552
6553 /* MTU range: 68 - 9216 */
6554 netdev->min_mtu = ETH_MIN_MTU;
6555 netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
6556
6557 /* before reading the NVM, reset the controller to put the device in a
6558 * known good starting state
6559 */
6560 hw->mac.ops.reset_hw(hw);
6561
6562 if (igc_get_flash_presence_i225(hw)) {
6563 if (hw->nvm.ops.validate(hw) < 0) {
6564 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
6565 err = -EIO;
6566 goto err_eeprom;
6567 }
6568 }
6569
6570 if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
6571 /* copy the MAC address out of the NVM */
6572 if (hw->mac.ops.read_mac_addr(hw))
6573 dev_err(&pdev->dev, "NVM Read Error\n");
6574 }
6575
6576 eth_hw_addr_set(netdev, hw->mac.addr);
6577
6578 if (!is_valid_ether_addr(netdev->dev_addr)) {
6579 dev_err(&pdev->dev, "Invalid MAC Address\n");
6580 err = -EIO;
6581 goto err_eeprom;
6582 }
6583
6584 /* configure RXPBSIZE and TXPBSIZE */
6585 wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT);
6586 wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
6587
6588 timer_setup(&adapter->watchdog_timer, igc_watchdog, 0);
6589 timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0);
6590
6591 INIT_WORK(&adapter->reset_task, igc_reset_task);
6592 INIT_WORK(&adapter->watchdog_task, igc_watchdog_task);
6593
6594 /* Initialize link properties that are user-changeable */
6595 adapter->fc_autoneg = true;
6596 hw->mac.autoneg = true;
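	/* 0xaf advertises 10/100 Mbps half and full duplex plus 1000
	 * and 2500 Mbps full duplex (per the ADVERTISE_* bit layout in
	 * igc_defines.h).
	 */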
6597 hw->phy.autoneg_advertised = 0xaf;
6598
6599 hw->fc.requested_mode = igc_fc_default;
6600 hw->fc.current_mode = igc_fc_default;
6601
6602 /* By default, support wake on port A */
6603 adapter->flags |= IGC_FLAG_WOL_SUPPORTED;
6604
6605 /* initialize the wol settings based on the eeprom settings */
6606 if (adapter->flags & IGC_FLAG_WOL_SUPPORTED)
6607 adapter->wol |= IGC_WUFC_MAG;
6608
6609 device_set_wakeup_enable(&adapter->pdev->dev,
6610 adapter->flags & IGC_FLAG_WOL_SUPPORTED);
6611
6612 igc_ptp_init(adapter);
6613
6614 igc_tsn_clear_schedule(adapter);
6615
6616 /* reset the hardware with the new settings */
6617 igc_reset(adapter);
6618
6619 /* let the f/w know that the h/w is now under the control of the
6620 * driver.
6621 */
6622 igc_get_hw_control(adapter);
6623
6624 strncpy(netdev->name, "eth%d", IFNAMSIZ);
6625 err = register_netdev(netdev);
6626 if (err)
6627 goto err_register;
6628
6629 /* carrier off reporting is important to ethtool even BEFORE open */
6630 netif_carrier_off(netdev);
6631
	/* keep a copy of the board-specific info for this adapter */
	adapter->ei = *ei;
6634
6635 /* print pcie link status and MAC address */
6636 pcie_print_link_status(pdev);
6637 netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);
6638
6639 dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
6640 /* Disable EEE for internal PHY devices */
6641 hw->dev_spec._base.eee_enable = false;
6642 adapter->flags &= ~IGC_FLAG_EEE;
6643 igc_set_eee_i225(hw, false, false, false);
6644
6645 pm_runtime_put_noidle(&pdev->dev);
6646
6647 return 0;
6648
6649err_register:
6650 igc_release_hw_control(adapter);
6651err_eeprom:
6652 if (!igc_check_reset_block(hw))
6653 igc_reset_phy(hw);
6654err_sw_init:
6655 igc_clear_interrupt_scheme(adapter);
6656 iounmap(adapter->io_addr);
6657err_ioremap:
6658 free_netdev(netdev);
6659err_alloc_etherdev:
6660 pci_disable_pcie_error_reporting(pdev);
6661 pci_release_mem_regions(pdev);
6662err_pci_reg:
6663err_dma:
6664 pci_disable_device(pdev);
6665 return err;
6666}
6667
6668/**
6669 * igc_remove - Device Removal Routine
6670 * @pdev: PCI device information struct
6671 *
6672 * igc_remove is called by the PCI subsystem to alert the driver
6673 * that it should release a PCI device. This could be caused by a
6674 * Hot-Plug event, or because the driver is going to be removed from
6675 * memory.
6676 */
6677static void igc_remove(struct pci_dev *pdev)
6678{
6679 struct net_device *netdev = pci_get_drvdata(pdev);
6680 struct igc_adapter *adapter = netdev_priv(netdev);
6681
6682 pm_runtime_get_noresume(&pdev->dev);
6683
6684 igc_flush_nfc_rules(adapter);
6685
6686 igc_ptp_stop(adapter);
6687
6688 set_bit(__IGC_DOWN, &adapter->state);
6689
6690 del_timer_sync(&adapter->watchdog_timer);
6691 del_timer_sync(&adapter->phy_info_timer);
6692
6693 cancel_work_sync(&adapter->reset_task);
6694 cancel_work_sync(&adapter->watchdog_task);
6695
6696 /* Release control of h/w to f/w. If f/w is AMT enabled, this
6697 * would have already happened in close and is redundant.
6698 */
6699 igc_release_hw_control(adapter);
6700 unregister_netdev(netdev);
6701
6702 igc_clear_interrupt_scheme(adapter);
6703 pci_iounmap(pdev, adapter->io_addr);
6704 pci_release_mem_regions(pdev);
6705
6706 free_netdev(netdev);
6707
6708 pci_disable_pcie_error_reporting(pdev);
6709
6710 pci_disable_device(pdev);
6711}
6712
6713static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake,
6714 bool runtime)
6715{
6716 struct net_device *netdev = pci_get_drvdata(pdev);
6717 struct igc_adapter *adapter = netdev_priv(netdev);
6718 u32 wufc = runtime ? IGC_WUFC_LNKC : adapter->wol;
6719 struct igc_hw *hw = &adapter->hw;
6720 u32 ctrl, rctl, status;
6721 bool wake;
6722
6723 rtnl_lock();
6724 netif_device_detach(netdev);
6725
6726 if (netif_running(netdev))
6727 __igc_close(netdev, true);
6728
6729 igc_ptp_suspend(adapter);
6730
6731 igc_clear_interrupt_scheme(adapter);
6732 rtnl_unlock();
6733
6734 status = rd32(IGC_STATUS);
6735 if (status & IGC_STATUS_LU)
6736 wufc &= ~IGC_WUFC_LNKC;
6737
6738 if (wufc) {
6739 igc_setup_rctl(adapter);
6740 igc_set_rx_mode(netdev);
6741
6742 /* turn on all-multi mode if wake on multicast is enabled */
6743 if (wufc & IGC_WUFC_MC) {
6744 rctl = rd32(IGC_RCTL);
6745 rctl |= IGC_RCTL_MPE;
6746 wr32(IGC_RCTL, rctl);
6747 }
6748
6749 ctrl = rd32(IGC_CTRL);
6750 ctrl |= IGC_CTRL_ADVD3WUC;
6751 wr32(IGC_CTRL, ctrl);
6752
6753 /* Allow time for pending master requests to run */
6754 igc_disable_pcie_master(hw);
6755
6756 wr32(IGC_WUC, IGC_WUC_PME_EN);
6757 wr32(IGC_WUFC, wufc);
6758 } else {
6759 wr32(IGC_WUC, 0);
6760 wr32(IGC_WUFC, 0);
6761 }
6762
6763 wake = wufc || adapter->en_mng_pt;
6764 if (!wake)
6765 igc_power_down_phy_copper_base(&adapter->hw);
6766 else
6767 igc_power_up_link(adapter);
6768
6769 if (enable_wake)
6770 *enable_wake = wake;
6771
6772 /* Release control of h/w to f/w. If f/w is AMT enabled, this
6773 * would have already happened in close and is redundant.
6774 */
6775 igc_release_hw_control(adapter);
6776
6777 pci_disable_device(pdev);
6778
6779 return 0;
6780}
6781
6782#ifdef CONFIG_PM
6783static int __maybe_unused igc_runtime_suspend(struct device *dev)
6784{
	return __igc_shutdown(to_pci_dev(dev), NULL, true);
6786}
6787
6788static void igc_deliver_wake_packet(struct net_device *netdev)
6789{
6790 struct igc_adapter *adapter = netdev_priv(netdev);
6791 struct igc_hw *hw = &adapter->hw;
6792 struct sk_buff *skb;
6793 u32 wupl;
6794
6795 wupl = rd32(IGC_WUPL) & IGC_WUPL_MASK;
6796
6797 /* WUPM stores only the first 128 bytes of the wake packet.
6798 * Read the packet only if we have the whole thing.
6799 */
6800 if (wupl == 0 || wupl > IGC_WUPM_BYTES)
6801 return;
6802
6803 skb = netdev_alloc_skb_ip_align(netdev, IGC_WUPM_BYTES);
6804 if (!skb)
6805 return;
6806
6807 skb_put(skb, wupl);
6808
	/* Round the length up so whole 32-bit words are read from WUPM */
6810 wupl = roundup(wupl, 4);
6811
6812 memcpy_fromio(skb->data, hw->hw_addr + IGC_WUPM_REG(0), wupl);
6813
6814 skb->protocol = eth_type_trans(skb, netdev);
6815 netif_rx(skb);
6816}
6817
6818static int __maybe_unused igc_resume(struct device *dev)
6819{
6820 struct pci_dev *pdev = to_pci_dev(dev);
6821 struct net_device *netdev = pci_get_drvdata(pdev);
6822 struct igc_adapter *adapter = netdev_priv(netdev);
6823 struct igc_hw *hw = &adapter->hw;
	u32 val;
	int err;
6825
6826 pci_set_power_state(pdev, PCI_D0);
6827 pci_restore_state(pdev);
6828 pci_save_state(pdev);
6829
6830 if (!pci_device_is_present(pdev))
6831 return -ENODEV;
6832 err = pci_enable_device_mem(pdev);
6833 if (err) {
6834 netdev_err(netdev, "Cannot enable PCI device from suspend\n");
6835 return err;
6836 }
6837 pci_set_master(pdev);
6838
6839 pci_enable_wake(pdev, PCI_D3hot, 0);
6840 pci_enable_wake(pdev, PCI_D3cold, 0);
6841
6842 if (igc_init_interrupt_scheme(adapter, true)) {
6843 netdev_err(netdev, "Unable to allocate memory for queues\n");
6844 return -ENOMEM;
6845 }
6846
6847 igc_reset(adapter);
6848
6849 /* let the f/w know that the h/w is now under the control of the
6850 * driver.
6851 */
6852 igc_get_hw_control(adapter);
6853
6854 val = rd32(IGC_WUS);
6855 if (val & WAKE_PKT_WUS)
6856 igc_deliver_wake_packet(netdev);
6857
6858 wr32(IGC_WUS, ~0);
6859
6860 rtnl_lock();
6861 if (!err && netif_running(netdev))
6862 err = __igc_open(netdev, true);
6863
6864 if (!err)
6865 netif_device_attach(netdev);
6866 rtnl_unlock();
6867
6868 return err;
6869}
6870
6871static int __maybe_unused igc_runtime_resume(struct device *dev)
6872{
6873 return igc_resume(dev);
6874}
6875
6876static int __maybe_unused igc_suspend(struct device *dev)
6877{
	return __igc_shutdown(to_pci_dev(dev), NULL, false);
6879}
6880
6881static int __maybe_unused igc_runtime_idle(struct device *dev)
6882{
6883 struct net_device *netdev = dev_get_drvdata(dev);
6884 struct igc_adapter *adapter = netdev_priv(netdev);
6885
6886 if (!igc_has_link(adapter))
6887 pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
6888
6889 return -EBUSY;
6890}
6891#endif /* CONFIG_PM */
6892
6893static void igc_shutdown(struct pci_dev *pdev)
6894{
6895 bool wake;
6896
	__igc_shutdown(pdev, &wake, false);
6898
6899 if (system_state == SYSTEM_POWER_OFF) {
6900 pci_wake_from_d3(pdev, wake);
6901 pci_set_power_state(pdev, PCI_D3hot);
6902 }
6903}
6904
6905/**
6906 * igc_io_error_detected - called when PCI error is detected
6907 * @pdev: Pointer to PCI device
6908 * @state: The current PCI connection state
6909 *
6910 * This function is called after a PCI bus error affecting
6911 * this device has been detected.
 */
6913static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev,
6914 pci_channel_state_t state)
6915{
6916 struct net_device *netdev = pci_get_drvdata(pdev);
6917 struct igc_adapter *adapter = netdev_priv(netdev);
6918
6919 netif_device_detach(netdev);
6920
6921 if (state == pci_channel_io_perm_failure)
6922 return PCI_ERS_RESULT_DISCONNECT;
6923
6924 if (netif_running(netdev))
6925 igc_down(adapter);
6926 pci_disable_device(pdev);
6927
6928 /* Request a slot reset. */
6929 return PCI_ERS_RESULT_NEED_RESET;
6930}
6931
6932/**
6933 * igc_io_slot_reset - called after the PCI bus has been reset.
6934 * @pdev: Pointer to PCI device
6935 *
 * Restart the card from scratch, as if from a cold boot. The
 * implementation resembles the first half of the igc_resume routine.
 */
6939static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
6940{
6941 struct net_device *netdev = pci_get_drvdata(pdev);
6942 struct igc_adapter *adapter = netdev_priv(netdev);
6943 struct igc_hw *hw = &adapter->hw;
6944 pci_ers_result_t result;
6945
6946 if (pci_enable_device_mem(pdev)) {
6947 netdev_err(netdev, "Could not re-enable PCI device after reset\n");
6948 result = PCI_ERS_RESULT_DISCONNECT;
6949 } else {
6950 pci_set_master(pdev);
6951 pci_restore_state(pdev);
6952 pci_save_state(pdev);
6953
6954 pci_enable_wake(pdev, PCI_D3hot, 0);
6955 pci_enable_wake(pdev, PCI_D3cold, 0);
6956
6957 /* In case of PCI error, adapter loses its HW address
6958 * so we should re-assign it here.
6959 */
6960 hw->hw_addr = adapter->io_addr;
6961
6962 igc_reset(adapter);
6963 wr32(IGC_WUS, ~0);
6964 result = PCI_ERS_RESULT_RECOVERED;
6965 }
6966
6967 return result;
6968}
6969
6970/**
6971 * igc_io_resume - called when traffic can start to flow again.
6972 * @pdev: Pointer to PCI device
6973 *
6974 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. The implementation resembles the
 * second half of the igc_resume routine.
6977 */
6978static void igc_io_resume(struct pci_dev *pdev)
6979{
6980 struct net_device *netdev = pci_get_drvdata(pdev);
6981 struct igc_adapter *adapter = netdev_priv(netdev);
6982
6983 rtnl_lock();
6984 if (netif_running(netdev)) {
		if (igc_open(netdev)) {
			netdev_err(netdev, "igc_open failed after reset\n");
			rtnl_unlock();
			return;
		}
6989 }
6990
6991 netif_device_attach(netdev);
6992
6993 /* let the f/w know that the h/w is now under the control of the
6994 * driver.
6995 */
6996 igc_get_hw_control(adapter);
6997 rtnl_unlock();
6998}
6999
7000static const struct pci_error_handlers igc_err_handler = {
7001 .error_detected = igc_io_error_detected,
7002 .slot_reset = igc_io_slot_reset,
7003 .resume = igc_io_resume,
7004};
7005
7006#ifdef CONFIG_PM
7007static const struct dev_pm_ops igc_pm_ops = {
7008 SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume)
7009 SET_RUNTIME_PM_OPS(igc_runtime_suspend, igc_runtime_resume,
7010 igc_runtime_idle)
7011};
7012#endif
7013
7014static struct pci_driver igc_driver = {
7015 .name = igc_driver_name,
7016 .id_table = igc_pci_tbl,
7017 .probe = igc_probe,
7018 .remove = igc_remove,
7019#ifdef CONFIG_PM
7020 .driver.pm = &igc_pm_ops,
7021#endif
7022 .shutdown = igc_shutdown,
7023 .err_handler = &igc_err_handler,
7024};
7025
7026/**
 * igc_reinit_queues - reinitialize the adapter's queues
 * @adapter: pointer to adapter structure
 *
 * Closes the interface if it is running, rebuilds the interrupt scheme
 * and reopens the interface. Returns 0 on success, negative on failure.
 */
7030int igc_reinit_queues(struct igc_adapter *adapter)
7031{
7032 struct net_device *netdev = adapter->netdev;
7033 int err = 0;
7034
7035 if (netif_running(netdev))
7036 igc_close(netdev);
7037
7038 igc_reset_interrupt_capability(adapter);
7039
7040 if (igc_init_interrupt_scheme(adapter, true)) {
7041 netdev_err(netdev, "Unable to allocate memory for queues\n");
7042 return -ENOMEM;
7043 }
7044
7045 if (netif_running(netdev))
7046 err = igc_open(netdev);
7047
7048 return err;
7049}
7050
7051/**
7052 * igc_get_hw_dev - return device
7053 * @hw: pointer to hardware structure
7054 *
7055 * used by hardware layer to print debugging information
7056 */
7057struct net_device *igc_get_hw_dev(struct igc_hw *hw)
7058{
7059 struct igc_adapter *adapter = hw->back;
7060
7061 return adapter->netdev;
7062}
7063
7064static void igc_disable_rx_ring_hw(struct igc_ring *ring)
7065{
7066 struct igc_hw *hw = &ring->q_vector->adapter->hw;
7067 u8 idx = ring->reg_idx;
7068 u32 rxdctl;
7069
7070 rxdctl = rd32(IGC_RXDCTL(idx));
7071 rxdctl &= ~IGC_RXDCTL_QUEUE_ENABLE;
7072 rxdctl |= IGC_RXDCTL_SWFLUSH;
7073 wr32(IGC_RXDCTL(idx), rxdctl);
7074}
7075
7076void igc_disable_rx_ring(struct igc_ring *ring)
7077{
7078 igc_disable_rx_ring_hw(ring);
7079 igc_clean_rx_ring(ring);
7080}
7081
7082void igc_enable_rx_ring(struct igc_ring *ring)
7083{
7084 struct igc_adapter *adapter = ring->q_vector->adapter;
7085
7086 igc_configure_rx_ring(adapter, ring);
7087
7088 if (ring->xsk_pool)
7089 igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring));
7090 else
7091 igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
7092}
7093
7094static void igc_disable_tx_ring_hw(struct igc_ring *ring)
7095{
7096 struct igc_hw *hw = &ring->q_vector->adapter->hw;
7097 u8 idx = ring->reg_idx;
7098 u32 txdctl;
7099
7100 txdctl = rd32(IGC_TXDCTL(idx));
7101 txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE;
7102 txdctl |= IGC_TXDCTL_SWFLUSH;
7103 wr32(IGC_TXDCTL(idx), txdctl);
7104}
7105
7106void igc_disable_tx_ring(struct igc_ring *ring)
7107{
7108 igc_disable_tx_ring_hw(ring);
7109 igc_clean_tx_ring(ring);
7110}
7111
7112void igc_enable_tx_ring(struct igc_ring *ring)
7113{
7114 struct igc_adapter *adapter = ring->q_vector->adapter;
7115
7116 igc_configure_tx_ring(adapter, ring);
7117}
7118
7119/**
7120 * igc_init_module - Driver Registration Routine
7121 *
7122 * igc_init_module is the first routine called when the driver is
7123 * loaded. All it does is register with the PCI subsystem.
7124 */
7125static int __init igc_init_module(void)
7126{
7127 int ret;
7128
7129 pr_info("%s\n", igc_driver_string);
7130 pr_info("%s\n", igc_copyright);
7131
7132 ret = pci_register_driver(&igc_driver);
7133 return ret;
7134}
7135
7136module_init(igc_init_module);
7137
7138/**
7139 * igc_exit_module - Driver Exit Cleanup Routine
7140 *
7141 * igc_exit_module is called just before the driver is removed
7142 * from memory.
7143 */
7144static void __exit igc_exit_module(void)
7145{
7146 pci_unregister_driver(&igc_driver);
7147}
7148
7149module_exit(igc_exit_module);
7150/* igc_main.c */