/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 1999 - 2018 Intel Corporation. */

#ifndef _IXGBE_H_
#define _IXGBE_H_

#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/cpumask.h>
#include <linux/aer.h>
#include <linux/if_vlan.h>
#include <linux/jiffies.h>
#include <linux/phy.h>

#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#include "ixgbe_type.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb.h"
#if IS_ENABLED(CONFIG_FCOE)
#define IXGBE_FCOE
#include "ixgbe_fcoe.h"
#endif /* IS_ENABLED(CONFIG_FCOE) */
#ifdef CONFIG_IXGBE_DCA
#include <linux/dca.h>
#endif
#include "ixgbe_ipsec.h"

#include <net/xdp.h>

/* common prefix used by pr_<> macros */
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

/* TX/RX descriptor defines */
#define IXGBE_DEFAULT_TXD		512
#define IXGBE_DEFAULT_TX_WORK		256
#define IXGBE_MAX_TXD			4096
#define IXGBE_MIN_TXD			64

#if (PAGE_SIZE < 8192)
#define IXGBE_DEFAULT_RXD		512
#else
#define IXGBE_DEFAULT_RXD		128
#endif
#define IXGBE_MAX_RXD			4096
#define IXGBE_MIN_RXD			64

/* flow control */
#define IXGBE_MIN_FCRTL			0x40
#define IXGBE_MAX_FCRTL			0x7FF80
#define IXGBE_MIN_FCRTH			0x600
#define IXGBE_MAX_FCRTH			0x7FFF0
#define IXGBE_DEFAULT_FCPAUSE		0xFFFF
#define IXGBE_MIN_FCPAUSE		0
#define IXGBE_MAX_FCPAUSE		0xFFFF

/* Supported Rx Buffer Sizes */
#define IXGBE_RXBUFFER_256	256	/* Used for skb receive header */
#define IXGBE_RXBUFFER_1536	1536
#define IXGBE_RXBUFFER_2K	2048
#define IXGBE_RXBUFFER_3K	3072
#define IXGBE_RXBUFFER_4K	4096
#define IXGBE_MAX_RXBUFFER	16384	/* largest size for a single descriptor */

/* Attempt to maximize the headroom available for incoming frames. We
 * use a 2K buffer for receives and need 1536/1534 to store the data for
 * the frame. This leaves us with 512 bytes of room. From that we need
 * to deduct the space needed for the shared info and the padding needed
 * to IP align the frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 *	 up negative. In these cases we should fall back to the 3K
 *	 buffers.
 */
#if (PAGE_SIZE < 8192)
#define IXGBE_MAX_2K_FRAME_BUILD_SKB (IXGBE_RXBUFFER_1536 - NET_IP_ALIGN)
#define IXGBE_2K_TOO_SMALL_WITH_PADDING \
((NET_SKB_PAD + IXGBE_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K))

static inline int ixgbe_compute_pad(int rx_buf_len)
{
	int page_size, pad_size;

	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;

	return pad_size;
}

static inline int ixgbe_skb_pad(void)
{
	int rx_buf_len;

	/* If a 2K buffer cannot handle a standard Ethernet frame then
	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
	 *
	 * For a 3K buffer we need to add enough padding to allow for
	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
	 * cache-line alignment.
	 */
	if (IXGBE_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = IXGBE_RXBUFFER_3K + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = IXGBE_RXBUFFER_1536;

	/* if needed make room for NET_IP_ALIGN */
	rx_buf_len -= NET_IP_ALIGN;

	return ixgbe_compute_pad(rx_buf_len);
}

#define IXGBE_SKB_PAD	ixgbe_skb_pad()
#else
#define IXGBE_SKB_PAD	(NET_SKB_PAD + NET_IP_ALIGN)
#endif
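
/* Worked example (illustrative, assuming 4K pages, 64-byte cache lines,
 * and a 320-byte skb_shared_info, so NET_SKB_PAD = 64 and NET_IP_ALIGN = 2):
 * SKB_WITH_OVERHEAD(2048) = 2048 - 320 = 1728, so the check
 * (64 + 1536) > 1728 is false and rx_buf_len becomes 1536 - 2 = 1534.
 * ixgbe_compute_pad(1534) then aligns 1534 up to 2048 and returns
 * 1728 - 1534 = 194 bytes of headroom for IXGBE_SKB_PAD.
 */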

/*
 * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 * reserve 64 more, and skb_shared_info adds an additional 320 bytes more,
 * this adds up to 448 bytes of extra data.
 *
 * Since netdev_alloc_skb now allocates a page fragment we can use a value
 * of 256 and the resultant skb will have a truesize of 960 or less.
 */
#define IXGBE_RX_HDR_SIZE	IXGBE_RXBUFFER_256

/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IXGBE_RX_BUFFER_WRITE	16	/* Must be power of 2 */

#define IXGBE_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

enum ixgbe_tx_flags {
	/* cmd_type flags */
	IXGBE_TX_FLAGS_HW_VLAN	= 0x01,
	IXGBE_TX_FLAGS_TSO	= 0x02,
	IXGBE_TX_FLAGS_TSTAMP	= 0x04,

	/* olinfo flags */
	IXGBE_TX_FLAGS_CC	= 0x08,
	IXGBE_TX_FLAGS_IPV4	= 0x10,
	IXGBE_TX_FLAGS_CSUM	= 0x20,
	IXGBE_TX_FLAGS_IPSEC	= 0x40,

	/* software defined flags */
	IXGBE_TX_FLAGS_SW_VLAN	= 0x80,
	IXGBE_TX_FLAGS_FCOE	= 0x100,
};

/* VLAN info */
#define IXGBE_TX_FLAGS_VLAN_MASK	0xffff0000
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT	29
#define IXGBE_TX_FLAGS_VLAN_SHIFT	16
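
/* The transmit path packs the 16-bit VLAN tag into the upper half of
 * tx_flags using the definitions above; a sketch of the usual pattern
 * (not a complete transmit routine):
 *
 *	if (skb_vlan_tag_present(skb)) {
 *		tx_flags |= skb_vlan_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
 *		tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
 *	}
 */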

#define IXGBE_MAX_VF_MC_ENTRIES		30
#define IXGBE_MAX_VF_FUNCTIONS		64
#define IXGBE_MAX_VFTA_ENTRIES		128
#define MAX_EMULATION_MAC_ADDRS		16
#define IXGBE_MAX_PF_MACVLANS		15
#define VMDQ_P(p)	((p) + adapter->ring_feature[RING_F_VMDQ].offset)
#define IXGBE_82599_VF_DEVICE_ID	0x10ED
#define IXGBE_X540_VF_DEVICE_ID		0x1515

struct vf_data_storage {
	struct pci_dev *vfdev;
	unsigned char vf_mac_addresses[ETH_ALEN];
	u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
	u16 num_vf_mc_hashes;
	bool clear_to_send;
	bool pf_set_mac;
	u16 pf_vlan; /* When set, guest VLAN config not allowed. */
	u16 pf_qos;
	u16 tx_rate;
	u8 spoofchk_enabled;
	bool rss_query_enabled;
	u8 trusted;
	int xcast_mode;
	unsigned int vf_api;
};

enum ixgbevf_xcast_modes {
	IXGBEVF_XCAST_MODE_NONE = 0,
	IXGBEVF_XCAST_MODE_MULTI,
	IXGBEVF_XCAST_MODE_ALLMULTI,
	IXGBEVF_XCAST_MODE_PROMISC,
};

struct vf_macvlans {
	struct list_head l;
	int vf;
	bool free;
	bool is_macvlan;
	u8 vf_macvlan[ETH_ALEN];
};

#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1u << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
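
/* Worked example (illustrative): IXGBE_MAX_DATA_PER_TXD is 1u << 14 =
 * 16384 bytes, so a 60000-byte TSO payload consumes
 * TXD_USE_COUNT(60000) = DIV_ROUND_UP(60000, 16384) = 4 data descriptors.
 * DESC_NEEDED sizes the ring-space check for the worst case: one
 * descriptor per fragment plus slack for the linear head, a context
 * descriptor, and a gap between head and tail.
 */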

/* wrapper around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer */
struct ixgbe_tx_buffer {
	union ixgbe_adv_tx_desc *next_to_watch;
	unsigned long time_stamp;
	union {
		struct sk_buff *skb;
		struct xdp_frame *xdpf;
	};
	unsigned int bytecount;
	unsigned short gso_segs;
	__be16 protocol;
	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

struct ixgbe_rx_buffer {
	union {
		struct {
			struct sk_buff *skb;
			dma_addr_t dma;
			struct page *page;
			__u32 page_offset;
			__u16 pagecnt_bias;
		};
		struct {
			bool discard;
			struct xdp_buff *xdp;
		};
	};
};

struct ixgbe_queue_stats {
	u64 packets;
	u64 bytes;
};

struct ixgbe_tx_queue_stats {
	u64 restart_queue;
	u64 tx_busy;
	u64 tx_done_old;
};

struct ixgbe_rx_queue_stats {
	u64 rsc_count;
	u64 rsc_flush;
	u64 non_eop_descs;
	u64 alloc_rx_page;
	u64 alloc_rx_page_failed;
	u64 alloc_rx_buff_failed;
	u64 csum_err;
};

#define IXGBE_TS_HDR_LEN 8

enum ixgbe_ring_state_t {
	__IXGBE_RX_3K_BUFFER,
	__IXGBE_RX_BUILD_SKB_ENABLED,
	__IXGBE_RX_RSC_ENABLED,
	__IXGBE_RX_CSUM_UDP_ZERO_ERR,
	__IXGBE_RX_FCOE,
	__IXGBE_TX_FDIR_INIT_DONE,
	__IXGBE_TX_XPS_INIT_DONE,
	__IXGBE_TX_DETECT_HANG,
	__IXGBE_HANG_CHECK_ARMED,
	__IXGBE_TX_XDP_RING,
	__IXGBE_TX_DISABLED,
};

#define ring_uses_build_skb(ring) \
	test_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &(ring)->state)

struct ixgbe_fwd_adapter {
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
	struct net_device *netdev;
	unsigned int tx_base_queue;
	unsigned int rx_base_queue;
	int pool;
};

#define check_for_tx_hang(ring) \
	test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
#define set_check_for_tx_hang(ring) \
	set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
#define clear_check_for_tx_hang(ring) \
	clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
#define ring_is_rsc_enabled(ring) \
	test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
#define set_ring_rsc_enabled(ring) \
	set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
#define clear_ring_rsc_enabled(ring) \
	clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
#define ring_is_xdp(ring) \
	test_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
#define set_ring_xdp(ring) \
	set_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
#define clear_ring_xdp(ring) \
	clear_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
struct ixgbe_ring {
	struct ixgbe_ring *next;	/* pointer to next ring in q_vector */
	struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */
	struct net_device *netdev;	/* netdev ring belongs to */
	struct bpf_prog *xdp_prog;
	struct device *dev;		/* device for DMA mapping */
	void *desc;			/* descriptor ring memory */
	union {
		struct ixgbe_tx_buffer *tx_buffer_info;
		struct ixgbe_rx_buffer *rx_buffer_info;
	};
	unsigned long state;
	u8 __iomem *tail;
	dma_addr_t dma;			/* phys. address of descriptor ring */
	unsigned int size;		/* length in bytes */

	u16 count;			/* amount of descriptors */

	u8 queue_index;			/* needed for multiqueue queue management */
	u8 reg_idx;			/* holds the special value that gets
					 * the hardware register offset
					 * associated with this ring, which is
					 * different for DCB and RSS modes
					 */
	u16 next_to_use;
	u16 next_to_clean;

	unsigned long last_rx_timestamp;

	union {
		u16 next_to_alloc;
		struct {
			u8 atr_sample_rate;
			u8 atr_count;
		};
	};

	u8 dcb_tc;
	struct ixgbe_queue_stats stats;
	struct u64_stats_sync syncp;
	union {
		struct ixgbe_tx_queue_stats tx_stats;
		struct ixgbe_rx_queue_stats rx_stats;
	};
	struct xdp_rxq_info xdp_rxq;
	struct xdp_umem *xsk_umem;
	u16 ring_idx;		/* {rx,tx,xdp}_ring back reference idx */
	u16 rx_buf_len;
} ____cacheline_internodealigned_in_smp;

enum ixgbe_ring_f_enum {
	RING_F_NONE = 0,
	RING_F_VMDQ,  /* SR-IOV uses the same ring feature */
	RING_F_RSS,
	RING_F_FDIR,
#ifdef IXGBE_FCOE
	RING_F_FCOE,
#endif /* IXGBE_FCOE */

	RING_F_ARRAY_SIZE      /* must be last in enum set */
};

#define IXGBE_MAX_RSS_INDICES		16
#define IXGBE_MAX_RSS_INDICES_X550	63
#define IXGBE_MAX_VMDQ_INDICES		64
#define IXGBE_MAX_FDIR_INDICES		63	/* based on q_vector limit */
#define IXGBE_MAX_FCOE_INDICES		8
#define MAX_RX_QUEUES			(IXGBE_MAX_FDIR_INDICES + 1)
#define MAX_TX_QUEUES			(IXGBE_MAX_FDIR_INDICES + 1)
#define MAX_XDP_QUEUES			(IXGBE_MAX_FDIR_INDICES + 1)
#define IXGBE_MAX_L2A_QUEUES		4
#define IXGBE_BAD_L2A_QUEUE		3
#define IXGBE_MAX_MACVLANS		63

struct ixgbe_ring_feature {
	u16 limit;	/* upper limit on feature indices */
	u16 indices;	/* current value of indices */
	u16 mask;	/* Mask used for feature to ring mapping */
	u16 offset;	/* offset to start of feature */
} ____cacheline_internodealigned_in_smp;

#define IXGBE_82599_VMDQ_8Q_MASK 0x78
#define IXGBE_82599_VMDQ_4Q_MASK 0x7C
#define IXGBE_82599_VMDQ_2Q_MASK 0x7E

/*
 * FCoE requires that all Rx buffers be over 2200 bytes in length. Since
 * this is twice the size of a half page we need to double the page order
 * for FCoE enabled Rx queues.
 */
static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring)
{
	if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
		return IXGBE_RXBUFFER_3K;
#if (PAGE_SIZE < 8192)
	if (ring_uses_build_skb(ring))
		return IXGBE_MAX_2K_FRAME_BUILD_SKB;
#endif
	return IXGBE_RXBUFFER_2K;
}

static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
		return 1;
#endif
	return 0;
}
#define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring))
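
/* Example (illustrative): on a 4K-page system, a ring with
 * __IXGBE_RX_3K_BUFFER set uses 3072-byte buffers, which do not fit in
 * half a page, so ixgbe_rx_pg_order() returns 1 and ixgbe_rx_pg_size()
 * yields an 8192-byte allocation that splits into two 4096-byte halves,
 * each large enough for one 3K buffer.
 */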

#define IXGBE_ITR_ADAPTIVE_MIN_INC	2
#define IXGBE_ITR_ADAPTIVE_MIN_USECS	10
#define IXGBE_ITR_ADAPTIVE_MAX_USECS	126
#define IXGBE_ITR_ADAPTIVE_LATENCY	0x80
#define IXGBE_ITR_ADAPTIVE_BULK		0x00

struct ixgbe_ring_container {
	struct ixgbe_ring *ring;	/* pointer to linked list of rings */
	unsigned long next_update;	/* jiffies value of next update */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 work_limit;			/* total work allowed per interrupt */
	u8 count;			/* total number of rings in vector */
	u8 itr;				/* current ITR setting for ring */
};

/* iterator for handling rings in ring container */
#define ixgbe_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)
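
/* Typical usage (illustrative; ixgbe_clean_tx_ring_stub is hypothetical):
 *
 *	struct ixgbe_ring *ring;
 *
 *	ixgbe_for_each_ring(ring, q_vector->tx)
 *		ixgbe_clean_tx_ring_stub(ring);
 */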

#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
			      ? 8 : 1)
#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS

/* MAX_Q_VECTORS of these are allocated,
 * but we only use one per queue-specific vector.
 */
struct ixgbe_q_vector {
	struct ixgbe_adapter *adapter;
#ifdef CONFIG_IXGBE_DCA
	int cpu;	/* CPU for DCA */
#endif
	u16 v_idx;	/* index of q_vector within array, also used for
			 * finding the bit in EICR and friends that
			 * represents the vector for this ring */
	u16 itr;	/* Interrupt throttle rate written to EITR */
	struct ixgbe_ring_container rx, tx;

	struct napi_struct napi;
	cpumask_t affinity_mask;
	int numa_node;
	struct rcu_head rcu;	/* to avoid race with update stats on free */
	char name[IFNAMSIZ + 9];

	/* for dynamic allocation of rings associated with this q_vector */
	struct ixgbe_ring ring[] ____cacheline_internodealigned_in_smp;
};

#ifdef CONFIG_IXGBE_HWMON

#define IXGBE_HWMON_TYPE_LOC		0
#define IXGBE_HWMON_TYPE_TEMP		1
#define IXGBE_HWMON_TYPE_CAUTION	2
#define IXGBE_HWMON_TYPE_MAX		3

struct hwmon_attr {
	struct device_attribute dev_attr;
	struct ixgbe_hw *hw;
	struct ixgbe_thermal_diode_data *sensor;
	char name[12];
};

struct hwmon_buff {
	struct attribute_group group;
	const struct attribute_group *groups[2];
	struct attribute *attrs[IXGBE_MAX_SENSORS * 4 + 1];
	struct hwmon_attr hwmon_list[IXGBE_MAX_SENSORS * 4];
	unsigned int n_hwmon;
};
#endif /* CONFIG_IXGBE_HWMON */

/*
 * microsecond values for various ITR rates shifted by 2 to fit itr register
 * with the first 3 bits reserved 0
 */
#define IXGBE_MIN_RSC_ITR	24
#define IXGBE_100K_ITR		40
#define IXGBE_20K_ITR		200
#define IXGBE_12K_ITR		336

/* ixgbe_test_staterr - tests bits in Rx descriptor status and error fields */
static inline __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc *rx_desc,
					const u32 stat_err_bits)
{
	return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
}
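
/* Typical usage (illustrative): the Rx clean path checks the
 * descriptor-done bit before processing a descriptor:
 *
 *	if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
 *		break;
 */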

static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
{
	u16 ntc = ring->next_to_clean;
	u16 ntu = ring->next_to_use;

	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}
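
/* Worked example (illustrative): with count = 512, next_to_clean = 10
 * and next_to_use = 500, ntc <= ntu, so the result is
 * 512 + 10 - 500 - 1 = 21 free descriptors; the "- 1" keeps
 * next_to_use from ever catching up to next_to_clean on a full ring.
 */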

#define IXGBE_RX_DESC(R, i)	    \
	(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
#define IXGBE_TX_DESC(R, i)	    \
	(&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
#define IXGBE_TX_CTXTDESC(R, i)	    \
	(&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
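
/* Example (illustrative): fetching the descriptor at the current clean
 * position of an Rx ring:
 *
 *	union ixgbe_adv_rx_desc *rx_desc =
 *		IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
 */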

#define IXGBE_MAX_JUMBO_FRAME_SIZE	9728 /* Maximum Supported Size 9.5KB */
#ifdef IXGBE_FCOE
/* Use 3K as the baby jumbo frame size for FCoE */
#define IXGBE_FCOE_JUMBO_FRAME_SIZE	3072
#endif /* IXGBE_FCOE */

#define OTHER_VECTOR 1
#define NON_Q_VECTORS (OTHER_VECTOR)

#define MAX_MSIX_VECTORS_82599 64
#define MAX_Q_VECTORS_82599 64
#define MAX_MSIX_VECTORS_82598 18
#define MAX_Q_VECTORS_82598 16

struct ixgbe_mac_addr {
	u8 addr[ETH_ALEN];
	u16 pool;
	u16 state; /* bitmask */
};

#define IXGBE_MAC_STATE_DEFAULT		0x1
#define IXGBE_MAC_STATE_MODIFIED	0x2
#define IXGBE_MAC_STATE_IN_USE		0x4

#define MAX_Q_VECTORS MAX_Q_VECTORS_82599
#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599

#define MIN_MSIX_Q_VECTORS 1
#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)

/* default to trying for four seconds */
#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
#define IXGBE_SFP_POLL_JIFFIES (2 * HZ)	/* SFP poll every 2 seconds */

/* board specific private data structure */
struct ixgbe_adapter {
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
	/* OS defined structs */
	struct net_device *netdev;
	struct bpf_prog *xdp_prog;
	struct pci_dev *pdev;
	struct mii_bus *mii_bus;

	unsigned long state;

	/* Some features need tri-state capability,
	 * thus the additional *_CAPABLE flags.
	 */
	u32 flags;
#define IXGBE_FLAG_MSI_ENABLED			BIT(1)
#define IXGBE_FLAG_MSIX_ENABLED			BIT(3)
#define IXGBE_FLAG_RX_1BUF_CAPABLE		BIT(4)
#define IXGBE_FLAG_RX_PS_CAPABLE		BIT(5)
#define IXGBE_FLAG_RX_PS_ENABLED		BIT(6)
#define IXGBE_FLAG_DCA_ENABLED			BIT(8)
#define IXGBE_FLAG_DCA_CAPABLE			BIT(9)
#define IXGBE_FLAG_IMIR_ENABLED			BIT(10)
#define IXGBE_FLAG_MQ_CAPABLE			BIT(11)
#define IXGBE_FLAG_DCB_ENABLED			BIT(12)
#define IXGBE_FLAG_VMDQ_CAPABLE			BIT(13)
#define IXGBE_FLAG_VMDQ_ENABLED			BIT(14)
#define IXGBE_FLAG_FAN_FAIL_CAPABLE		BIT(15)
#define IXGBE_FLAG_NEED_LINK_UPDATE		BIT(16)
#define IXGBE_FLAG_NEED_LINK_CONFIG		BIT(17)
#define IXGBE_FLAG_FDIR_HASH_CAPABLE		BIT(18)
#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE		BIT(19)
#define IXGBE_FLAG_FCOE_CAPABLE			BIT(20)
#define IXGBE_FLAG_FCOE_ENABLED			BIT(21)
#define IXGBE_FLAG_SRIOV_CAPABLE		BIT(22)
#define IXGBE_FLAG_SRIOV_ENABLED		BIT(23)
#define IXGBE_FLAG_RX_HWTSTAMP_ENABLED		BIT(25)
#define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER	BIT(26)
#define IXGBE_FLAG_DCB_CAPABLE			BIT(27)

	u32 flags2;
#define IXGBE_FLAG2_RSC_CAPABLE			BIT(0)
#define IXGBE_FLAG2_RSC_ENABLED			BIT(1)
#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE		BIT(2)
#define IXGBE_FLAG2_TEMP_SENSOR_EVENT		BIT(3)
#define IXGBE_FLAG2_SEARCH_FOR_SFP		BIT(4)
#define IXGBE_FLAG2_SFP_NEEDS_RESET		BIT(5)
#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT	BIT(7)
#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP		BIT(8)
#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP		BIT(9)
#define IXGBE_FLAG2_PTP_PPS_ENABLED		BIT(10)
#define IXGBE_FLAG2_PHY_INTERRUPT		BIT(11)
#define IXGBE_FLAG2_VLAN_PROMISC		BIT(13)
#define IXGBE_FLAG2_EEE_CAPABLE			BIT(14)
#define IXGBE_FLAG2_EEE_ENABLED			BIT(15)
#define IXGBE_FLAG2_RX_LEGACY			BIT(16)
#define IXGBE_FLAG2_IPSEC_ENABLED		BIT(17)
#define IXGBE_FLAG2_VF_IPSEC_ENABLED		BIT(18)

	/* Tx fast path data */
	int num_tx_queues;
	u16 tx_itr_setting;
	u16 tx_work_limit;
	u64 tx_ipsec;

	/* Rx fast path data */
	int num_rx_queues;
	u16 rx_itr_setting;
	u64 rx_ipsec;

	/* Port number used to identify VXLAN traffic */
	__be16 vxlan_port;
	__be16 geneve_port;

	/* XDP */
	int num_xdp_queues;
	struct ixgbe_ring *xdp_ring[MAX_XDP_QUEUES];
	unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled rings */

	/* TX */
	struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;

	u64 restart_queue;
	u64 lsc_int;
	u32 tx_timeout_count;

	/* RX */
	struct ixgbe_ring *rx_ring[MAX_RX_QUEUES];
	int num_rx_pools;		/* == num_rx_queues in 82598 */
	int num_rx_queues_per_pool;	/* 1 if 82598, can be many if 82599 */
	u64 hw_csum_rx_error;
	u64 hw_rx_no_dma_resources;
	u64 rsc_total_count;
	u64 rsc_total_flush;
	u64 non_eop_descs;
	u32 alloc_rx_page;
	u32 alloc_rx_page_failed;
	u32 alloc_rx_buff_failed;

	struct ixgbe_q_vector *q_vector[MAX_Q_VECTORS];

	/* DCB parameters */
	struct ieee_pfc *ixgbe_ieee_pfc;
	struct ieee_ets *ixgbe_ieee_ets;
	struct ixgbe_dcb_config dcb_cfg;
	struct ixgbe_dcb_config temp_dcb_cfg;
	u8 hw_tcs;
	u8 dcb_set_bitmap;
	u8 dcbx_cap;
	enum ixgbe_fc_mode last_lfc_mode;

	int num_q_vectors;	/* current number of q_vectors for device */
	int max_q_vectors;	/* true count of q_vectors for device */
	struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
	struct msix_entry *msix_entries;

	u32 test_icr;
	struct ixgbe_ring test_tx_ring;
	struct ixgbe_ring test_rx_ring;

	/* structs defined in ixgbe_hw.h */
	struct ixgbe_hw hw;
	u16 msg_enable;
	struct ixgbe_hw_stats stats;

	u64 tx_busy;
	unsigned int tx_ring_count;
	unsigned int xdp_ring_count;
	unsigned int rx_ring_count;

	u32 link_speed;
	bool link_up;
	unsigned long sfp_poll_time;
	unsigned long link_check_timeout;

	struct timer_list service_timer;
	struct work_struct service_task;

	struct hlist_head fdir_filter_list;
	unsigned long fdir_overflow; /* number of times ATR was backed off */
	union ixgbe_atr_input fdir_mask;
	int fdir_filter_count;
	u32 fdir_pballoc;
	u32 atr_sample_rate;
	spinlock_t fdir_perfect_lock;

#ifdef IXGBE_FCOE
	struct ixgbe_fcoe fcoe;
#endif /* IXGBE_FCOE */
	u8 __iomem *io_addr; /* Mainly for iounmap use */
	u32 wol;

	u16 bridge_mode;

	char eeprom_id[NVM_VER_SIZE];
	u16 eeprom_cap;

	u32 interrupt_event;
	u32 led_reg;

	struct ptp_clock *ptp_clock;
	struct ptp_clock_info ptp_caps;
	struct work_struct ptp_tx_work;
	struct sk_buff *ptp_tx_skb;
	struct hwtstamp_config tstamp_config;
	unsigned long ptp_tx_start;
	unsigned long last_overflow_check;
	unsigned long last_rx_ptp_check;
	unsigned long last_rx_timestamp;
	spinlock_t tmreg_lock;
	struct cyclecounter hw_cc;
	struct timecounter hw_tc;
	u32 base_incval;
	u32 tx_hwtstamp_timeouts;
	u32 tx_hwtstamp_skipped;
	u32 rx_hwtstamp_cleared;
	void (*ptp_setup_sdp)(struct ixgbe_adapter *);

	/* SR-IOV */
	DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
	unsigned int num_vfs;
	struct vf_data_storage *vfinfo;
	int vf_rate_link_speed;
	struct vf_macvlans vf_mvs;
	struct vf_macvlans *mv_list;

	u32 timer_event_accumulator;
	u32 vferr_refcount;
	struct ixgbe_mac_addr *mac_table;
	struct kobject *info_kobj;
#ifdef CONFIG_IXGBE_HWMON
	struct hwmon_buff *ixgbe_hwmon_buff;
#endif /* CONFIG_IXGBE_HWMON */
#ifdef CONFIG_DEBUG_FS
	struct dentry *ixgbe_dbg_adapter;
#endif /*CONFIG_DEBUG_FS*/

	u8 default_up;
	/* Bitmask indicating in use pools */
	DECLARE_BITMAP(fwd_bitmask, IXGBE_MAX_MACVLANS + 1);

#define IXGBE_MAX_LINK_HANDLE 10
	struct ixgbe_jump_table *jump_tables[IXGBE_MAX_LINK_HANDLE];
	unsigned long tables;

/* maximum number of RETA entries among all devices supported by ixgbe
 * driver: currently it's x550 device in non-SRIOV mode
 */
#define IXGBE_MAX_RETA_ENTRIES 512
	u8 rss_indir_tbl[IXGBE_MAX_RETA_ENTRIES];

#define IXGBE_RSS_KEY_SIZE 40  /* size of RSS Hash Key in bytes */
	u32 *rss_key;

#ifdef CONFIG_IXGBE_IPSEC
	struct ixgbe_ipsec *ipsec;
#endif /* CONFIG_IXGBE_IPSEC */
};

static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
{
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		return IXGBE_MAX_RSS_INDICES;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		return IXGBE_MAX_RSS_INDICES_X550;
	default:
		return 0;
	}
}

struct ixgbe_fdir_filter {
	struct hlist_node fdir_node;
	union ixgbe_atr_input filter;
	u16 sw_idx;
	u64 action;
};

enum ixgbe_state_t {
	__IXGBE_TESTING,
	__IXGBE_RESETTING,
	__IXGBE_DOWN,
	__IXGBE_DISABLED,
	__IXGBE_REMOVING,
	__IXGBE_SERVICE_SCHED,
	__IXGBE_SERVICE_INITED,
	__IXGBE_IN_SFP_INIT,
	__IXGBE_PTP_RUNNING,
	__IXGBE_PTP_TX_IN_PROGRESS,
	__IXGBE_RESET_REQUESTED,
};

struct ixgbe_cb {
	union {				/* Union defining head/tail partner */
		struct sk_buff *head;
		struct sk_buff *tail;
	};
	dma_addr_t dma;
	u16 append_cnt;
	bool page_released;
};
#define IXGBE_CB(skb) ((struct ixgbe_cb *)(skb)->cb)
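
/* Example (illustrative): the Rx path stashes per-skb driver state in
 * skb->cb through this cast, e.g.
 *
 *	IXGBE_CB(skb)->dma = rx_buffer->dma;
 *	IXGBE_CB(skb)->page_released = false;
 */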

enum ixgbe_boards {
	board_82598,
	board_82599,
	board_X540,
	board_X550,
	board_X550EM_x,
	board_x550em_x_fw,
	board_x550em_a,
	board_x550em_a_fw,
};

extern const struct ixgbe_info ixgbe_82598_info;
extern const struct ixgbe_info ixgbe_82599_info;
extern const struct ixgbe_info ixgbe_X540_info;
extern const struct ixgbe_info ixgbe_X550_info;
extern const struct ixgbe_info ixgbe_X550EM_x_info;
extern const struct ixgbe_info ixgbe_x550em_x_fw_info;
extern const struct ixgbe_info ixgbe_x550em_a_info;
extern const struct ixgbe_info ixgbe_x550em_a_fw_info;
#ifdef CONFIG_IXGBE_DCB
extern const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops;
#endif

extern char ixgbe_driver_name[];
#ifdef IXGBE_FCOE
extern char ixgbe_default_device_descr[];
#endif /* IXGBE_FCOE */

int ixgbe_open(struct net_device *netdev);
int ixgbe_close(struct net_device *netdev);
void ixgbe_up(struct ixgbe_adapter *adapter);
void ixgbe_down(struct ixgbe_adapter *adapter);
void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
void ixgbe_reset(struct ixgbe_adapter *adapter);
void ixgbe_set_ethtool_ops(struct net_device *netdev);
int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
int ixgbe_setup_tx_resources(struct ixgbe_ring *);
void ixgbe_free_rx_resources(struct ixgbe_ring *);
void ixgbe_free_tx_resources(struct ixgbe_ring *);
void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
void ixgbe_disable_rx(struct ixgbe_adapter *adapter);
void ixgbe_disable_tx(struct ixgbe_adapter *adapter);
void ixgbe_update_stats(struct ixgbe_adapter *adapter);
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
			 u16 subdevice_id);
#ifdef CONFIG_PCI_IOV
void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
#endif
int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
			 const u8 *addr, u16 queue);
int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
			 const u8 *addr, u16 queue);
void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid);
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *,
				  struct ixgbe_ring *);
void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
				      struct ixgbe_tx_buffer *);
void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
void ixgbe_write_eitr(struct ixgbe_q_vector *);
int ixgbe_poll(struct napi_struct *napi, int budget);
int ethtool_ioctl(struct ifreq *ifr);
s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_hash_dword input,
					  union ixgbe_atr_hash_dword common,
					  u8 queue);
s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
				    union ixgbe_atr_input *input_mask);
s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id, u8 queue);
s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id);
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
					  union ixgbe_atr_input *mask);
int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
				    struct ixgbe_fdir_filter *input,
				    u16 sw_idx);
void ixgbe_set_rx_mode(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
#endif
int ixgbe_setup_tc(struct net_device *dev, u8 tc);
void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
void ixgbe_do_reset(struct net_device *netdev);
#ifdef CONFIG_IXGBE_HWMON
void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
#endif /* CONFIG_IXGBE_HWMON */
#ifdef IXGBE_FCOE
void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
int ixgbe_fso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
	      u8 *hdr_len);
int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
		   union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb);
int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
		       struct scatterlist *sgl, unsigned int sgc);
int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
			  struct scatterlist *sgl, unsigned int sgc);
int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
int ixgbe_fcoe_enable(struct net_device *netdev);
int ixgbe_fcoe_disable(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
#endif /* CONFIG_IXGBE_DCB */
int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
			   struct netdev_fcoe_hbainfo *info);
u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
#endif /* IXGBE_FCOE */
#ifdef CONFIG_DEBUG_FS
void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter);
void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter);
void ixgbe_dbg_init(void);
void ixgbe_dbg_exit(void);
#else
static inline void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) {}
static inline void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) {}
static inline void ixgbe_dbg_init(void) {}
static inline void ixgbe_dbg_exit(void) {}
#endif /* CONFIG_DEBUG_FS */
static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
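
/* Typical usage (illustrative): stopping a Tx queue through its backing
 * netdev queue when the ring runs out of descriptors:
 *
 *	netif_tx_stop_queue(txring_txq(tx_ring));
 */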

void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter);
void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
void ixgbe_ptp_tx_hang(struct ixgbe_adapter *adapter);
void ixgbe_ptp_rx_pktstamp(struct ixgbe_q_vector *, struct sk_buff *);
void ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *, struct sk_buff *skb);
static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
					 union ixgbe_adv_rx_desc *rx_desc,
					 struct sk_buff *skb)
{
	if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_TSIP))) {
		ixgbe_ptp_rx_pktstamp(rx_ring->q_vector, skb);
		return;
	}

	if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
		return;

	ixgbe_ptp_rx_rgtstamp(rx_ring->q_vector, skb);

	/* Update the last_rx_timestamp timer in order to enable watchdog check
	 * for error case of latched timestamp on a dropped packet.
	 */
	rx_ring->last_rx_timestamp = jiffies;
}

int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter);
#ifdef CONFIG_PCI_IOV
void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter);
#endif

netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
				  struct ixgbe_adapter *adapter,
				  struct ixgbe_ring *tx_ring);
u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter);
void ixgbe_store_key(struct ixgbe_adapter *adapter);
void ixgbe_store_reta(struct ixgbe_adapter *adapter);
s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
		       u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
#ifdef CONFIG_IXGBE_IPSEC
void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter);
void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter);
void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter);
void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
		    union ixgbe_adv_rx_desc *rx_desc,
		    struct sk_buff *skb);
int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
		   struct ixgbe_ipsec_tx_data *itd);
void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf);
int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *mbuf, u32 vf);
int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, u32 *mbuf, u32 vf);
#else
static inline void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter) { }
static inline void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter) { }
static inline void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) { }
static inline void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
				  union ixgbe_adv_rx_desc *rx_desc,
				  struct sk_buff *skb) { }
static inline int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
				 struct ixgbe_tx_buffer *first,
				 struct ixgbe_ipsec_tx_data *itd) { return 0; }
static inline void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter,
					u32 vf) { }
static inline int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter,
					u32 *mbuf, u32 vf) { return -EACCES; }
static inline int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter,
					u32 *mbuf, u32 vf) { return -EACCES; }
#endif /* CONFIG_IXGBE_IPSEC */

static inline bool ixgbe_enabled_xdp_adapter(struct ixgbe_adapter *adapter)
{
	return !!adapter->xdp_prog;
}

#endif /* _IXGBE_H_ */