ixgbe.h (Linux v6.8)
   1/* SPDX-License-Identifier: GPL-2.0 */
   2/* Copyright(c) 1999 - 2018 Intel Corporation. */
   3
   4#ifndef _IXGBE_H_
   5#define _IXGBE_H_
   6
   7#include <linux/bitops.h>
   8#include <linux/types.h>
   9#include <linux/pci.h>
  10#include <linux/netdevice.h>
   11#include <linux/cpumask.h>
  12#include <linux/if_vlan.h>
  13#include <linux/jiffies.h>
  14#include <linux/phy.h>
  15
  16#include <linux/timecounter.h>
  17#include <linux/net_tstamp.h>
  18#include <linux/ptp_clock_kernel.h>
  19
  20#include "ixgbe_type.h"
  21#include "ixgbe_common.h"
  22#include "ixgbe_dcb.h"
  23#if IS_ENABLED(CONFIG_FCOE)
  24#define IXGBE_FCOE
  25#include "ixgbe_fcoe.h"
  26#endif /* IS_ENABLED(CONFIG_FCOE) */
  27#ifdef CONFIG_IXGBE_DCA
  28#include <linux/dca.h>
  29#endif
  30#include "ixgbe_ipsec.h"
  31
  32#include <net/xdp.h>
  33
  34/* common prefix used by pr_<> macros */
  35#undef pr_fmt
  36#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  37
  38/* TX/RX descriptor defines */
  39#define IXGBE_DEFAULT_TXD		    512
  40#define IXGBE_DEFAULT_TX_WORK		    256
  41#define IXGBE_MAX_TXD_82598		   4096
  42#define IXGBE_MAX_TXD_82599		   8192
  43#define IXGBE_MAX_TXD_X540		   8192
  44#define IXGBE_MAX_TXD_X550		  32768
  45#define IXGBE_MIN_TXD			     64
  46
  47#if (PAGE_SIZE < 8192)
  48#define IXGBE_DEFAULT_RXD		    512
  49#else
  50#define IXGBE_DEFAULT_RXD		    128
  51#endif
  52#define IXGBE_MAX_RXD_82598		   4096
  53#define IXGBE_MAX_RXD_82599		   8192
  54#define IXGBE_MAX_RXD_X540		   8192
  55#define IXGBE_MAX_RXD_X550		  32768
  56#define IXGBE_MIN_RXD			     64
  57
  58/* flow control */
  59#define IXGBE_MIN_FCRTL			   0x40
  60#define IXGBE_MAX_FCRTL			0x7FF80
  61#define IXGBE_MIN_FCRTH			  0x600
  62#define IXGBE_MAX_FCRTH			0x7FFF0
  63#define IXGBE_DEFAULT_FCPAUSE		 0xFFFF
  64#define IXGBE_MIN_FCPAUSE		      0
  65#define IXGBE_MAX_FCPAUSE		 0xFFFF
  66
  67/* Supported Rx Buffer Sizes */
  68#define IXGBE_RXBUFFER_256    256  /* Used for skb receive header */
  69#define IXGBE_RXBUFFER_1536  1536
  70#define IXGBE_RXBUFFER_2K    2048
  71#define IXGBE_RXBUFFER_3K    3072
  72#define IXGBE_RXBUFFER_4K    4096
  73#define IXGBE_MAX_RXBUFFER  16384  /* largest size for a single descriptor */
  74
  75#define IXGBE_PKT_HDR_PAD   (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
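/* Illustrative arithmetic (assuming the usual ETH_HLEN == 14, ETH_FCS_LEN == 4
 * and VLAN_HLEN == 4): IXGBE_PKT_HDR_PAD = 14 + 4 + (4 * 2) = 26 bytes, i.e.
 * the L2 header, the FCS, and room for two stacked VLAN tags.
 */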
  76
  77/* Attempt to maximize the headroom available for incoming frames.  We
  78 * use a 2K buffer for receives and need 1536/1534 to store the data for
  79 * the frame.  This leaves us with 512 bytes of room.  From that we need
  80 * to deduct the space needed for the shared info and the padding needed
  81 * to IP align the frame.
  82 *
  83 * Note: For cache line sizes 256 or larger this value is going to end
  84 *	 up negative.  In these cases we should fall back to the 3K
  85 *	 buffers.
  86 */
  87#if (PAGE_SIZE < 8192)
  88#define IXGBE_MAX_2K_FRAME_BUILD_SKB (IXGBE_RXBUFFER_1536 - NET_IP_ALIGN)
  89#define IXGBE_2K_TOO_SMALL_WITH_PADDING \
  90((NET_SKB_PAD + IXGBE_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K))
  91
  92static inline int ixgbe_compute_pad(int rx_buf_len)
  93{
  94	int page_size, pad_size;
  95
  96	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
  97	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
  98
  99	return pad_size;
 100}
 101
 102static inline int ixgbe_skb_pad(void)
 103{
 104	int rx_buf_len;
 105
 106	/* If a 2K buffer cannot handle a standard Ethernet frame then
 107	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
 108	 *
 109	 * For a 3K buffer we need to add enough padding to allow for
 110	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
 111	 * cache-line alignment.
 112	 */
 113	if (IXGBE_2K_TOO_SMALL_WITH_PADDING)
 114		rx_buf_len = IXGBE_RXBUFFER_3K + SKB_DATA_ALIGN(NET_IP_ALIGN);
 115	else
 116		rx_buf_len = IXGBE_RXBUFFER_1536;
 117
 118	/* if needed make room for NET_IP_ALIGN */
 119	rx_buf_len -= NET_IP_ALIGN;
 120
 121	return ixgbe_compute_pad(rx_buf_len);
 122}
 123
 124#define IXGBE_SKB_PAD	ixgbe_skb_pad()
 125#else
 126#define IXGBE_SKB_PAD	(NET_SKB_PAD + NET_IP_ALIGN)
 127#endif
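/* Worked example (illustrative sketch; assumes 4K pages, NET_IP_ALIGN == 0,
 * NET_SKB_PAD == 64 and SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) == 320,
 * which is typical for x86_64):
 *
 *   IXGBE_2K_TOO_SMALL_WITH_PADDING
 *	= (64 + 1536) > (2048 - 320)  ->  1600 > 1728  ->  false
 *
 * so ixgbe_skb_pad() picks rx_buf_len = IXGBE_RXBUFFER_1536, and
 *
 *   ixgbe_compute_pad(1536)
 *	= SKB_WITH_OVERHEAD(ALIGN(1536, 2048)) - 1536
 *	= (2048 - 320) - 1536 = 192
 *
 * giving IXGBE_SKB_PAD == 192 bytes of headroom per build_skb buffer under
 * those assumptions.
 */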
 128
 129/*
 130 * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 131 * reserve 64 more, and skb_shared_info adds an additional 320 bytes more,
 132 * this adds up to 448 bytes of extra data.
 133 *
 134 * Since netdev_alloc_skb now allocates a page fragment we can use a value
 135 * of 256 and the resultant skb will have a truesize of 960 or less.
 136 */
 137#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256
 138
 139/* How many Rx Buffers do we bundle into one write to the hardware ? */
 140#define IXGBE_RX_BUFFER_WRITE	16	/* Must be power of 2 */
 141
 142#define IXGBE_RX_DMA_ATTR \
 143	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
 144
 145enum ixgbe_tx_flags {
 146	/* cmd_type flags */
 147	IXGBE_TX_FLAGS_HW_VLAN	= 0x01,
 148	IXGBE_TX_FLAGS_TSO	= 0x02,
 149	IXGBE_TX_FLAGS_TSTAMP	= 0x04,
 150
 151	/* olinfo flags */
 152	IXGBE_TX_FLAGS_CC	= 0x08,
 153	IXGBE_TX_FLAGS_IPV4	= 0x10,
 154	IXGBE_TX_FLAGS_CSUM	= 0x20,
 155	IXGBE_TX_FLAGS_IPSEC	= 0x40,
 156
 157	/* software defined flags */
 158	IXGBE_TX_FLAGS_SW_VLAN	= 0x80,
 159	IXGBE_TX_FLAGS_FCOE	= 0x100,
 160};
 161
 162/* VLAN info */
 163#define IXGBE_TX_FLAGS_VLAN_MASK	0xffff0000
 164#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
 165#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT  29
 166#define IXGBE_TX_FLAGS_VLAN_SHIFT	16
 167
 168#define IXGBE_MAX_VF_MC_ENTRIES         30
 169#define IXGBE_MAX_VF_FUNCTIONS          64
 170#define IXGBE_MAX_VFTA_ENTRIES          128
 171#define MAX_EMULATION_MAC_ADDRS         16
 172#define IXGBE_MAX_PF_MACVLANS           15
 173#define VMDQ_P(p)   ((p) + adapter->ring_feature[RING_F_VMDQ].offset)
 174#define IXGBE_82599_VF_DEVICE_ID        0x10ED
 175#define IXGBE_X540_VF_DEVICE_ID         0x1515
 176
 177#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)	\
 178	{							\
 179		u32 current_counter = IXGBE_READ_REG(hw, reg);	\
 180		if (current_counter < last_counter)		\
 181			counter += 0x100000000LL;		\
 182		last_counter = current_counter;			\
 183		counter &= 0xFFFFFFFF00000000LL;		\
 184		counter |= current_counter;			\
 185	}
 186
 187#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
 188	{								 \
 189		u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);	 \
 190		u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);	 \
 191		u64 current_counter = (current_counter_msb << 32) |	 \
 192			current_counter_lsb;				 \
 193		if (current_counter < last_counter)			 \
 194			counter += 0x1000000000LL;			 \
 195		last_counter = current_counter;				 \
 196		counter &= 0xFFFFFFF000000000LL;			 \
 197		counter |= current_counter;				 \
 198	}
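/* Rollover example for the 32-bit variant (values invented for illustration):
 * if last_counter == 0xFFFFFFF0 and the register now reads 0x00000010, then
 * current_counter < last_counter, so 0x100000000 is added to the running
 * 64-bit software counter before its low 32 bits are replaced with the new
 * register value.  The hardware counters are only 32 (or 36) bits wide; these
 * macros extend them to monotonically increasing 64-bit values across wraps.
 */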
 199
 200struct vf_stats {
 201	u64 gprc;
 202	u64 gorc;
 203	u64 gptc;
 204	u64 gotc;
 205	u64 mprc;
 206};
 207
 208struct vf_data_storage {
 209	struct pci_dev *vfdev;
 210	unsigned char vf_mac_addresses[ETH_ALEN];
 211	u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
 212	u16 num_vf_mc_hashes;
 213	bool clear_to_send;
 214	struct vf_stats vfstats;
 215	struct vf_stats last_vfstats;
 216	struct vf_stats saved_rst_vfstats;
 217	bool pf_set_mac;
 218	u16 pf_vlan; /* When set, guest VLAN config not allowed. */
 219	u16 pf_qos;
 220	u16 tx_rate;
 221	int link_enable;
 222	int link_state;
 223	u8 spoofchk_enabled;
 224	bool rss_query_enabled;
 225	u8 trusted;
 226	int xcast_mode;
 227	unsigned int vf_api;
 228	u8 primary_abort_count;
 229};
 230
 231enum ixgbevf_xcast_modes {
 232	IXGBEVF_XCAST_MODE_NONE = 0,
 233	IXGBEVF_XCAST_MODE_MULTI,
 234	IXGBEVF_XCAST_MODE_ALLMULTI,
 235	IXGBEVF_XCAST_MODE_PROMISC,
 236};
 237
 238struct vf_macvlans {
 239	struct list_head l;
 240	int vf;
 241	bool free;
 242	bool is_macvlan;
 243	u8 vf_macvlan[ETH_ALEN];
 244};
 245
 246#define IXGBE_MAX_TXD_PWR	14
 247#define IXGBE_MAX_DATA_PER_TXD	(1u << IXGBE_MAX_TXD_PWR)
 248
 249/* Tx Descriptors needed, worst case */
 250#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
 251#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
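/* Example (illustrative): IXGBE_MAX_DATA_PER_TXD is 1 << 14 = 16384 bytes, so
 * a single 60000-byte fragment costs TXD_USE_COUNT(60000) =
 * DIV_ROUND_UP(60000, 16384) = 4 data descriptors.  DESC_NEEDED is the slack
 * the transmit path reserves when checking for ring space: one descriptor per
 * possible fragment plus a few extra (context descriptor, skb head data, and
 * the like; the exact breakdown of the "+ 4" is an assumption here).
 */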
 252
 253/* wrapper around a pointer to a socket buffer,
 254 * so a DMA handle can be stored along with the buffer */
 255struct ixgbe_tx_buffer {
 256	union ixgbe_adv_tx_desc *next_to_watch;
 257	unsigned long time_stamp;
 258	union {
 259		struct sk_buff *skb;
 260		struct xdp_frame *xdpf;
 261	};
 262	unsigned int bytecount;
 263	unsigned short gso_segs;
 264	__be16 protocol;
 265	DEFINE_DMA_UNMAP_ADDR(dma);
 266	DEFINE_DMA_UNMAP_LEN(len);
 267	u32 tx_flags;
 268};
 269
 270struct ixgbe_rx_buffer {
 271	union {
 272		struct {
 273			struct sk_buff *skb;
 274			dma_addr_t dma;
 275			struct page *page;
 276			__u32 page_offset;
 277			__u16 pagecnt_bias;
 278		};
 279		struct {
 280			bool discard;
 281			struct xdp_buff *xdp;
 282		};
 283	};
 284};
 285
 286struct ixgbe_queue_stats {
 287	u64 packets;
 288	u64 bytes;
 289};
 290
 291struct ixgbe_tx_queue_stats {
 292	u64 restart_queue;
 293	u64 tx_busy;
 294	u64 tx_done_old;
 295};
 296
 297struct ixgbe_rx_queue_stats {
 298	u64 rsc_count;
 299	u64 rsc_flush;
 300	u64 non_eop_descs;
 301	u64 alloc_rx_page;
 302	u64 alloc_rx_page_failed;
 303	u64 alloc_rx_buff_failed;
 304	u64 csum_err;
 305};
 306
 307#define IXGBE_TS_HDR_LEN 8
 308
 309enum ixgbe_ring_state_t {
 310	__IXGBE_RX_3K_BUFFER,
 311	__IXGBE_RX_BUILD_SKB_ENABLED,
 312	__IXGBE_RX_RSC_ENABLED,
 313	__IXGBE_RX_CSUM_UDP_ZERO_ERR,
 314	__IXGBE_RX_FCOE,
 315	__IXGBE_TX_FDIR_INIT_DONE,
 316	__IXGBE_TX_XPS_INIT_DONE,
 317	__IXGBE_TX_DETECT_HANG,
 318	__IXGBE_HANG_CHECK_ARMED,
 319	__IXGBE_TX_XDP_RING,
 320	__IXGBE_TX_DISABLED,
 321};
 322
 323#define ring_uses_build_skb(ring) \
 324	test_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &(ring)->state)
 325
 326struct ixgbe_fwd_adapter {
 327	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
 328	struct net_device *netdev;
 329	unsigned int tx_base_queue;
 330	unsigned int rx_base_queue;
 331	int pool;
 332};
 333
 334#define check_for_tx_hang(ring) \
 335	test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
 336#define set_check_for_tx_hang(ring) \
 337	set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
 338#define clear_check_for_tx_hang(ring) \
 339	clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
 340#define ring_is_rsc_enabled(ring) \
 341	test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
 342#define set_ring_rsc_enabled(ring) \
 343	set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
 344#define clear_ring_rsc_enabled(ring) \
 345	clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
 346#define ring_is_xdp(ring) \
 347	test_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
 348#define set_ring_xdp(ring) \
 349	set_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
 350#define clear_ring_xdp(ring) \
 351	clear_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
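/* Usage sketch (illustrative): these helpers wrap atomic bitops on
 * ring->state.  For example, set_ring_xdp(tx_ring) marks a Tx ring as
 * reserved for XDP transmission, and a receive path can branch with
 *
 *	if (ring_is_rsc_enabled(rx_ring))
 *		...handle an RSC-coalesced completion...
 */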
 352struct ixgbe_ring {
 353	struct ixgbe_ring *next;	/* pointer to next ring in q_vector */
 354	struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */
 355	struct net_device *netdev;	/* netdev ring belongs to */
 356	struct bpf_prog *xdp_prog;
 357	struct device *dev;		/* device for DMA mapping */
 358	void *desc;			/* descriptor ring memory */
 359	union {
 360		struct ixgbe_tx_buffer *tx_buffer_info;
 361		struct ixgbe_rx_buffer *rx_buffer_info;
 362	};
 363	unsigned long state;
 364	u8 __iomem *tail;
 365	dma_addr_t dma;			/* phys. address of descriptor ring */
 366	unsigned int size;		/* length in bytes */
 367
 368	u16 count;			/* amount of descriptors */
 369
 370	u8 queue_index; /* needed for multiqueue queue management */
 371	u8 reg_idx;			/* holds the special value that gets
 372					 * the hardware register offset
 373					 * associated with this ring, which is
 374					 * different for DCB and RSS modes
 375					 */
 376	u16 next_to_use;
 377	u16 next_to_clean;
 378
 379	unsigned long last_rx_timestamp;
 380
 381	union {
 382		u16 next_to_alloc;
 383		struct {
 384			u8 atr_sample_rate;
 385			u8 atr_count;
 386		};
 387	};
 388
 389	u8 dcb_tc;
 390	struct ixgbe_queue_stats stats;
 391	struct u64_stats_sync syncp;
 392	union {
 393		struct ixgbe_tx_queue_stats tx_stats;
 394		struct ixgbe_rx_queue_stats rx_stats;
 395	};
 396	u16 rx_offset;
 397	struct xdp_rxq_info xdp_rxq;
 398	spinlock_t tx_lock;	/* used in XDP mode */
 399	struct xsk_buff_pool *xsk_pool;
 400	u16 ring_idx;		/* {rx,tx,xdp}_ring back reference idx */
 401	u16 rx_buf_len;
 402} ____cacheline_internodealigned_in_smp;
 403
 404enum ixgbe_ring_f_enum {
 405	RING_F_NONE = 0,
 406	RING_F_VMDQ,  /* SR-IOV uses the same ring feature */
 407	RING_F_RSS,
 408	RING_F_FDIR,
 409#ifdef IXGBE_FCOE
 410	RING_F_FCOE,
 411#endif /* IXGBE_FCOE */
 412
 413	RING_F_ARRAY_SIZE      /* must be last in enum set */
 414};
 415
 416#define IXGBE_MAX_RSS_INDICES		16
 417#define IXGBE_MAX_RSS_INDICES_X550	63
 418#define IXGBE_MAX_VMDQ_INDICES		64
 419#define IXGBE_MAX_FDIR_INDICES		63	/* based on q_vector limit */
 420#define IXGBE_MAX_FCOE_INDICES		8
 421#define MAX_RX_QUEUES			(IXGBE_MAX_FDIR_INDICES + 1)
 422#define MAX_TX_QUEUES			(IXGBE_MAX_FDIR_INDICES + 1)
 423#define IXGBE_MAX_XDP_QS		(IXGBE_MAX_FDIR_INDICES + 1)
 424#define IXGBE_MAX_L2A_QUEUES		4
 425#define IXGBE_BAD_L2A_QUEUE		3
 426#define IXGBE_MAX_MACVLANS		63
 427
 428DECLARE_STATIC_KEY_FALSE(ixgbe_xdp_locking_key);
 429
 430struct ixgbe_ring_feature {
 431	u16 limit;	/* upper limit on feature indices */
 432	u16 indices;	/* current value of indices */
 433	u16 mask;	/* Mask used for feature to ring mapping */
 434	u16 offset;	/* offset to start of feature */
 435} ____cacheline_internodealigned_in_smp;
 436
 437#define IXGBE_82599_VMDQ_8Q_MASK 0x78
 438#define IXGBE_82599_VMDQ_4Q_MASK 0x7C
 439#define IXGBE_82599_VMDQ_2Q_MASK 0x7E
 440
 441/*
 442 * FCoE requires that all Rx buffers be over 2200 bytes in length.  Since
 443 * this is twice the size of a half page we need to double the page order
 444 * for FCoE enabled Rx queues.
 445 */
 446static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring)
 447{
 448	if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
 449		return IXGBE_RXBUFFER_3K;
 450#if (PAGE_SIZE < 8192)
 451	if (ring_uses_build_skb(ring))
 452		return IXGBE_MAX_2K_FRAME_BUILD_SKB;
 453#endif
 454	return IXGBE_RXBUFFER_2K;
 455}
 456
 457static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
 458{
 459#if (PAGE_SIZE < 8192)
 460	if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
 461		return 1;
 462#endif
 463	return 0;
 464}
 465#define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring))
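/* Example (illustrative, 4K pages): with __IXGBE_RX_3K_BUFFER set,
 * ixgbe_rx_pg_order() returns 1, so ixgbe_rx_pg_size() is 8192 and
 * ixgbe_rx_bufsz() is IXGBE_RXBUFFER_3K; the page-reuse logic can then flip
 * between the two halves of the order-1 page, one 3K buffer per half.
 * Without the 3K flag an order-0 (4K) page is split into two 2K buffers.
 */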
 466
 467#define IXGBE_ITR_ADAPTIVE_MIN_INC	2
 468#define IXGBE_ITR_ADAPTIVE_MIN_USECS	10
 469#define IXGBE_ITR_ADAPTIVE_MAX_USECS	126
 470#define IXGBE_ITR_ADAPTIVE_LATENCY	0x80
 471#define IXGBE_ITR_ADAPTIVE_BULK		0x00
 472
 473struct ixgbe_ring_container {
 474	struct ixgbe_ring *ring;	/* pointer to linked list of rings */
 475	unsigned long next_update;	/* jiffies value of last update */
 476	unsigned int total_bytes;	/* total bytes processed this int */
 477	unsigned int total_packets;	/* total packets processed this int */
 478	u16 work_limit;			/* total work allowed per interrupt */
 479	u8 count;			/* total number of rings in vector */
 480	u8 itr;				/* current ITR setting for ring */
 481};
 482
 483/* iterator for handling rings in ring container */
 484#define ixgbe_for_each_ring(pos, head) \
 485	for (pos = (head).ring; pos != NULL; pos = pos->next)
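/* Usage sketch (illustrative): walk every Tx ring attached to a q_vector,
 * e.g. from a NAPI poll routine:
 *
 *	struct ixgbe_ring *ring;
 *
 *	ixgbe_for_each_ring(ring, q_vector->tx)
 *		clean_complete &= some_tx_clean_fn(q_vector, ring, budget);
 *
 * (some_tx_clean_fn is a placeholder, not a real driver symbol.)
 */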
 486
 487#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
 488			      ? 8 : 1)
 489#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
 490
 491/* MAX_Q_VECTORS of these are allocated,
 492 * but we only use one per queue-specific vector.
 493 */
 494struct ixgbe_q_vector {
 495	struct ixgbe_adapter *adapter;
 496#ifdef CONFIG_IXGBE_DCA
 497	int cpu;	    /* CPU for DCA */
 498#endif
 499	u16 v_idx;		/* index of q_vector within array, also used for
 500				 * finding the bit in EICR and friends that
 501				 * represents the vector for this ring */
 502	u16 itr;		/* Interrupt throttle rate written to EITR */
 503	struct ixgbe_ring_container rx, tx;
 504
 505	struct napi_struct napi;
 506	cpumask_t affinity_mask;
 507	int numa_node;
 508	struct rcu_head rcu;	/* to avoid race with update stats on free */
 509	char name[IFNAMSIZ + 9];
 510
 511	/* for dynamic allocation of rings associated with this q_vector */
 512	struct ixgbe_ring ring[] ____cacheline_internodealigned_in_smp;
 513};
 514
 515#ifdef CONFIG_IXGBE_HWMON
 516
 517#define IXGBE_HWMON_TYPE_LOC		0
 518#define IXGBE_HWMON_TYPE_TEMP		1
 519#define IXGBE_HWMON_TYPE_CAUTION	2
 520#define IXGBE_HWMON_TYPE_MAX		3
 521
 522struct hwmon_attr {
 523	struct device_attribute dev_attr;
 524	struct ixgbe_hw *hw;
 525	struct ixgbe_thermal_diode_data *sensor;
 526	char name[12];
 527};
 528
 529struct hwmon_buff {
 530	struct attribute_group group;
 531	const struct attribute_group *groups[2];
 532	struct attribute *attrs[IXGBE_MAX_SENSORS * 4 + 1];
 533	struct hwmon_attr hwmon_list[IXGBE_MAX_SENSORS * 4];
 534	unsigned int n_hwmon;
 535};
 536#endif /* CONFIG_IXGBE_HWMON */
 537
 538/*
 539 * microsecond values for various ITR rates shifted by 2 to fit itr register
 540 * with the first 3 bits reserved 0
 541 */
 542#define IXGBE_MIN_RSC_ITR	24
 543#define IXGBE_100K_ITR		40
 544#define IXGBE_20K_ITR		200
 545#define IXGBE_12K_ITR		336
 546
 547/* ixgbe_test_staterr - tests bits in Rx descriptor status and error fields */
 548static inline __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc *rx_desc,
 549					const u32 stat_err_bits)
 550{
 551	return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
 552}
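/* Usage sketch (illustrative): callers only test the result for zero/non-zero,
 * which is why the helper can compare in descriptor (little-endian) byte order
 * without a conversion, e.g.
 *
 *	if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
 *		...descriptor has been written back by hardware...
 */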
 553
 554static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
 555{
 556	u16 ntc = ring->next_to_clean;
 557	u16 ntu = ring->next_to_use;
 558
 559	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
 560}
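/* Worked example (illustrative): with count == 512, next_to_clean == 10 and
 * next_to_use == 500, 490 descriptors are outstanding, so the helper returns
 * 512 + 10 - 500 - 1 = 21.  One slot is always left unused so that
 * next_to_use == next_to_clean can unambiguously mean "ring empty".
 */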
 561
 562#define IXGBE_RX_DESC(R, i)	    \
 563	(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
 564#define IXGBE_TX_DESC(R, i)	    \
 565	(&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
 566#define IXGBE_TX_CTXTDESC(R, i)	    \
 567	(&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
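/* Usage sketch (illustrative): the macros simply cast the ring's descriptor
 * memory to the right layout and index into it, e.g.
 *
 *	union ixgbe_adv_rx_desc *rx_desc = IXGBE_RX_DESC(rx_ring, ntc);
 */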
 568
 569#define IXGBE_MAX_JUMBO_FRAME_SIZE	9728 /* Maximum Supported Size 9.5KB */
 570#ifdef IXGBE_FCOE
 571/* Use 3K as the baby jumbo frame size for FCoE */
 572#define IXGBE_FCOE_JUMBO_FRAME_SIZE       3072
 573#endif /* IXGBE_FCOE */
 574
 575#define OTHER_VECTOR 1
 576#define NON_Q_VECTORS (OTHER_VECTOR)
 577
 578#define MAX_MSIX_VECTORS_82599 64
 579#define MAX_Q_VECTORS_82599 64
 580#define MAX_MSIX_VECTORS_82598 18
 581#define MAX_Q_VECTORS_82598 16
 582
 583struct ixgbe_mac_addr {
 584	u8 addr[ETH_ALEN];
 585	u16 pool;
 586	u16 state; /* bitmask */
 587};
 588
 589#define IXGBE_MAC_STATE_DEFAULT		0x1
 590#define IXGBE_MAC_STATE_MODIFIED	0x2
 591#define IXGBE_MAC_STATE_IN_USE		0x4
 592
 593#define MAX_Q_VECTORS MAX_Q_VECTORS_82599
 594#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599
 595
 596#define MIN_MSIX_Q_VECTORS 1
 597#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
 598
 599/* default to trying for four seconds */
 600#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
 601#define IXGBE_SFP_POLL_JIFFIES (2 * HZ)	/* SFP poll every 2 seconds */
 602
 603#define IXGBE_PRIMARY_ABORT_LIMIT	5
 604
 605/* board specific private data structure */
 606struct ixgbe_adapter {
 607	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
 608	/* OS defined structs */
 609	struct net_device *netdev;
 610	struct bpf_prog *xdp_prog;
 611	struct pci_dev *pdev;
 612	struct mii_bus *mii_bus;
 613
 614	unsigned long state;
 615
 616	/* Some features need tri-state capability,
 617	 * thus the additional *_CAPABLE flags.
 618	 */
 619	u32 flags;
 620#define IXGBE_FLAG_MSI_ENABLED			BIT(1)
 621#define IXGBE_FLAG_MSIX_ENABLED			BIT(3)
 622#define IXGBE_FLAG_RX_1BUF_CAPABLE		BIT(4)
 623#define IXGBE_FLAG_RX_PS_CAPABLE		BIT(5)
 624#define IXGBE_FLAG_RX_PS_ENABLED		BIT(6)
 625#define IXGBE_FLAG_DCA_ENABLED			BIT(8)
 626#define IXGBE_FLAG_DCA_CAPABLE			BIT(9)
 627#define IXGBE_FLAG_IMIR_ENABLED			BIT(10)
 628#define IXGBE_FLAG_MQ_CAPABLE			BIT(11)
 629#define IXGBE_FLAG_DCB_ENABLED			BIT(12)
 630#define IXGBE_FLAG_VMDQ_CAPABLE			BIT(13)
 631#define IXGBE_FLAG_VMDQ_ENABLED			BIT(14)
 632#define IXGBE_FLAG_FAN_FAIL_CAPABLE		BIT(15)
 633#define IXGBE_FLAG_NEED_LINK_UPDATE		BIT(16)
 634#define IXGBE_FLAG_NEED_LINK_CONFIG		BIT(17)
 635#define IXGBE_FLAG_FDIR_HASH_CAPABLE		BIT(18)
 636#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE		BIT(19)
 637#define IXGBE_FLAG_FCOE_CAPABLE			BIT(20)
 638#define IXGBE_FLAG_FCOE_ENABLED			BIT(21)
 639#define IXGBE_FLAG_SRIOV_CAPABLE		BIT(22)
 640#define IXGBE_FLAG_SRIOV_ENABLED		BIT(23)
 641#define IXGBE_FLAG_RX_HWTSTAMP_ENABLED		BIT(25)
 642#define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER	BIT(26)
 643#define IXGBE_FLAG_DCB_CAPABLE			BIT(27)
 644
 645	u32 flags2;
 646#define IXGBE_FLAG2_RSC_CAPABLE			BIT(0)
 647#define IXGBE_FLAG2_RSC_ENABLED			BIT(1)
 648#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE		BIT(2)
 649#define IXGBE_FLAG2_TEMP_SENSOR_EVENT		BIT(3)
 650#define IXGBE_FLAG2_SEARCH_FOR_SFP		BIT(4)
 651#define IXGBE_FLAG2_SFP_NEEDS_RESET		BIT(5)
 652#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT	BIT(7)
 653#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP		BIT(8)
 654#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP		BIT(9)
 655#define IXGBE_FLAG2_PTP_PPS_ENABLED		BIT(10)
 656#define IXGBE_FLAG2_PHY_INTERRUPT		BIT(11)
 657#define IXGBE_FLAG2_VLAN_PROMISC		BIT(13)
 658#define IXGBE_FLAG2_EEE_CAPABLE			BIT(14)
 659#define IXGBE_FLAG2_EEE_ENABLED			BIT(15)
 660#define IXGBE_FLAG2_RX_LEGACY			BIT(16)
 661#define IXGBE_FLAG2_IPSEC_ENABLED		BIT(17)
 662#define IXGBE_FLAG2_VF_IPSEC_ENABLED		BIT(18)
 663#define IXGBE_FLAG2_AUTO_DISABLE_VF		BIT(19)
 664
 665	/* Tx fast path data */
 666	int num_tx_queues;
 667	u16 tx_itr_setting;
 668	u16 tx_work_limit;
 669	u64 tx_ipsec;
 670
 671	/* Rx fast path data */
 672	int num_rx_queues;
 673	u16 rx_itr_setting;
 674	u64 rx_ipsec;
 675
 676	/* Port number used to identify VXLAN traffic */
 677	__be16 vxlan_port;
 678	__be16 geneve_port;
 679
 680	/* XDP */
 681	int num_xdp_queues;
 682	struct ixgbe_ring *xdp_ring[IXGBE_MAX_XDP_QS];
 683	unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled rings */
 684
 685	/* TX */
 686	struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
 687
 688	u64 restart_queue;
 689	u64 lsc_int;
 690	u32 tx_timeout_count;
 691
 692	/* RX */
 693	struct ixgbe_ring *rx_ring[MAX_RX_QUEUES];
 694	int num_rx_pools;		/* == num_rx_queues in 82598 */
 695	int num_rx_queues_per_pool;	/* 1 if 82598, can be many if 82599 */
 696	u64 hw_csum_rx_error;
 697	u64 hw_rx_no_dma_resources;
 698	u64 rsc_total_count;
 699	u64 rsc_total_flush;
 700	u64 non_eop_descs;
 701	u32 alloc_rx_page;
 702	u32 alloc_rx_page_failed;
 703	u32 alloc_rx_buff_failed;
 704
 705	struct ixgbe_q_vector *q_vector[MAX_Q_VECTORS];
 706
 707	/* DCB parameters */
 708	struct ieee_pfc *ixgbe_ieee_pfc;
 709	struct ieee_ets *ixgbe_ieee_ets;
 710	struct ixgbe_dcb_config dcb_cfg;
 711	struct ixgbe_dcb_config temp_dcb_cfg;
 712	u8 hw_tcs;
 713	u8 dcb_set_bitmap;
 714	u8 dcbx_cap;
 715	enum ixgbe_fc_mode last_lfc_mode;
 716
 717	int num_q_vectors;	/* current number of q_vectors for device */
 718	int max_q_vectors;	/* true count of q_vectors for device */
 719	struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
 720	struct msix_entry *msix_entries;
 721
 722	u32 test_icr;
 723	struct ixgbe_ring test_tx_ring;
 724	struct ixgbe_ring test_rx_ring;
 725
 726	/* structs defined in ixgbe_hw.h */
 727	struct ixgbe_hw hw;
 728	u16 msg_enable;
 729	struct ixgbe_hw_stats stats;
 730
 731	u64 tx_busy;
 732	unsigned int tx_ring_count;
 733	unsigned int xdp_ring_count;
 734	unsigned int rx_ring_count;
 735
 736	u32 link_speed;
 737	bool link_up;
 738	unsigned long sfp_poll_time;
 739	unsigned long link_check_timeout;
 740
 741	struct timer_list service_timer;
 742	struct work_struct service_task;
 743
 744	struct hlist_head fdir_filter_list;
 745	unsigned long fdir_overflow; /* number of times ATR was backed off */
 746	union ixgbe_atr_input fdir_mask;
 747	int fdir_filter_count;
 748	u32 fdir_pballoc;
 749	u32 atr_sample_rate;
 750	spinlock_t fdir_perfect_lock;
 751
 752#ifdef IXGBE_FCOE
 753	struct ixgbe_fcoe fcoe;
 754#endif /* IXGBE_FCOE */
 755	u8 __iomem *io_addr; /* Mainly for iounmap use */
 756	u32 wol;
 757
 758	u16 bridge_mode;
 759
 760	char eeprom_id[NVM_VER_SIZE];
 761	u16 eeprom_cap;
 762
 763	u32 interrupt_event;
 764	u32 led_reg;
 765
 766	struct ptp_clock *ptp_clock;
 767	struct ptp_clock_info ptp_caps;
 768	struct work_struct ptp_tx_work;
 769	struct sk_buff *ptp_tx_skb;
 770	struct hwtstamp_config tstamp_config;
 771	unsigned long ptp_tx_start;
 772	unsigned long last_overflow_check;
 773	unsigned long last_rx_ptp_check;
 774	unsigned long last_rx_timestamp;
 775	spinlock_t tmreg_lock;
 776	struct cyclecounter hw_cc;
 777	struct timecounter hw_tc;
 778	u32 base_incval;
 779	u32 tx_hwtstamp_timeouts;
 780	u32 tx_hwtstamp_skipped;
 781	u32 rx_hwtstamp_cleared;
 782	void (*ptp_setup_sdp)(struct ixgbe_adapter *);
 783
 784	/* SR-IOV */
 785	DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
 786	unsigned int num_vfs;
 787	struct vf_data_storage *vfinfo;
 788	int vf_rate_link_speed;
 789	struct vf_macvlans vf_mvs;
 790	struct vf_macvlans *mv_list;
 791
 792	u32 timer_event_accumulator;
 793	u32 vferr_refcount;
 794	struct ixgbe_mac_addr *mac_table;
 795	struct kobject *info_kobj;
 796#ifdef CONFIG_IXGBE_HWMON
 797	struct hwmon_buff *ixgbe_hwmon_buff;
 798#endif /* CONFIG_IXGBE_HWMON */
 799#ifdef CONFIG_DEBUG_FS
 800	struct dentry *ixgbe_dbg_adapter;
 801#endif /*CONFIG_DEBUG_FS*/
 802
 803	u8 default_up;
 804	/* Bitmask indicating in use pools */
 805	DECLARE_BITMAP(fwd_bitmask, IXGBE_MAX_MACVLANS + 1);
 806
 807#define IXGBE_MAX_LINK_HANDLE 10
 808	struct ixgbe_jump_table *jump_tables[IXGBE_MAX_LINK_HANDLE];
 809	unsigned long tables;
 810
 811/* maximum number of RETA entries among all devices supported by ixgbe
 812 * driver: currently it's x550 device in non-SRIOV mode
 813 */
 814#define IXGBE_MAX_RETA_ENTRIES 512
 815	u8 rss_indir_tbl[IXGBE_MAX_RETA_ENTRIES];
 816
 817#define IXGBE_RSS_KEY_SIZE     40  /* size of RSS Hash Key in bytes */
 818	u32 *rss_key;
 819
 820#ifdef CONFIG_IXGBE_IPSEC
 821	struct ixgbe_ipsec *ipsec;
 822#endif /* CONFIG_IXGBE_IPSEC */
 823	spinlock_t vfs_lock;
 824};
 825
 826static inline int ixgbe_determine_xdp_q_idx(int cpu)
 827{
 828	if (static_key_enabled(&ixgbe_xdp_locking_key))
 829		return cpu % IXGBE_MAX_XDP_QS;
 830	else
 831		return cpu;
 832}
 833
 834static inline
 835struct ixgbe_ring *ixgbe_determine_xdp_ring(struct ixgbe_adapter *adapter)
 836{
 837	int index = ixgbe_determine_xdp_q_idx(smp_processor_id());
 838
 839	return adapter->xdp_ring[index];
 840}
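/* Example (illustrative): with 72 online CPUs but only IXGBE_MAX_XDP_QS (64)
 * XDP Tx rings, ixgbe_xdp_locking_key is expected to be enabled, so CPU 70
 * maps to ring 70 % 64 = 6 and several CPUs may share that ring, serialized
 * by its tx_lock; with at least as many rings as CPUs the static key stays
 * disabled and the CPU-to-ring mapping is 1:1 with no locking needed.
 */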
 841
 842static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
 843{
 844	switch (adapter->hw.mac.type) {
 845	case ixgbe_mac_82598EB:
 846	case ixgbe_mac_82599EB:
 847	case ixgbe_mac_X540:
 848		return IXGBE_MAX_RSS_INDICES;
 849	case ixgbe_mac_X550:
 850	case ixgbe_mac_X550EM_x:
 851	case ixgbe_mac_x550em_a:
 852		return IXGBE_MAX_RSS_INDICES_X550;
 853	default:
 854		return 0;
 855	}
 856}
 857
 858struct ixgbe_fdir_filter {
 859	struct hlist_node fdir_node;
 860	union ixgbe_atr_input filter;
 861	u16 sw_idx;
 862	u64 action;
 863};
 864
 865enum ixgbe_state_t {
 866	__IXGBE_TESTING,
 867	__IXGBE_RESETTING,
 868	__IXGBE_DOWN,
 869	__IXGBE_DISABLED,
 870	__IXGBE_REMOVING,
 871	__IXGBE_SERVICE_SCHED,
 872	__IXGBE_SERVICE_INITED,
 873	__IXGBE_IN_SFP_INIT,
 874	__IXGBE_PTP_RUNNING,
 875	__IXGBE_PTP_TX_IN_PROGRESS,
 876	__IXGBE_RESET_REQUESTED,
 877};
 878
 879struct ixgbe_cb {
 880	union {				/* Union defining head/tail partner */
 881		struct sk_buff *head;
 882		struct sk_buff *tail;
 883	};
 884	dma_addr_t dma;
 885	u16 append_cnt;
 886	bool page_released;
 887};
 888#define IXGBE_CB(skb) ((struct ixgbe_cb *)(skb)->cb)
 889
 890enum ixgbe_boards {
 891	board_82598,
 892	board_82599,
 893	board_X540,
 894	board_X550,
 895	board_X550EM_x,
 896	board_x550em_x_fw,
 897	board_x550em_a,
 898	board_x550em_a_fw,
 899};
 900
 901extern const struct ixgbe_info ixgbe_82598_info;
 902extern const struct ixgbe_info ixgbe_82599_info;
 903extern const struct ixgbe_info ixgbe_X540_info;
 904extern const struct ixgbe_info ixgbe_X550_info;
 905extern const struct ixgbe_info ixgbe_X550EM_x_info;
 906extern const struct ixgbe_info ixgbe_x550em_x_fw_info;
 907extern const struct ixgbe_info ixgbe_x550em_a_info;
 908extern const struct ixgbe_info ixgbe_x550em_a_fw_info;
 909#ifdef CONFIG_IXGBE_DCB
 910extern const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops;
 911#endif
 912
 913extern char ixgbe_driver_name[];
 914#ifdef IXGBE_FCOE
 915extern char ixgbe_default_device_descr[];
 916#endif /* IXGBE_FCOE */
 917
 918int ixgbe_open(struct net_device *netdev);
 919int ixgbe_close(struct net_device *netdev);
 920void ixgbe_up(struct ixgbe_adapter *adapter);
 921void ixgbe_down(struct ixgbe_adapter *adapter);
 922void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
 923void ixgbe_reset(struct ixgbe_adapter *adapter);
 924void ixgbe_set_ethtool_ops(struct net_device *netdev);
 925int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
 926int ixgbe_setup_tx_resources(struct ixgbe_ring *);
 927void ixgbe_free_rx_resources(struct ixgbe_ring *);
 928void ixgbe_free_tx_resources(struct ixgbe_ring *);
 929void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
 930void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
 931void ixgbe_disable_rx(struct ixgbe_adapter *adapter);
 932void ixgbe_disable_tx(struct ixgbe_adapter *adapter);
 933void ixgbe_update_stats(struct ixgbe_adapter *adapter);
 934int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
 935bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
 936			 u16 subdevice_id);
 937#ifdef CONFIG_PCI_IOV
 938void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
 939#endif
 940int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
 941			 const u8 *addr, u16 queue);
 942int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
 943			 const u8 *addr, u16 queue);
 944void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid);
 945void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
 946netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *,
  947				  struct ixgbe_ring *);
 948void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
 949void ixgbe_write_eitr(struct ixgbe_q_vector *);
 950int ixgbe_poll(struct napi_struct *napi, int budget);
 951int ethtool_ioctl(struct ifreq *ifr);
 952s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
 953s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
 954s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
 955s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
 956					  union ixgbe_atr_hash_dword input,
 957					  union ixgbe_atr_hash_dword common,
 958					  u8 queue);
 959s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
 960				    union ixgbe_atr_input *input_mask);
 961s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
 962					  union ixgbe_atr_input *input,
 963					  u16 soft_id, u8 queue);
 964s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
 965					  union ixgbe_atr_input *input,
 966					  u16 soft_id);
 967void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
 968					  union ixgbe_atr_input *mask);
 969int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
 970				    struct ixgbe_fdir_filter *input,
 971				    u16 sw_idx);
 972void ixgbe_set_rx_mode(struct net_device *netdev);
 973#ifdef CONFIG_IXGBE_DCB
 974void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
 975#endif
 976int ixgbe_setup_tc(struct net_device *dev, u8 tc);
 977void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
 978void ixgbe_do_reset(struct net_device *netdev);
 979#ifdef CONFIG_IXGBE_HWMON
 980void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
 981int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
 982#endif /* CONFIG_IXGBE_HWMON */
 983#ifdef IXGBE_FCOE
 984void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
 985int ixgbe_fso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
 986	      u8 *hdr_len);
 987int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
 988		   union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb);
 989int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
 990		       struct scatterlist *sgl, unsigned int sgc);
 991int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
 992			  struct scatterlist *sgl, unsigned int sgc);
 993int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
 994int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
 995void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
 996int ixgbe_fcoe_enable(struct net_device *netdev);
  997int ixgbe_fcoe_disable(struct net_device *netdev);
 998int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
 999int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
1000			   struct netdev_fcoe_hbainfo *info);
1001u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
1002#endif /* IXGBE_FCOE */
1003#ifdef CONFIG_DEBUG_FS
1004void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter);
1005void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter);
1006void ixgbe_dbg_init(void);
1007void ixgbe_dbg_exit(void);
1008#else
1009static inline void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) {}
1010static inline void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) {}
1011static inline void ixgbe_dbg_init(void) {}
1012static inline void ixgbe_dbg_exit(void) {}
1013#endif /* CONFIG_DEBUG_FS */
1014static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
1015{
1016	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
1017}
1018
1019void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
1020void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter);
1021void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
1022void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
1023void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
1024void ixgbe_ptp_tx_hang(struct ixgbe_adapter *adapter);
1025void ixgbe_ptp_rx_pktstamp(struct ixgbe_q_vector *, struct sk_buff *);
1026void ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *, struct sk_buff *skb);
1027static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
1028					 union ixgbe_adv_rx_desc *rx_desc,
1029					 struct sk_buff *skb)
1030{
1031	if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_TSIP))) {
1032		ixgbe_ptp_rx_pktstamp(rx_ring->q_vector, skb);
1033		return;
1034	}
1035
1036	if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
1037		return;
1038
1039	ixgbe_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
1040
1041	/* Update the last_rx_timestamp timer in order to enable watchdog check
1042	 * for error case of latched timestamp on a dropped packet.
1043	 */
1044	rx_ring->last_rx_timestamp = jiffies;
1045}
1046
1047int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
1048int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
1049void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
1050void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
1051void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter);
1052#ifdef CONFIG_PCI_IOV
1053void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter);
1054#endif
1055
1056netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
1057				  struct ixgbe_adapter *adapter,
1058				  struct ixgbe_ring *tx_ring);
1059u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter);
1060void ixgbe_store_key(struct ixgbe_adapter *adapter);
1061void ixgbe_store_reta(struct ixgbe_adapter *adapter);
1062s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
1063		       u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
1064#ifdef CONFIG_IXGBE_IPSEC
1065void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter);
1066void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter);
1067void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter);
1068void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
1069		    union ixgbe_adv_rx_desc *rx_desc,
1070		    struct sk_buff *skb);
1071int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
1072		   struct ixgbe_ipsec_tx_data *itd);
1073void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf);
1074int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *mbuf, u32 vf);
1075int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, u32 *mbuf, u32 vf);
1076#else
1077static inline void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter) { }
1078static inline void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter) { }
1079static inline void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) { }
1080static inline void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
1081				  union ixgbe_adv_rx_desc *rx_desc,
1082				  struct sk_buff *skb) { }
1083static inline int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
1084				 struct ixgbe_tx_buffer *first,
1085				 struct ixgbe_ipsec_tx_data *itd) { return 0; }
1086static inline void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter,
1087					u32 vf) { }
1088static inline int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter,
1089					u32 *mbuf, u32 vf) { return -EACCES; }
1090static inline int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter,
1091					u32 *mbuf, u32 vf) { return -EACCES; }
1092#endif /* CONFIG_IXGBE_IPSEC */
1093
1094static inline bool ixgbe_enabled_xdp_adapter(struct ixgbe_adapter *adapter)
1095{
1096	return !!adapter->xdp_prog;
1097}
1098
1099#endif /* _IXGBE_H_ */
v6.2
   1/* SPDX-License-Identifier: GPL-2.0 */
   2/* Copyright(c) 1999 - 2018 Intel Corporation. */
   3
   4#ifndef _IXGBE_H_
   5#define _IXGBE_H_
   6
   7#include <linux/bitops.h>
   8#include <linux/types.h>
   9#include <linux/pci.h>
  10#include <linux/netdevice.h>
  11#include <linux/cpumask.h>
  12#include <linux/aer.h>
  13#include <linux/if_vlan.h>
  14#include <linux/jiffies.h>
  15#include <linux/phy.h>
  16
  17#include <linux/timecounter.h>
  18#include <linux/net_tstamp.h>
  19#include <linux/ptp_clock_kernel.h>
  20
  21#include "ixgbe_type.h"
  22#include "ixgbe_common.h"
  23#include "ixgbe_dcb.h"
  24#if IS_ENABLED(CONFIG_FCOE)
  25#define IXGBE_FCOE
  26#include "ixgbe_fcoe.h"
  27#endif /* IS_ENABLED(CONFIG_FCOE) */
  28#ifdef CONFIG_IXGBE_DCA
  29#include <linux/dca.h>
  30#endif
  31#include "ixgbe_ipsec.h"
  32
  33#include <net/xdp.h>
  34
  35/* common prefix used by pr_<> macros */
  36#undef pr_fmt
  37#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  38
  39/* TX/RX descriptor defines */
  40#define IXGBE_DEFAULT_TXD		    512
  41#define IXGBE_DEFAULT_TX_WORK		    256
  42#define IXGBE_MAX_TXD_82598		   4096
  43#define IXGBE_MAX_TXD_82599		   8192
  44#define IXGBE_MAX_TXD_X540		   8192
  45#define IXGBE_MAX_TXD_X550		  32768
  46#define IXGBE_MIN_TXD			     64
  47
  48#if (PAGE_SIZE < 8192)
  49#define IXGBE_DEFAULT_RXD		    512
  50#else
  51#define IXGBE_DEFAULT_RXD		    128
  52#endif
  53#define IXGBE_MAX_RXD_82598		   4096
  54#define IXGBE_MAX_RXD_82599		   8192
  55#define IXGBE_MAX_RXD_X540		   8192
  56#define IXGBE_MAX_RXD_X550		  32768
  57#define IXGBE_MIN_RXD			     64
  58
  59/* flow control */
  60#define IXGBE_MIN_FCRTL			   0x40
  61#define IXGBE_MAX_FCRTL			0x7FF80
  62#define IXGBE_MIN_FCRTH			  0x600
  63#define IXGBE_MAX_FCRTH			0x7FFF0
  64#define IXGBE_DEFAULT_FCPAUSE		 0xFFFF
  65#define IXGBE_MIN_FCPAUSE		      0
  66#define IXGBE_MAX_FCPAUSE		 0xFFFF
  67
  68/* Supported Rx Buffer Sizes */
  69#define IXGBE_RXBUFFER_256    256  /* Used for skb receive header */
  70#define IXGBE_RXBUFFER_1536  1536
  71#define IXGBE_RXBUFFER_2K    2048
  72#define IXGBE_RXBUFFER_3K    3072
  73#define IXGBE_RXBUFFER_4K    4096
  74#define IXGBE_MAX_RXBUFFER  16384  /* largest size for a single descriptor */
  75
  76#define IXGBE_PKT_HDR_PAD   (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
  77
  78/* Attempt to maximize the headroom available for incoming frames.  We
  79 * use a 2K buffer for receives and need 1536/1534 to store the data for
  80 * the frame.  This leaves us with 512 bytes of room.  From that we need
  81 * to deduct the space needed for the shared info and the padding needed
  82 * to IP align the frame.
  83 *
  84 * Note: For cache line sizes 256 or larger this value is going to end
  85 *	 up negative.  In these cases we should fall back to the 3K
  86 *	 buffers.
  87 */
  88#if (PAGE_SIZE < 8192)
  89#define IXGBE_MAX_2K_FRAME_BUILD_SKB (IXGBE_RXBUFFER_1536 - NET_IP_ALIGN)
  90#define IXGBE_2K_TOO_SMALL_WITH_PADDING \
  91((NET_SKB_PAD + IXGBE_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K))
  92
  93static inline int ixgbe_compute_pad(int rx_buf_len)
  94{
  95	int page_size, pad_size;
  96
  97	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
  98	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
  99
 100	return pad_size;
 101}
 102
 103static inline int ixgbe_skb_pad(void)
 104{
 105	int rx_buf_len;
 106
 107	/* If a 2K buffer cannot handle a standard Ethernet frame then
 108	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
 109	 *
 110	 * For a 3K buffer we need to add enough padding to allow for
 111	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
 112	 * cache-line alignment.
 113	 */
 114	if (IXGBE_2K_TOO_SMALL_WITH_PADDING)
 115		rx_buf_len = IXGBE_RXBUFFER_3K + SKB_DATA_ALIGN(NET_IP_ALIGN);
 116	else
 117		rx_buf_len = IXGBE_RXBUFFER_1536;
 118
 119	/* if needed make room for NET_IP_ALIGN */
 120	rx_buf_len -= NET_IP_ALIGN;
 121
 122	return ixgbe_compute_pad(rx_buf_len);
 123}
 124
 125#define IXGBE_SKB_PAD	ixgbe_skb_pad()
 126#else
 127#define IXGBE_SKB_PAD	(NET_SKB_PAD + NET_IP_ALIGN)
 128#endif
 129
 130/*
 131 * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 132 * reserve 64 more, and skb_shared_info adds an additional 320 bytes more,
 133 * this adds up to 448 bytes of extra data.
 134 *
 135 * Since netdev_alloc_skb now allocates a page fragment we can use a value
 136 * of 256 and the resultant skb will have a truesize of 960 or less.
 137 */
 138#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256
 139
 140/* How many Rx Buffers do we bundle into one write to the hardware ? */
 141#define IXGBE_RX_BUFFER_WRITE	16	/* Must be power of 2 */
 142
 143#define IXGBE_RX_DMA_ATTR \
 144	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
 145
 146enum ixgbe_tx_flags {
 147	/* cmd_type flags */
 148	IXGBE_TX_FLAGS_HW_VLAN	= 0x01,
 149	IXGBE_TX_FLAGS_TSO	= 0x02,
 150	IXGBE_TX_FLAGS_TSTAMP	= 0x04,
 151
 152	/* olinfo flags */
 153	IXGBE_TX_FLAGS_CC	= 0x08,
 154	IXGBE_TX_FLAGS_IPV4	= 0x10,
 155	IXGBE_TX_FLAGS_CSUM	= 0x20,
 156	IXGBE_TX_FLAGS_IPSEC	= 0x40,
 157
 158	/* software defined flags */
 159	IXGBE_TX_FLAGS_SW_VLAN	= 0x80,
 160	IXGBE_TX_FLAGS_FCOE	= 0x100,
 161};
 162
 163/* VLAN info */
 164#define IXGBE_TX_FLAGS_VLAN_MASK	0xffff0000
 165#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
 166#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT  29
 167#define IXGBE_TX_FLAGS_VLAN_SHIFT	16
 168
 169#define IXGBE_MAX_VF_MC_ENTRIES         30
 170#define IXGBE_MAX_VF_FUNCTIONS          64
 171#define IXGBE_MAX_VFTA_ENTRIES          128
 172#define MAX_EMULATION_MAC_ADDRS         16
 173#define IXGBE_MAX_PF_MACVLANS           15
 174#define VMDQ_P(p)   ((p) + adapter->ring_feature[RING_F_VMDQ].offset)
 175#define IXGBE_82599_VF_DEVICE_ID        0x10ED
 176#define IXGBE_X540_VF_DEVICE_ID         0x1515
 177
 178#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)	\
 179	{							\
 180		u32 current_counter = IXGBE_READ_REG(hw, reg);	\
 181		if (current_counter < last_counter)		\
 182			counter += 0x100000000LL;		\
 183		last_counter = current_counter;			\
 184		counter &= 0xFFFFFFFF00000000LL;		\
 185		counter |= current_counter;			\
 186	}
 187
 188#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
 189	{								 \
 190		u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);	 \
 191		u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);	 \
 192		u64 current_counter = (current_counter_msb << 32) |	 \
 193			current_counter_lsb;				 \
 194		if (current_counter < last_counter)			 \
 195			counter += 0x1000000000LL;			 \
 196		last_counter = current_counter;				 \
 197		counter &= 0xFFFFFFF000000000LL;			 \
 198		counter |= current_counter;				 \
 199	}
 200
 201struct vf_stats {
 202	u64 gprc;
 203	u64 gorc;
 204	u64 gptc;
 205	u64 gotc;
 206	u64 mprc;
 207};
 208
 209struct vf_data_storage {
 210	struct pci_dev *vfdev;
 211	unsigned char vf_mac_addresses[ETH_ALEN];
 212	u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
 213	u16 num_vf_mc_hashes;
 214	bool clear_to_send;
 215	struct vf_stats vfstats;
 216	struct vf_stats last_vfstats;
 217	struct vf_stats saved_rst_vfstats;
 218	bool pf_set_mac;
 219	u16 pf_vlan; /* When set, guest VLAN config not allowed. */
 220	u16 pf_qos;
 221	u16 tx_rate;
 222	int link_enable;
 223	int link_state;
 224	u8 spoofchk_enabled;
 225	bool rss_query_enabled;
 226	u8 trusted;
 227	int xcast_mode;
 228	unsigned int vf_api;
 229	u8 primary_abort_count;
 230};
 231
 232enum ixgbevf_xcast_modes {
 233	IXGBEVF_XCAST_MODE_NONE = 0,
 234	IXGBEVF_XCAST_MODE_MULTI,
 235	IXGBEVF_XCAST_MODE_ALLMULTI,
 236	IXGBEVF_XCAST_MODE_PROMISC,
 237};
 238
 239struct vf_macvlans {
 240	struct list_head l;
 241	int vf;
 242	bool free;
 243	bool is_macvlan;
 244	u8 vf_macvlan[ETH_ALEN];
 245};
 246
 247#define IXGBE_MAX_TXD_PWR	14
 248#define IXGBE_MAX_DATA_PER_TXD	(1u << IXGBE_MAX_TXD_PWR)
 249
 250/* Tx Descriptors needed, worst case */
 251#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
 252#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
 253
 254/* wrapper around a pointer to a socket buffer,
 255 * so a DMA handle can be stored along with the buffer */
 256struct ixgbe_tx_buffer {
 257	union ixgbe_adv_tx_desc *next_to_watch;
 258	unsigned long time_stamp;
 259	union {
 260		struct sk_buff *skb;
 261		struct xdp_frame *xdpf;
 262	};
 263	unsigned int bytecount;
 264	unsigned short gso_segs;
 265	__be16 protocol;
 266	DEFINE_DMA_UNMAP_ADDR(dma);
 267	DEFINE_DMA_UNMAP_LEN(len);
 268	u32 tx_flags;
 269};
 270
 271struct ixgbe_rx_buffer {
 272	union {
 273		struct {
 274			struct sk_buff *skb;
 275			dma_addr_t dma;
 276			struct page *page;
 277			__u32 page_offset;
 278			__u16 pagecnt_bias;
 279		};
 280		struct {
 281			bool discard;
 282			struct xdp_buff *xdp;
 283		};
 284	};
 285};
 286
 287struct ixgbe_queue_stats {
 288	u64 packets;
 289	u64 bytes;
 290};
 291
 292struct ixgbe_tx_queue_stats {
 293	u64 restart_queue;
 294	u64 tx_busy;
 295	u64 tx_done_old;
 296};
 297
 298struct ixgbe_rx_queue_stats {
 299	u64 rsc_count;
 300	u64 rsc_flush;
 301	u64 non_eop_descs;
 302	u64 alloc_rx_page;
 303	u64 alloc_rx_page_failed;
 304	u64 alloc_rx_buff_failed;
 305	u64 csum_err;
 306};
 307
 308#define IXGBE_TS_HDR_LEN 8
 309
 310enum ixgbe_ring_state_t {
 311	__IXGBE_RX_3K_BUFFER,
 312	__IXGBE_RX_BUILD_SKB_ENABLED,
 313	__IXGBE_RX_RSC_ENABLED,
 314	__IXGBE_RX_CSUM_UDP_ZERO_ERR,
 315	__IXGBE_RX_FCOE,
 316	__IXGBE_TX_FDIR_INIT_DONE,
 317	__IXGBE_TX_XPS_INIT_DONE,
 318	__IXGBE_TX_DETECT_HANG,
 319	__IXGBE_HANG_CHECK_ARMED,
 320	__IXGBE_TX_XDP_RING,
 321	__IXGBE_TX_DISABLED,
 322};
 323
 324#define ring_uses_build_skb(ring) \
 325	test_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &(ring)->state)
 326
 327struct ixgbe_fwd_adapter {
 328	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
 329	struct net_device *netdev;
 330	unsigned int tx_base_queue;
 331	unsigned int rx_base_queue;
 332	int pool;
 333};
 334
 335#define check_for_tx_hang(ring) \
 336	test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
 337#define set_check_for_tx_hang(ring) \
 338	set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
 339#define clear_check_for_tx_hang(ring) \
 340	clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
 341#define ring_is_rsc_enabled(ring) \
 342	test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
 343#define set_ring_rsc_enabled(ring) \
 344	set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
 345#define clear_ring_rsc_enabled(ring) \
 346	clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
 347#define ring_is_xdp(ring) \
 348	test_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
 349#define set_ring_xdp(ring) \
 350	set_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
 351#define clear_ring_xdp(ring) \
 352	clear_bit(__IXGBE_TX_XDP_RING, &(ring)->state)
 353struct ixgbe_ring {
 354	struct ixgbe_ring *next;	/* pointer to next ring in q_vector */
 355	struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */
 356	struct net_device *netdev;	/* netdev ring belongs to */
 357	struct bpf_prog *xdp_prog;
 358	struct device *dev;		/* device for DMA mapping */
 359	void *desc;			/* descriptor ring memory */
 360	union {
 361		struct ixgbe_tx_buffer *tx_buffer_info;
 362		struct ixgbe_rx_buffer *rx_buffer_info;
 363	};
 364	unsigned long state;
 365	u8 __iomem *tail;
 366	dma_addr_t dma;			/* phys. address of descriptor ring */
 367	unsigned int size;		/* length in bytes */
 368
 369	u16 count;			/* amount of descriptors */
 370
 371	u8 queue_index; /* needed for multiqueue queue management */
 372	u8 reg_idx;			/* holds the special value that gets
 373					 * the hardware register offset
 374					 * associated with this ring, which is
 375					 * different for DCB and RSS modes
 376					 */
 377	u16 next_to_use;
 378	u16 next_to_clean;
 379
 380	unsigned long last_rx_timestamp;
 381
 382	union {
 383		u16 next_to_alloc;
 384		struct {
 385			u8 atr_sample_rate;
 386			u8 atr_count;
 387		};
 388	};
 389
 390	u8 dcb_tc;
 391	struct ixgbe_queue_stats stats;
 392	struct u64_stats_sync syncp;
 393	union {
 394		struct ixgbe_tx_queue_stats tx_stats;
 395		struct ixgbe_rx_queue_stats rx_stats;
 396	};
 397	u16 rx_offset;
 398	struct xdp_rxq_info xdp_rxq;
 399	spinlock_t tx_lock;	/* used in XDP mode */
 400	struct xsk_buff_pool *xsk_pool;
 401	u16 ring_idx;		/* {rx,tx,xdp}_ring back reference idx */
 402	u16 rx_buf_len;
 403} ____cacheline_internodealigned_in_smp;
 404
 405enum ixgbe_ring_f_enum {
 406	RING_F_NONE = 0,
 407	RING_F_VMDQ,  /* SR-IOV uses the same ring feature */
 408	RING_F_RSS,
 409	RING_F_FDIR,
 410#ifdef IXGBE_FCOE
 411	RING_F_FCOE,
 412#endif /* IXGBE_FCOE */
 413
 414	RING_F_ARRAY_SIZE      /* must be last in enum set */
 415};
 416
 417#define IXGBE_MAX_RSS_INDICES		16
 418#define IXGBE_MAX_RSS_INDICES_X550	63
 419#define IXGBE_MAX_VMDQ_INDICES		64
 420#define IXGBE_MAX_FDIR_INDICES		63	/* based on q_vector limit */
 421#define IXGBE_MAX_FCOE_INDICES		8
 422#define MAX_RX_QUEUES			(IXGBE_MAX_FDIR_INDICES + 1)
 423#define MAX_TX_QUEUES			(IXGBE_MAX_FDIR_INDICES + 1)
 424#define IXGBE_MAX_XDP_QS		(IXGBE_MAX_FDIR_INDICES + 1)
 425#define IXGBE_MAX_L2A_QUEUES		4
 426#define IXGBE_BAD_L2A_QUEUE		3
 427#define IXGBE_MAX_MACVLANS		63
 428
 429DECLARE_STATIC_KEY_FALSE(ixgbe_xdp_locking_key);
 430
 431struct ixgbe_ring_feature {
 432	u16 limit;	/* upper limit on feature indices */
 433	u16 indices;	/* current value of indices */
 434	u16 mask;	/* Mask used for feature to ring mapping */
 435	u16 offset;	/* offset to start of feature */
 436} ____cacheline_internodealigned_in_smp;
 437
 438#define IXGBE_82599_VMDQ_8Q_MASK 0x78
 439#define IXGBE_82599_VMDQ_4Q_MASK 0x7C
 440#define IXGBE_82599_VMDQ_2Q_MASK 0x7E
 441
 442/*
 443 * FCoE requires that all Rx buffers be over 2200 bytes in length.  Since
 444 * this is twice the size of a half page we need to double the page order
 445 * for FCoE enabled Rx queues.
 446 */
 447static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring)
 448{
 449	if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
 450		return IXGBE_RXBUFFER_3K;
 451#if (PAGE_SIZE < 8192)
 452	if (ring_uses_build_skb(ring))
 453		return IXGBE_MAX_2K_FRAME_BUILD_SKB;
 454#endif
 455	return IXGBE_RXBUFFER_2K;
 456}
 457
 458static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
 459{
 460#if (PAGE_SIZE < 8192)
 461	if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
 462		return 1;
 463#endif
 464	return 0;
 465}
 466#define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring))
 467
 468#define IXGBE_ITR_ADAPTIVE_MIN_INC	2
 469#define IXGBE_ITR_ADAPTIVE_MIN_USECS	10
 470#define IXGBE_ITR_ADAPTIVE_MAX_USECS	126
 471#define IXGBE_ITR_ADAPTIVE_LATENCY	0x80
 472#define IXGBE_ITR_ADAPTIVE_BULK		0x00
 473
 474struct ixgbe_ring_container {
 475	struct ixgbe_ring *ring;	/* pointer to linked list of rings */
 476	unsigned long next_update;	/* jiffies value of last update */
 477	unsigned int total_bytes;	/* total bytes processed this int */
 478	unsigned int total_packets;	/* total packets processed this int */
 479	u16 work_limit;			/* total work allowed per interrupt */
 480	u8 count;			/* total number of rings in vector */
 481	u8 itr;				/* current ITR setting for ring */
 482};
 483
 484/* iterator for handling rings in ring container */
 485#define ixgbe_for_each_ring(pos, head) \
 486	for (pos = (head).ring; pos != NULL; pos = pos->next)
 487
 488#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
 489			      ? 8 : 1)
 490#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
 491
 492/* MAX_Q_VECTORS of these are allocated,
 493 * but we only use one per queue-specific vector.
 494 */
 495struct ixgbe_q_vector {
 496	struct ixgbe_adapter *adapter;
 497#ifdef CONFIG_IXGBE_DCA
 498	int cpu;	    /* CPU for DCA */
 499#endif
 500	u16 v_idx;		/* index of q_vector within array, also used for
 501				 * finding the bit in EICR and friends that
 502				 * represents the vector for this ring */
 503	u16 itr;		/* Interrupt throttle rate written to EITR */
 504	struct ixgbe_ring_container rx, tx;
 505
 506	struct napi_struct napi;
 507	cpumask_t affinity_mask;
 508	int numa_node;
 509	struct rcu_head rcu;	/* to avoid race with update stats on free */
 510	char name[IFNAMSIZ + 9];
 511
 512	/* for dynamic allocation of rings associated with this q_vector */
 513	struct ixgbe_ring ring[] ____cacheline_internodealigned_in_smp;
 514};
 515
 516#ifdef CONFIG_IXGBE_HWMON
 517
 518#define IXGBE_HWMON_TYPE_LOC		0
 519#define IXGBE_HWMON_TYPE_TEMP		1
 520#define IXGBE_HWMON_TYPE_CAUTION	2
 521#define IXGBE_HWMON_TYPE_MAX		3
 522
 523struct hwmon_attr {
 524	struct device_attribute dev_attr;
 525	struct ixgbe_hw *hw;
 526	struct ixgbe_thermal_diode_data *sensor;
 527	char name[12];
 528};
 529
 530struct hwmon_buff {
 531	struct attribute_group group;
 532	const struct attribute_group *groups[2];
 533	struct attribute *attrs[IXGBE_MAX_SENSORS * 4 + 1];
 534	struct hwmon_attr hwmon_list[IXGBE_MAX_SENSORS * 4];
 535	unsigned int n_hwmon;
 536};
 537#endif /* CONFIG_IXGBE_HWMON */
 538
 539/*
 540 * microsecond values for various ITR rates shifted by 2 to fit itr register
 541 * with the first 3 bits reserved 0
 542 */
 543#define IXGBE_MIN_RSC_ITR	24
 544#define IXGBE_100K_ITR		40
 545#define IXGBE_20K_ITR		200
 546#define IXGBE_12K_ITR		336
 547
 548/* ixgbe_test_staterr - tests bits in Rx descriptor status and error fields */
 549static inline __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc *rx_desc,
 550					const u32 stat_err_bits)
 551{
 552	return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
 553}
 554
 555static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
 556{
 557	u16 ntc = ring->next_to_clean;
 558	u16 ntu = ring->next_to_use;
 559
 560	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
 561}
 562
 563#define IXGBE_RX_DESC(R, i)	    \
 564	(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
 565#define IXGBE_TX_DESC(R, i)	    \
 566	(&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
 567#define IXGBE_TX_CTXTDESC(R, i)	    \
 568	(&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
 569
 570#define IXGBE_MAX_JUMBO_FRAME_SIZE	9728 /* Maximum Supported Size 9.5KB */
 571#ifdef IXGBE_FCOE
 572/* Use 3K as the baby jumbo frame size for FCoE */
 573#define IXGBE_FCOE_JUMBO_FRAME_SIZE       3072
 574#endif /* IXGBE_FCOE */
 575
 576#define OTHER_VECTOR 1
 577#define NON_Q_VECTORS (OTHER_VECTOR)
 578
 579#define MAX_MSIX_VECTORS_82599 64
 580#define MAX_Q_VECTORS_82599 64
 581#define MAX_MSIX_VECTORS_82598 18
 582#define MAX_Q_VECTORS_82598 16
 583
 584struct ixgbe_mac_addr {
 585	u8 addr[ETH_ALEN];
 586	u16 pool;
 587	u16 state; /* bitmask */
 588};
 589
 590#define IXGBE_MAC_STATE_DEFAULT		0x1
 591#define IXGBE_MAC_STATE_MODIFIED	0x2
 592#define IXGBE_MAC_STATE_IN_USE		0x4
 593
 594#define MAX_Q_VECTORS MAX_Q_VECTORS_82599
 595#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599
 596
 597#define MIN_MSIX_Q_VECTORS 1
 598#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
 599
 600/* default to trying for four seconds */
 601#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
 602#define IXGBE_SFP_POLL_JIFFIES (2 * HZ)	/* SFP poll every 2 seconds */
 603
 604#define IXGBE_PRIMARY_ABORT_LIMIT	5
 605
 606/* board specific private data structure */
 607struct ixgbe_adapter {
 608	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
 609	/* OS defined structs */
 610	struct net_device *netdev;
 611	struct bpf_prog *xdp_prog;
 612	struct pci_dev *pdev;
 613	struct mii_bus *mii_bus;
 614
 615	unsigned long state;
 616
  617	/* Some features need tri-state capability (unsupported /
  618	 * capable-but-disabled / enabled), hence the additional *_CAPABLE flags.
  619	 */
 620	u32 flags;
 621#define IXGBE_FLAG_MSI_ENABLED			BIT(1)
 622#define IXGBE_FLAG_MSIX_ENABLED			BIT(3)
 623#define IXGBE_FLAG_RX_1BUF_CAPABLE		BIT(4)
 624#define IXGBE_FLAG_RX_PS_CAPABLE		BIT(5)
 625#define IXGBE_FLAG_RX_PS_ENABLED		BIT(6)
 626#define IXGBE_FLAG_DCA_ENABLED			BIT(8)
 627#define IXGBE_FLAG_DCA_CAPABLE			BIT(9)
 628#define IXGBE_FLAG_IMIR_ENABLED			BIT(10)
 629#define IXGBE_FLAG_MQ_CAPABLE			BIT(11)
 630#define IXGBE_FLAG_DCB_ENABLED			BIT(12)
 631#define IXGBE_FLAG_VMDQ_CAPABLE			BIT(13)
 632#define IXGBE_FLAG_VMDQ_ENABLED			BIT(14)
 633#define IXGBE_FLAG_FAN_FAIL_CAPABLE		BIT(15)
 634#define IXGBE_FLAG_NEED_LINK_UPDATE		BIT(16)
 635#define IXGBE_FLAG_NEED_LINK_CONFIG		BIT(17)
 636#define IXGBE_FLAG_FDIR_HASH_CAPABLE		BIT(18)
 637#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE		BIT(19)
 638#define IXGBE_FLAG_FCOE_CAPABLE			BIT(20)
 639#define IXGBE_FLAG_FCOE_ENABLED			BIT(21)
 640#define IXGBE_FLAG_SRIOV_CAPABLE		BIT(22)
 641#define IXGBE_FLAG_SRIOV_ENABLED		BIT(23)
 642#define IXGBE_FLAG_RX_HWTSTAMP_ENABLED		BIT(25)
 643#define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER	BIT(26)
 644#define IXGBE_FLAG_DCB_CAPABLE			BIT(27)
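/* Illustrative only: the usual pattern is to test a *_CAPABLE bit while
 * probing/configuring and the matching *_ENABLED bit on runtime paths,
 * e.g. (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED).
 */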
 645
 646	u32 flags2;
 647#define IXGBE_FLAG2_RSC_CAPABLE			BIT(0)
 648#define IXGBE_FLAG2_RSC_ENABLED			BIT(1)
 649#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE		BIT(2)
 650#define IXGBE_FLAG2_TEMP_SENSOR_EVENT		BIT(3)
 651#define IXGBE_FLAG2_SEARCH_FOR_SFP		BIT(4)
 652#define IXGBE_FLAG2_SFP_NEEDS_RESET		BIT(5)
 653#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT	BIT(7)
 654#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP		BIT(8)
 655#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP		BIT(9)
 656#define IXGBE_FLAG2_PTP_PPS_ENABLED		BIT(10)
 657#define IXGBE_FLAG2_PHY_INTERRUPT		BIT(11)
 658#define IXGBE_FLAG2_VLAN_PROMISC		BIT(13)
 659#define IXGBE_FLAG2_EEE_CAPABLE			BIT(14)
 660#define IXGBE_FLAG2_EEE_ENABLED			BIT(15)
 661#define IXGBE_FLAG2_RX_LEGACY			BIT(16)
 662#define IXGBE_FLAG2_IPSEC_ENABLED		BIT(17)
 663#define IXGBE_FLAG2_VF_IPSEC_ENABLED		BIT(18)
 664#define IXGBE_FLAG2_AUTO_DISABLE_VF		BIT(19)
 665
 666	/* Tx fast path data */
 667	int num_tx_queues;
 668	u16 tx_itr_setting;
 669	u16 tx_work_limit;
 670	u64 tx_ipsec;
 671
 672	/* Rx fast path data */
 673	int num_rx_queues;
 674	u16 rx_itr_setting;
 675	u64 rx_ipsec;
 676
 677	/* Port number used to identify VXLAN traffic */
 678	__be16 vxlan_port;
 679	__be16 geneve_port;
 680
 681	/* XDP */
 682	int num_xdp_queues;
 683	struct ixgbe_ring *xdp_ring[IXGBE_MAX_XDP_QS];
 684	unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled rings */
 685
 686	/* TX */
 687	struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
 688
 689	u64 restart_queue;
 690	u64 lsc_int;
 691	u32 tx_timeout_count;
 692
 693	/* RX */
 694	struct ixgbe_ring *rx_ring[MAX_RX_QUEUES];
 695	int num_rx_pools;		/* == num_rx_queues in 82598 */
 696	int num_rx_queues_per_pool;	/* 1 if 82598, can be many if 82599 */
 697	u64 hw_csum_rx_error;
 698	u64 hw_rx_no_dma_resources;
 699	u64 rsc_total_count;
 700	u64 rsc_total_flush;
 701	u64 non_eop_descs;
 702	u32 alloc_rx_page;
 703	u32 alloc_rx_page_failed;
 704	u32 alloc_rx_buff_failed;
 705
 706	struct ixgbe_q_vector *q_vector[MAX_Q_VECTORS];
 707
 708	/* DCB parameters */
 709	struct ieee_pfc *ixgbe_ieee_pfc;
 710	struct ieee_ets *ixgbe_ieee_ets;
 711	struct ixgbe_dcb_config dcb_cfg;
 712	struct ixgbe_dcb_config temp_dcb_cfg;
 713	u8 hw_tcs;
 714	u8 dcb_set_bitmap;
 715	u8 dcbx_cap;
 716	enum ixgbe_fc_mode last_lfc_mode;
 717
 718	int num_q_vectors;	/* current number of q_vectors for device */
 719	int max_q_vectors;	/* true count of q_vectors for device */
 720	struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
 721	struct msix_entry *msix_entries;
 722
 723	u32 test_icr;
 724	struct ixgbe_ring test_tx_ring;
 725	struct ixgbe_ring test_rx_ring;
 726
 727	/* structs defined in ixgbe_hw.h */
 728	struct ixgbe_hw hw;
 729	u16 msg_enable;
 730	struct ixgbe_hw_stats stats;
 731
 732	u64 tx_busy;
 733	unsigned int tx_ring_count;
 734	unsigned int xdp_ring_count;
 735	unsigned int rx_ring_count;
 736
 737	u32 link_speed;
 738	bool link_up;
 739	unsigned long sfp_poll_time;
 740	unsigned long link_check_timeout;
 741
 742	struct timer_list service_timer;
 743	struct work_struct service_task;
 744
 745	struct hlist_head fdir_filter_list;
 746	unsigned long fdir_overflow; /* number of times ATR was backed off */
 747	union ixgbe_atr_input fdir_mask;
 748	int fdir_filter_count;
 749	u32 fdir_pballoc;
 750	u32 atr_sample_rate;
 751	spinlock_t fdir_perfect_lock;
 752
 753#ifdef IXGBE_FCOE
 754	struct ixgbe_fcoe fcoe;
 755#endif /* IXGBE_FCOE */
 756	u8 __iomem *io_addr; /* Mainly for iounmap use */
 757	u32 wol;
 758
 759	u16 bridge_mode;
 760
 761	char eeprom_id[NVM_VER_SIZE];
 762	u16 eeprom_cap;
 763
 764	u32 interrupt_event;
 765	u32 led_reg;
 766
 767	struct ptp_clock *ptp_clock;
 768	struct ptp_clock_info ptp_caps;
 769	struct work_struct ptp_tx_work;
 770	struct sk_buff *ptp_tx_skb;
 771	struct hwtstamp_config tstamp_config;
 772	unsigned long ptp_tx_start;
 773	unsigned long last_overflow_check;
 774	unsigned long last_rx_ptp_check;
 775	unsigned long last_rx_timestamp;
 776	spinlock_t tmreg_lock;
 777	struct cyclecounter hw_cc;
 778	struct timecounter hw_tc;
 779	u32 base_incval;
 780	u32 tx_hwtstamp_timeouts;
 781	u32 tx_hwtstamp_skipped;
 782	u32 rx_hwtstamp_cleared;
 783	void (*ptp_setup_sdp)(struct ixgbe_adapter *);
 784
 785	/* SR-IOV */
 786	DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
 787	unsigned int num_vfs;
 788	struct vf_data_storage *vfinfo;
 789	int vf_rate_link_speed;
 790	struct vf_macvlans vf_mvs;
 791	struct vf_macvlans *mv_list;
 792
 793	u32 timer_event_accumulator;
 794	u32 vferr_refcount;
 795	struct ixgbe_mac_addr *mac_table;
 796	struct kobject *info_kobj;
 797#ifdef CONFIG_IXGBE_HWMON
 798	struct hwmon_buff *ixgbe_hwmon_buff;
 799#endif /* CONFIG_IXGBE_HWMON */
 800#ifdef CONFIG_DEBUG_FS
 801	struct dentry *ixgbe_dbg_adapter;
  802#endif /* CONFIG_DEBUG_FS */
 803
 804	u8 default_up;
  805	/* Bitmask indicating in-use pools */
 806	DECLARE_BITMAP(fwd_bitmask, IXGBE_MAX_MACVLANS + 1);
 807
 808#define IXGBE_MAX_LINK_HANDLE 10
 809	struct ixgbe_jump_table *jump_tables[IXGBE_MAX_LINK_HANDLE];
 810	unsigned long tables;
 811
  812/* Maximum number of RETA entries among all devices supported by the ixgbe
  813 * driver: currently the X550 in non-SRIOV mode has the largest table.
  814 */
 815#define IXGBE_MAX_RETA_ENTRIES 512
 816	u8 rss_indir_tbl[IXGBE_MAX_RETA_ENTRIES];
 817
 818#define IXGBE_RSS_KEY_SIZE     40  /* size of RSS Hash Key in bytes */
 819	u32 *rss_key;
 820
 821#ifdef CONFIG_IXGBE_IPSEC
 822	struct ixgbe_ipsec *ipsec;
 823#endif /* CONFIG_IXGBE_IPSEC */
 824	spinlock_t vfs_lock;
 825};
 826
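/* Pick the XDP Tx queue index for a CPU.  When ixgbe_xdp_locking_key is
 * enabled (used when there are more CPUs than XDP rings), indices wrap with a
 * modulo and rings may be shared between CPUs; otherwise the mapping is one
 * ring per CPU.
 */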
 827static inline int ixgbe_determine_xdp_q_idx(int cpu)
 828{
 829	if (static_key_enabled(&ixgbe_xdp_locking_key))
 830		return cpu % IXGBE_MAX_XDP_QS;
 831	else
 832		return cpu;
 833}
 834
 835static inline
 836struct ixgbe_ring *ixgbe_determine_xdp_ring(struct ixgbe_adapter *adapter)
 837{
 838	int index = ixgbe_determine_xdp_q_idx(smp_processor_id());
 839
 840	return adapter->xdp_ring[index];
 841}
 842
 843static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
 844{
 845	switch (adapter->hw.mac.type) {
 846	case ixgbe_mac_82598EB:
 847	case ixgbe_mac_82599EB:
 848	case ixgbe_mac_X540:
 849		return IXGBE_MAX_RSS_INDICES;
 850	case ixgbe_mac_X550:
 851	case ixgbe_mac_X550EM_x:
 852	case ixgbe_mac_x550em_a:
 853		return IXGBE_MAX_RSS_INDICES_X550;
 854	default:
 855		return 0;
 856	}
 857}
 858
 859struct ixgbe_fdir_filter {
 860	struct hlist_node fdir_node;
 861	union ixgbe_atr_input filter;
 862	u16 sw_idx;
 863	u64 action;
 864};
 865
 866enum ixgbe_state_t {
 867	__IXGBE_TESTING,
 868	__IXGBE_RESETTING,
 869	__IXGBE_DOWN,
 870	__IXGBE_DISABLED,
 871	__IXGBE_REMOVING,
 872	__IXGBE_SERVICE_SCHED,
 873	__IXGBE_SERVICE_INITED,
 874	__IXGBE_IN_SFP_INIT,
 875	__IXGBE_PTP_RUNNING,
 876	__IXGBE_PTP_TX_IN_PROGRESS,
 877	__IXGBE_RESET_REQUESTED,
 878};
 879
 880struct ixgbe_cb {
 881	union {				/* Union defining head/tail partner */
 882		struct sk_buff *head;
 883		struct sk_buff *tail;
 884	};
 885	dma_addr_t dma;
 886	u16 append_cnt;
 887	bool page_released;
 888};
 889#define IXGBE_CB(skb) ((struct ixgbe_cb *)(skb)->cb)
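/* Illustrative sketch only (not part of the driver): IXGBE_CB() overlays
 * struct ixgbe_cb on the skb->cb scratch area, e.g. to stash a DMA address
 * alongside the buffer.  The helper name is hypothetical.
 */
static inline void ixgbe_example_stash_dma(struct sk_buff *skb, dma_addr_t dma)
{
	IXGBE_CB(skb)->dma = dma;
}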
 890
 891enum ixgbe_boards {
 892	board_82598,
 893	board_82599,
 894	board_X540,
 895	board_X550,
 896	board_X550EM_x,
 897	board_x550em_x_fw,
 898	board_x550em_a,
 899	board_x550em_a_fw,
 900};
 901
 902extern const struct ixgbe_info ixgbe_82598_info;
 903extern const struct ixgbe_info ixgbe_82599_info;
 904extern const struct ixgbe_info ixgbe_X540_info;
 905extern const struct ixgbe_info ixgbe_X550_info;
 906extern const struct ixgbe_info ixgbe_X550EM_x_info;
 907extern const struct ixgbe_info ixgbe_x550em_x_fw_info;
 908extern const struct ixgbe_info ixgbe_x550em_a_info;
 909extern const struct ixgbe_info ixgbe_x550em_a_fw_info;
 910#ifdef CONFIG_IXGBE_DCB
 911extern const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops;
 912#endif
 913
 914extern char ixgbe_driver_name[];
 915#ifdef IXGBE_FCOE
 916extern char ixgbe_default_device_descr[];
 917#endif /* IXGBE_FCOE */
 918
 919int ixgbe_open(struct net_device *netdev);
 920int ixgbe_close(struct net_device *netdev);
 921void ixgbe_up(struct ixgbe_adapter *adapter);
 922void ixgbe_down(struct ixgbe_adapter *adapter);
 923void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
 924void ixgbe_reset(struct ixgbe_adapter *adapter);
 925void ixgbe_set_ethtool_ops(struct net_device *netdev);
 926int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
 927int ixgbe_setup_tx_resources(struct ixgbe_ring *);
 928void ixgbe_free_rx_resources(struct ixgbe_ring *);
 929void ixgbe_free_tx_resources(struct ixgbe_ring *);
 930void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
 931void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
 932void ixgbe_disable_rx(struct ixgbe_adapter *adapter);
 933void ixgbe_disable_tx(struct ixgbe_adapter *adapter);
 934void ixgbe_update_stats(struct ixgbe_adapter *adapter);
 935int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
 936bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
 937			 u16 subdevice_id);
 938#ifdef CONFIG_PCI_IOV
 939void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
 940#endif
 941int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
 942			 const u8 *addr, u16 queue);
 943int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
 944			 const u8 *addr, u16 queue);
 945void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid);
 946void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
 947netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *,
 948				  struct ixgbe_ring *);
 949void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
 950				      struct ixgbe_tx_buffer *);
 951void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
 952void ixgbe_write_eitr(struct ixgbe_q_vector *);
 953int ixgbe_poll(struct napi_struct *napi, int budget);
 954int ethtool_ioctl(struct ifreq *ifr);
 955s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
 956s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
 957s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
 958s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
 959					  union ixgbe_atr_hash_dword input,
 960					  union ixgbe_atr_hash_dword common,
 961					  u8 queue);
 962s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
 963				    union ixgbe_atr_input *input_mask);
 964s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
 965					  union ixgbe_atr_input *input,
 966					  u16 soft_id, u8 queue);
 967s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
 968					  union ixgbe_atr_input *input,
 969					  u16 soft_id);
 970void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
 971					  union ixgbe_atr_input *mask);
 972int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
 973				    struct ixgbe_fdir_filter *input,
 974				    u16 sw_idx);
 975void ixgbe_set_rx_mode(struct net_device *netdev);
 976#ifdef CONFIG_IXGBE_DCB
 977void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
 978#endif
 979int ixgbe_setup_tc(struct net_device *dev, u8 tc);
 980void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
 981void ixgbe_do_reset(struct net_device *netdev);
 982#ifdef CONFIG_IXGBE_HWMON
 983void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
 984int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
 985#endif /* CONFIG_IXGBE_HWMON */
 986#ifdef IXGBE_FCOE
 987void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
 988int ixgbe_fso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
 989	      u8 *hdr_len);
 990int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
 991		   union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb);
 992int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
 993		       struct scatterlist *sgl, unsigned int sgc);
 994int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
 995			  struct scatterlist *sgl, unsigned int sgc);
 996int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
 997int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
 998void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
 999int ixgbe_fcoe_enable(struct net_device *netdev);
1000int ixgbe_fcoe_disable(struct net_device *netdev);
1001#ifdef CONFIG_IXGBE_DCB
1002u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
1003u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
1004#endif /* CONFIG_IXGBE_DCB */
1005int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
1006int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
1007			   struct netdev_fcoe_hbainfo *info);
1008u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
1009#endif /* IXGBE_FCOE */
1010#ifdef CONFIG_DEBUG_FS
1011void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter);
1012void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter);
1013void ixgbe_dbg_init(void);
1014void ixgbe_dbg_exit(void);
1015#else
1016static inline void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) {}
1017static inline void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) {}
1018static inline void ixgbe_dbg_init(void) {}
1019static inline void ixgbe_dbg_exit(void) {}
1020#endif /* CONFIG_DEBUG_FS */
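/* Map a Tx ring back to the netdev_queue the stack uses for it, e.g. for
 * netif_tx_stop_queue()/netif_tx_wake_queue().
 */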
1021static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
1022{
1023	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
1024}
1025
1026void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
1027void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter);
1028void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
1029void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
1030void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
1031void ixgbe_ptp_tx_hang(struct ixgbe_adapter *adapter);
1032void ixgbe_ptp_rx_pktstamp(struct ixgbe_q_vector *, struct sk_buff *);
1033void ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *, struct sk_buff *skb);
1034static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
1035					 union ixgbe_adv_rx_desc *rx_desc,
1036					 struct sk_buff *skb)
1037{
1038	if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_TSIP))) {
1039		ixgbe_ptp_rx_pktstamp(rx_ring->q_vector, skb);
1040		return;
1041	}
1042
1043	if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
1044		return;
1045
1046	ixgbe_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
1047
 1048	/* Update last_rx_timestamp so the watchdog can detect the error case
 1049	 * of a timestamp latched for a packet that was subsequently dropped.
 1050	 */
1051	rx_ring->last_rx_timestamp = jiffies;
1052}
1053
1054int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
1055int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
1056void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
1057void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
1058void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter);
1059#ifdef CONFIG_PCI_IOV
1060void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter);
1061#endif
1062
1063netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
1064				  struct ixgbe_adapter *adapter,
1065				  struct ixgbe_ring *tx_ring);
1066u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter);
1067void ixgbe_store_key(struct ixgbe_adapter *adapter);
1068void ixgbe_store_reta(struct ixgbe_adapter *adapter);
1069s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
1070		       u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
1071#ifdef CONFIG_IXGBE_IPSEC
1072void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter);
1073void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter);
1074void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter);
1075void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
1076		    union ixgbe_adv_rx_desc *rx_desc,
1077		    struct sk_buff *skb);
1078int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
1079		   struct ixgbe_ipsec_tx_data *itd);
1080void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf);
1081int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *mbuf, u32 vf);
1082int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, u32 *mbuf, u32 vf);
1083#else
1084static inline void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter) { }
1085static inline void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter) { }
1086static inline void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) { }
1087static inline void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
1088				  union ixgbe_adv_rx_desc *rx_desc,
1089				  struct sk_buff *skb) { }
1090static inline int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
1091				 struct ixgbe_tx_buffer *first,
1092				 struct ixgbe_ipsec_tx_data *itd) { return 0; }
1093static inline void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter,
1094					u32 vf) { }
1095static inline int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter,
1096					u32 *mbuf, u32 vf) { return -EACCES; }
1097static inline int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter,
1098					u32 *mbuf, u32 vf) { return -EACCES; }
1099#endif /* CONFIG_IXGBE_IPSEC */
1100
1101static inline bool ixgbe_enabled_xdp_adapter(struct ixgbe_adapter *adapter)
1102{
1103	return !!adapter->xdp_prog;
1104}
1105
1106#endif /* _IXGBE_H_ */