v4.17
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */

#ifndef _ICE_TXRX_H_
#define _ICE_TXRX_H_

#define ICE_DFLT_IRQ_WORK	256
#define ICE_RXBUF_2048		2048
#define ICE_MAX_CHAINED_RX_BUFS	5
#define ICE_MAX_BUF_TXD		8
#define ICE_MIN_TX_LEN		17

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define ICE_MAX_READ_REQ_SIZE	4096
#define ICE_MAX_DATA_PER_TXD	(16 * 1024 - 1)
#define ICE_MAX_DATA_PER_TXD_ALIGNED \
	(~(ICE_MAX_READ_REQ_SIZE - 1) & ICE_MAX_DATA_PER_TXD)
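
Worked out numerically, the alignment macro rounds the (16K - 1) descriptor limit down to a whole multiple of the 4K read request size:

/* ICE_MAX_DATA_PER_TXD          = 16 * 1024 - 1    = 0x3FFF
 * ~(ICE_MAX_READ_REQ_SIZE - 1)  = ~0xFFF           = ...F000
 * ICE_MAX_DATA_PER_TXD_ALIGNED  = 0x3FFF & ~0xFFF  = 0x3000 = 12288
 *
 * i.e. at most 12K of data goes into any one Tx data descriptor, keeping
 * every chunk aligned to the device's 4K read requests.
 */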

#define ICE_RX_BUF_WRITE	16	/* Must be power of 2 */
#define ICE_MAX_TXQ_PER_TXQG	128

/* Tx Descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define ICE_DESC_UNUSED(R)	\
	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
	(R)->next_to_clean - (R)->next_to_use - 1)
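
The ternary handles ring wrap-around; two quick cases with illustrative values:

/* count = 512, next_to_clean = 10,  next_to_use = 500:
 *	ntc <= ntu, so add count:  512 + 10 - 500 - 1 = 21 free slots
 * count = 512, next_to_clean = 500, next_to_use = 10:
 *	ntc > ntu, so add 0:       500 - 10 - 1       = 489 free slots
 *
 * The trailing "- 1" keeps one descriptor permanently unused so that
 * next_to_use == next_to_clean unambiguously means "empty", not "full".
 */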

#define ICE_TX_FLAGS_TSO	BIT(0)
#define ICE_TX_FLAGS_HW_VLAN	BIT(1)
#define ICE_TX_FLAGS_SW_VLAN	BIT(2)
#define ICE_TX_FLAGS_VLAN_M	0xffff0000
#define ICE_TX_FLAGS_VLAN_S	16

struct ice_tx_buf {
	struct ice_tx_desc *next_to_watch;
	struct sk_buff *skb;
	unsigned int bytecount;
	unsigned short gso_segs;
	u32 tx_flags;
	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
};
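
DEFINE_DMA_UNMAP_ADDR()/DEFINE_DMA_UNMAP_LEN() only reserve the fields on architectures that need unmap state, so the driver must go through the generic DMA accessor macros rather than touching members directly. A minimal sketch of the map/clean pattern (the dma_handle and size variables are illustrative):

	/* at map time, stash the handle and length for the later unmap */
	dma_unmap_addr_set(tx_buf, dma, dma_handle);
	dma_unmap_len_set(tx_buf, len, size);

	/* at clean time, read them back through the same accessors */
	dma_unmap_single(dev, dma_unmap_addr(tx_buf, dma),
			 dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);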

struct ice_tx_offload_params {
	u8 header_len;
	u32 td_cmd;
	u32 td_offset;
	u32 td_l2tag1;
	u16 cd_l2tag2;
	u32 cd_tunnel_params;
	u64 cd_qw1;
	struct ice_ring *tx_ring;
};

struct ice_rx_buf {
	struct sk_buff *skb;
	dma_addr_t dma;
	struct page *page;
	unsigned int page_offset;
};

struct ice_q_stats {
	u64 pkts;
	u64 bytes;
};

struct ice_txq_stats {
	u64 restart_q;
	u64 tx_busy;
	u64 tx_linearize;
};

struct ice_rxq_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buf_failed;
	u64 page_reuse_count;
};

/* this enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere in the manual
 * mentioning ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
 * register but instead is a special value meaning "don't update" ITR0/1/2.
 */
enum ice_dyn_idx_t {
	ICE_IDX_ITR0 = 0,
	ICE_IDX_ITR1 = 1,
	ICE_IDX_ITR2 = 2,
	ICE_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
};

/* Header split modes defined by DTYPE field of Rx RLAN context */
enum ice_rx_dtype {
	ICE_RX_DTYPE_NO_SPLIT		= 0,
	ICE_RX_DTYPE_HEADER_SPLIT	= 1,
	ICE_RX_DTYPE_SPLIT_ALWAYS	= 2,
};

/* indices into GLINT_ITR registers */
#define ICE_RX_ITR	ICE_IDX_ITR0
#define ICE_TX_ITR	ICE_IDX_ITR1
#define ICE_ITR_DYNAMIC	0x8000  /* use top bit as a flag */
#define ICE_ITR_8K	0x003E

/* apply ITR HW granularity translation to program the HW registers */
#define ITR_TO_REG(val, itr_gran) (((val) & ~ICE_ITR_DYNAMIC) >> (itr_gran))
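
ITR_TO_REG() simply strips the ICE_ITR_DYNAMIC flag bit and scales the stored user setting by the hardware granularity shift; with an illustrative itr_gran of 1 (the actual shift comes from the device's programmed ITR granularity):

/* ITR_TO_REG(ICE_ITR_8K | ICE_ITR_DYNAMIC, 1)
 *	= (0x803E & ~0x8000) >> 1
 *	= 0x003E >> 1
 *	= 0x001F	<- value actually written to the register
 */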

/* Legacy or Advanced Mode Queue */
#define ICE_TX_ADVANCED	0
#define ICE_TX_LEGACY	1

/* descriptor ring, associated with a VSI */
struct ice_ring {
	struct ice_ring *next;		/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	struct ice_vsi *vsi;		/* Backreference to associated VSI */
	struct ice_q_vector *q_vector;	/* Backreference to associated vector */
	u8 __iomem *tail;
	union {
		struct ice_tx_buf *tx_buf;
		struct ice_rx_buf *rx_buf;
	};
	u16 q_index;			/* Queue number of ring */
	u32 txq_teid;			/* Added Tx queue TEID */

	/* high bit set means dynamic, use accessor routines to read/write.
	 * hardware supports 2us/1us resolution for the ITR registers.
	 * these values always store the USER setting, and must be converted
	 * before programming to a register.
	 */
	u16 rx_itr_setting;
	u16 tx_itr_setting;

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;

	bool ring_active;		/* is ring online or not */

	/* stats structs */
	struct ice_q_stats	stats;
	struct u64_stats_sync syncp;
	union {
		struct ice_txq_stats tx_stats;
		struct ice_rxq_stats rx_stats;
	};

	unsigned int size;		/* length of descriptor ring in bytes */
	dma_addr_t dma;			/* physical address of ring */
	struct rcu_head rcu;		/* to avoid race on free */
	u16 next_to_alloc;
} ____cacheline_internodealigned_in_smp;

enum ice_latency_range {
	ICE_LOWEST_LATENCY = 0,
	ICE_LOW_LATENCY = 1,
	ICE_BULK_LATENCY = 2,
	ICE_ULTRA_LATENCY = 3,
};

struct ice_ring_container {
	/* array of pointers to rings */
	struct ice_ring *ring;
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_pkts;	/* total packets processed this int */
	enum ice_latency_range latency_range;
	u16 itr;
};

/* iterator for handling rings in ring container */
#define ice_for_each_ring(pos, head) \
	for (pos = (head).ring; pos; pos = pos->next)
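
The container's rings are chained through ice_ring::next, so a poll routine can visit every ring its interrupt vector owns. A minimal sketch, assuming a q_vector with a tx ice_ring_container member (as the q_vector back-reference above suggests) and a hypothetical per-ring clean helper:

	struct ice_ring *ring;

	/* walk every Tx ring hung off this vector's Tx container */
	ice_for_each_ring(ring, q_vector->tx)
		budget_left -= ice_clean_one_tx_ring(ring, budget_left); /* hypothetical */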

bool ice_alloc_rx_bufs(struct ice_ring *rxr, u16 cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
void ice_clean_tx_ring(struct ice_ring *tx_ring);
void ice_clean_rx_ring(struct ice_ring *rx_ring);
int ice_setup_tx_ring(struct ice_ring *tx_ring);
int ice_setup_rx_ring(struct ice_ring *rx_ring);
void ice_free_tx_ring(struct ice_ring *tx_ring);
void ice_free_rx_ring(struct ice_ring *rx_ring);
int ice_napi_poll(struct napi_struct *napi, int budget);

#endif /* _ICE_TXRX_H_ */
v5.14.15
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */

#ifndef _ICE_TXRX_H_
#define _ICE_TXRX_H_

#include "ice_type.h"

#define ICE_DFLT_IRQ_WORK	256
#define ICE_RXBUF_3072		3072
#define ICE_RXBUF_2048		2048
#define ICE_RXBUF_1536		1536
#define ICE_MAX_CHAINED_RX_BUFS	5
#define ICE_MAX_BUF_TXD		8
#define ICE_MIN_TX_LEN		17

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define ICE_MAX_READ_REQ_SIZE	4096
#define ICE_MAX_DATA_PER_TXD	(16 * 1024 - 1)
#define ICE_MAX_DATA_PER_TXD_ALIGNED \
	(~(ICE_MAX_READ_REQ_SIZE - 1) & ICE_MAX_DATA_PER_TXD)

#define ICE_RX_BUF_WRITE	16	/* Must be power of 2 */
#define ICE_MAX_TXQ_PER_TXQG	128

/* Attempt to maximize the headroom available for incoming frames. We use a 2K
 * buffer for MTUs <= 1500 and need 1536/1534 to store the data for the frame.
 * This leaves us with 512 bytes of room. From that we need to deduct the
 * space needed for the shared info and the padding needed to IP align the
 * frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 *	 up negative. In these cases we should fall back to the legacy
 *	 receive path.
 */
#if (PAGE_SIZE < 8192)
#define ICE_2K_TOO_SMALL_WITH_PADDING \
	((unsigned int)(NET_SKB_PAD + ICE_RXBUF_1536) > \
			SKB_WITH_OVERHEAD(ICE_RXBUF_2048))
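
Plugging in typical x86_64 values (NET_SKB_PAD of 64 and a 320-byte aligned skb_shared_info; both are config-dependent assumptions) shows why this check is normally false there:

/* NET_SKB_PAD + ICE_RXBUF_1536       = 64 + 1536  = 1600
 * SKB_WITH_OVERHEAD(ICE_RXBUF_2048)  = 2048 - 320 = 1728
 *
 * 1600 <= 1728, so a padded 1536-byte frame still fits in the 2K buffer
 * and the driver keeps the 1536-byte layout.
 */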

/**
 * ice_compute_pad - compute the padding
 * @rx_buf_len: buffer length
 *
 * Round the buffer length up to half a page, subtract the skb_shared_info
 * overhead and then the buffer length itself; what remains is the space
 * available for padding.
 */
static inline int ice_compute_pad(int rx_buf_len)
{
	int half_page_size;

	half_page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	return SKB_WITH_OVERHEAD(half_page_size) - rx_buf_len;
}

/**
 * ice_skb_pad - determine the padding that we can supply
 *
 * Pick the right Rx buffer size and derive the padding from it.
 */
static inline int ice_skb_pad(void)
{
	int rx_buf_len;

	/* If a 2K buffer cannot handle a standard Ethernet frame then
	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
	 *
	 * For a 3K buffer we need to add enough padding to allow for
	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
	 * cache-line alignment.
	 */
	if (ICE_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = ICE_RXBUF_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = ICE_RXBUF_1536;

	/* if needed make room for NET_IP_ALIGN */
	rx_buf_len -= NET_IP_ALIGN;

	return ice_compute_pad(rx_buf_len);
}
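
With 4K pages and the same assumed 320-byte shared-info overhead, the common 1536-byte case works out to:

/* half_page_size          = ALIGN(1536, 2048) = 2048
 * SKB_WITH_OVERHEAD(2048) = 2048 - 320        = 1728
 * pad                     = 1728 - 1536       = 192 bytes of headroom
 */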

#define ICE_SKB_PAD ice_skb_pad()
#else
#define ICE_2K_TOO_SMALL_WITH_PADDING false
#define ICE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif

/* We are assuming that the cache line is always 64 Bytes here for ice.
 * In order to make sure that is a correct assumption there is a check in probe
 * to print a warning if the read from GLPCI_CNF2 tells us that the cache line
 * size is 128 bytes. We do it this way because we do not want to read the
 * GLPCI_CNF2 register or a variable containing the value on every pass through
 * the Tx path.
 */
#define ICE_CACHE_LINE_BYTES		64
#define ICE_DESCS_PER_CACHE_LINE	(ICE_CACHE_LINE_BYTES / \
					 sizeof(struct ice_tx_desc))
#define ICE_DESCS_FOR_CTX_DESC		1
#define ICE_DESCS_FOR_SKB_DATA_PTR	1
/* Tx descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + ICE_DESCS_FOR_CTX_DESC + \
		     ICE_DESCS_PER_CACHE_LINE + ICE_DESCS_FOR_SKB_DATA_PTR)
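
Assuming a 16-byte ice_tx_desc and the common MAX_SKB_FRAGS value of 17 (both depend on the build configuration), the worst case works out to:

/* ICE_DESCS_PER_CACHE_LINE = 64 / 16 = 4
 * DESC_NEEDED = 17 frags + 1 context desc + 4 cache-line slack + 1 skb->data
 *             = 23 descriptors
 */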
#define ICE_DESC_UNUSED(R)	\
	(u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
	      (R)->next_to_clean - (R)->next_to_use - 1)

#define ICE_TX_FLAGS_TSO	BIT(0)
#define ICE_TX_FLAGS_HW_VLAN	BIT(1)
#define ICE_TX_FLAGS_SW_VLAN	BIT(2)
/* ICE_TX_FLAGS_DUMMY_PKT is used to mark dummy packets that should be
 * freed instead of returned like skb packets.
 */
#define ICE_TX_FLAGS_DUMMY_PKT	BIT(3)
#define ICE_TX_FLAGS_TSYN	BIT(4)
#define ICE_TX_FLAGS_IPV4	BIT(5)
#define ICE_TX_FLAGS_IPV6	BIT(6)
#define ICE_TX_FLAGS_TUNNEL	BIT(7)
#define ICE_TX_FLAGS_VLAN_M	0xffff0000
#define ICE_TX_FLAGS_VLAN_PR_M	0xe0000000
#define ICE_TX_FLAGS_VLAN_PR_S	29
#define ICE_TX_FLAGS_VLAN_S	16

#define ICE_XDP_PASS		0
#define ICE_XDP_CONSUMED	BIT(0)
#define ICE_XDP_TX		BIT(1)
#define ICE_XDP_REDIR		BIT(2)

#define ICE_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

#define ICE_ETH_PKT_HDR_PAD	(ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))

#define ICE_TXD_LAST_DESC_CMD (ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)

struct ice_tx_buf {
	struct ice_tx_desc *next_to_watch;
	union {
		struct sk_buff *skb;
		void *raw_buf; /* used for XDP */
	};
	unsigned int bytecount;
	unsigned short gso_segs;
	u32 tx_flags;
	DEFINE_DMA_UNMAP_LEN(len);
	DEFINE_DMA_UNMAP_ADDR(dma);
};

struct ice_tx_offload_params {
	u64 cd_qw1;
	struct ice_ring *tx_ring;
	u32 td_cmd;
	u32 td_offset;
	u32 td_l2tag1;
	u32 cd_tunnel_params;
	u16 cd_l2tag2;
	u8 header_len;
};

struct ice_rx_buf {
	union {
		struct {
			dma_addr_t dma;
			struct page *page;
			unsigned int page_offset;
			u16 pagecnt_bias;
		};
		struct {
			struct xdp_buff *xdp;
		};
	};
};

struct ice_q_stats {
	u64 pkts;
	u64 bytes;
};

struct ice_txq_stats {
	u64 restart_q;
	u64 tx_busy;
	u64 tx_linearize;
	int prev_pkt; /* negative if no pending Tx descriptors */
};

struct ice_rxq_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buf_failed;
};

enum ice_ring_state_t {
	ICE_TX_XPS_INIT_DONE,
	ICE_TX_NBITS,
};

/* this enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere in the manual
 * mentioning ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
 * register but instead is a special value meaning "don't update" ITR0/1/2.
 */
enum ice_dyn_idx_t {
	ICE_IDX_ITR0 = 0,
	ICE_IDX_ITR1 = 1,
	ICE_IDX_ITR2 = 2,
	ICE_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
};

/* Header split modes defined by DTYPE field of Rx RLAN context */
enum ice_rx_dtype {
	ICE_RX_DTYPE_NO_SPLIT		= 0,
	ICE_RX_DTYPE_HEADER_SPLIT	= 1,
	ICE_RX_DTYPE_SPLIT_ALWAYS	= 2,
};

/* indices into GLINT_ITR registers */
#define ICE_RX_ITR	ICE_IDX_ITR0
#define ICE_TX_ITR	ICE_IDX_ITR1
#define ICE_ITR_8K	124
#define ICE_ITR_20K	50
#define ICE_ITR_MAX	8160 /* 0x1FE0 */
#define ICE_DFLT_TX_ITR	ICE_ITR_20K
#define ICE_DFLT_RX_ITR	ICE_ITR_20K
enum ice_dynamic_itr {
	ITR_STATIC = 0,
	ITR_DYNAMIC = 1
};

#define ITR_IS_DYNAMIC(rc) ((rc)->itr_mode == ITR_DYNAMIC)
#define ICE_ITR_GRAN_S		1	/* ITR granularity is always 2us */
#define ICE_ITR_GRAN_US		BIT(ICE_ITR_GRAN_S)
#define ICE_ITR_MASK		0x1FFE	/* ITR register value alignment mask */
#define ITR_REG_ALIGN(setting)	((setting) & ICE_ITR_MASK)
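
Because the hardware counts in 2 us units, valid settings are even microsecond values up to ICE_ITR_MAX; the mask simply drops the sub-granularity bit. For example:

/* ITR_REG_ALIGN(50)  = 50  & 0x1FFE = 50	(ICE_ITR_20K: 50 us ~= 20K ints/s)
 * ITR_REG_ALIGN(125) = 125 & 0x1FFE = 124	(odd values round down to the 2 us step)
 */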

#define ICE_DFLT_INTRL	0
#define ICE_MAX_INTRL	236

#define ICE_IN_WB_ON_ITR_MODE	255
/* Sets WB_ON_ITR and assumes INTENA bit is already cleared, which allows
 * setting the MSK_M bit to tell hardware to ignore the INTENA_M bit. Also,
 * set the write-back latency to the usecs passed in.
 */
#define ICE_GLINT_DYN_CTL_WB_ON_ITR(usecs, itr_idx)	\
	((((usecs) << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)) & \
	  GLINT_DYN_CTL_INTERVAL_M) | \
	 (((itr_idx) << GLINT_DYN_CTL_ITR_INDX_S) & \
	  GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M | \
	 GLINT_DYN_CTL_WB_ON_ITR_M)
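
A sketch of how a vector might be dropped into write-back-on-ITR mode; wr32() and GLINT_DYN_CTL() are the driver's register accessors defined elsewhere, and the exact call site here is illustrative:

	/* stop firing interrupts but keep descriptor write-back on ITR expiry */
	wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
	     ICE_GLINT_DYN_CTL_WB_ON_ITR(0, ICE_RX_ITR));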

/* Legacy or Advanced Mode Queue */
#define ICE_TX_ADVANCED	0
#define ICE_TX_LEGACY	1

/* descriptor ring, associated with a VSI */
struct ice_ring {
	/* CL1 - 1st cacheline starts here */
	struct ice_ring *next;		/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	struct ice_vsi *vsi;		/* Backreference to associated VSI */
	struct ice_q_vector *q_vector;	/* Backreference to associated vector */
	u8 __iomem *tail;
	union {
		struct ice_tx_buf *tx_buf;
		struct ice_rx_buf *rx_buf;
	};
	/* CL2 - 2nd cacheline starts here */
	u16 q_index;			/* Queue number of ring */
	u16 q_handle;			/* Queue handle per TC */

	u8 ring_active:1;		/* is ring online or not */

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;
	u16 next_to_alloc;

	/* stats structs */
	struct ice_q_stats	stats;
	struct u64_stats_sync syncp;
	union {
		struct ice_txq_stats tx_stats;
		struct ice_rxq_stats rx_stats;
	};

	struct rcu_head rcu;		/* to avoid race on free */
	DECLARE_BITMAP(xps_state, ICE_TX_NBITS);	/* XPS Config State */
	struct bpf_prog *xdp_prog;
	struct xsk_buff_pool *xsk_pool;
	u16 rx_offset;
	/* CL3 - 3rd cacheline starts here */
	struct xdp_rxq_info xdp_rxq;
	struct sk_buff *skb;
	/* CLX - the below items are only accessed infrequently and should be
	 * in their own cache line if possible
	 */
#define ICE_TX_FLAGS_RING_XDP		BIT(0)
#define ICE_RX_FLAGS_RING_BUILD_SKB	BIT(1)
	u8 flags;
	dma_addr_t dma;			/* physical address of ring */
	unsigned int size;		/* length of descriptor ring in bytes */
	u32 txq_teid;			/* Added Tx queue TEID */
	u16 rx_buf_len;
	u8 dcb_tc;			/* Traffic class of ring */
	struct ice_ptp_tx *tx_tstamps;
	u64 cached_phctime;
	u8 ptp_rx:1;
	u8 ptp_tx:1;
} ____cacheline_internodealigned_in_smp;

static inline bool ice_ring_uses_build_skb(struct ice_ring *ring)
{
	return !!(ring->flags & ICE_RX_FLAGS_RING_BUILD_SKB);
}

static inline void ice_set_ring_build_skb_ena(struct ice_ring *ring)
{
	ring->flags |= ICE_RX_FLAGS_RING_BUILD_SKB;
}

static inline void ice_clear_ring_build_skb_ena(struct ice_ring *ring)
{
	ring->flags &= ~ICE_RX_FLAGS_RING_BUILD_SKB;
}

static inline bool ice_ring_is_xdp(struct ice_ring *ring)
{
	return !!(ring->flags & ICE_TX_FLAGS_RING_XDP);
}

struct ice_ring_container {
	/* head of linked-list of rings */
	struct ice_ring *ring;
	struct dim dim;		/* data for net_dim algorithm */
	u16 itr_idx;		/* index in the interrupt vector */
	/* this matches the maximum number of ITR bits, but in usec
	 * values, so it is shifted left one bit (bit zero is ignored)
	 */
	u16 itr_setting:13;
	u16 itr_reserved:2;
	u16 itr_mode:1;
};

struct ice_coalesce_stored {
	u16 itr_tx;
	u16 itr_rx;
	u8 intrl;
	u8 tx_valid;
	u8 rx_valid;
};

/* iterator for handling rings in ring container */
#define ice_for_each_ring(pos, head) \
	for (pos = (head).ring; pos; pos = pos->next)

static inline unsigned int ice_rx_pg_order(struct ice_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring->rx_buf_len > (PAGE_SIZE / 2))
		return 1;
#endif
	return 0;
}

#define ice_rx_pg_size(_ring) (PAGE_SIZE << ice_rx_pg_order(_ring))
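
On a 4K-page system this means 3K buffers (ICE_RXBUF_3072) get order-1 pages so two buffers still fit per page, while 2K and smaller buffers stay on order-0 pages:

/* rx_buf_len = 2048: 2048 <= PAGE_SIZE / 2 -> order 0, ice_rx_pg_size = 4096
 * rx_buf_len = 3072: 3072 >  PAGE_SIZE / 2 -> order 1, ice_rx_pg_size = 8192
 */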

union ice_32b_rx_flex_desc;

bool ice_alloc_rx_bufs(struct ice_ring *rxr, u16 cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
void ice_clean_tx_ring(struct ice_ring *tx_ring);
void ice_clean_rx_ring(struct ice_ring *rx_ring);
int ice_setup_tx_ring(struct ice_ring *tx_ring);
int ice_setup_rx_ring(struct ice_ring *rx_ring);
void ice_free_tx_ring(struct ice_ring *tx_ring);
void ice_free_rx_ring(struct ice_ring *rx_ring);
int ice_napi_poll(struct napi_struct *napi, int budget);
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
		   u8 *raw_packet);
int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget);
void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring);
#endif /* _ICE_TXRX_H_ */