/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */

#ifndef _ICE_TXRX_H_
#define _ICE_TXRX_H_

#include "ice_type.h"

#define ICE_DFLT_IRQ_WORK 256
#define ICE_RXBUF_3072 3072
#define ICE_RXBUF_2048 2048
#define ICE_RXBUF_1664 1664
#define ICE_RXBUF_1536 1536
#define ICE_MAX_CHAINED_RX_BUFS 5
#define ICE_MAX_BUF_TXD 8
#define ICE_MIN_TX_LEN 17
#define ICE_MAX_FRAME_LEGACY_RX 8320

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define ICE_MAX_READ_REQ_SIZE 4096
#define ICE_MAX_DATA_PER_TXD (16 * 1024 - 1)
#define ICE_MAX_DATA_PER_TXD_ALIGNED \
	(~(ICE_MAX_READ_REQ_SIZE - 1) & ICE_MAX_DATA_PER_TXD)
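
/* Worked example of the alignment above: ICE_MAX_DATA_PER_TXD is 16383
 * (0x3FFF); masking it with ~(4096 - 1) rounds down to 12288 (0x3000),
 * i.e. at most three full 4K read requests of data per Tx descriptor.
 */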

#define ICE_MAX_TXQ_PER_TXQG 128

/* Attempt to maximize the headroom available for incoming frames. We use a 2K
 * buffer for MTUs <= 1500 and need 1536/1534 to store the data for the frame.
 * This leaves us with 512 bytes of room. From that we need to deduct the
 * space needed for the shared info and the padding needed to IP align the
 * frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 * up negative. In these cases we should fall back to the legacy
 * receive path.
 */
#if (PAGE_SIZE < 8192)
#define ICE_2K_TOO_SMALL_WITH_PADDING \
	((unsigned int)(NET_SKB_PAD + ICE_RXBUF_1536) > \
	 SKB_WITH_OVERHEAD(ICE_RXBUF_2048))
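
/* Illustrative numbers only (sizes vary by kernel config): on a typical
 * x86_64 build with 64-byte cache lines, NET_SKB_PAD is 64 and
 * SKB_WITH_OVERHEAD(2048) is roughly 2048 - 320 = 1728, assuming a
 * ~320-byte, cacheline-aligned skb_shared_info. 64 + 1536 = 1600 fits, so
 * the check is false and the 2K buffer is used. With 256-byte cache lines
 * both terms grow enough for the check to flip to true, matching the note
 * above about falling back to the legacy receive path.
 */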

/**
 * ice_compute_pad - compute the padding
 * @rx_buf_len: buffer length
 *
 * Round the buffer length up to the next half-page boundary, then subtract
 * the skb_shared_info overhead and the buffer length itself; what remains
 * is the space available for padding.
 */
static inline int ice_compute_pad(int rx_buf_len)
{
	int half_page_size;

	half_page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	return SKB_WITH_OVERHEAD(half_page_size) - rx_buf_len;
}
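
/* Hedged example, assuming 4K pages and a ~320-byte skb_shared_info:
 * ice_compute_pad(1534) rounds 1534 up to 2048, then subtracts the
 * shared-info overhead (2048 - 320 = 1728) and the buffer length,
 * leaving roughly 1728 - 1534 = 194 bytes for padding.
 */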

/**
 * ice_skb_pad - determine the padding that we can supply
 *
 * Figure out the right Rx buffer size and based on that calculate the
 * padding
 */
static inline int ice_skb_pad(void)
{
	int rx_buf_len;

	/* If a 2K buffer cannot handle a standard Ethernet frame then
	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
	 *
	 * For a 3K buffer we need to add enough padding to allow for
	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
	 * cache-line alignment.
	 */
	if (ICE_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = ICE_RXBUF_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = ICE_RXBUF_1536;

	/* if needed make room for NET_IP_ALIGN */
	rx_buf_len -= NET_IP_ALIGN;

	return ice_compute_pad(rx_buf_len);
}
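
/* Hedged example, same assumptions as above plus NET_IP_ALIGN == 0 (x86):
 * ICE_2K_TOO_SMALL_WITH_PADDING is false, so rx_buf_len stays at 1536 and
 * ICE_SKB_PAD works out to roughly 2048 - 320 - 1536 = 192 bytes of
 * headroom for build_skb()-style Rx buffers.
 */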

#define ICE_SKB_PAD ice_skb_pad()
#else
#define ICE_2K_TOO_SMALL_WITH_PADDING false
#define ICE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif

/* We are assuming that the cache line is always 64 Bytes here for ice.
 * In order to make sure that is a correct assumption there is a check in probe
 * to print a warning if the read from GLPCI_CNF2 tells us that the cache line
 * size is 128 bytes. We do it this way because we do not want to read the
 * GLPCI_CNF2 register or a variable containing the value on every pass through
 * the Tx path.
 */
#define ICE_CACHE_LINE_BYTES 64
#define ICE_DESCS_PER_CACHE_LINE (ICE_CACHE_LINE_BYTES / \
				  sizeof(struct ice_tx_desc))
#define ICE_DESCS_FOR_CTX_DESC 1
#define ICE_DESCS_FOR_SKB_DATA_PTR 1
/* Tx descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + ICE_DESCS_FOR_CTX_DESC + \
		     ICE_DESCS_PER_CACHE_LINE + ICE_DESCS_FOR_SKB_DATA_PTR)
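
/* Worked example: struct ice_tx_desc is 16 bytes, so
 * ICE_DESCS_PER_CACHE_LINE is 64 / 16 = 4. Assuming the default
 * MAX_SKB_FRAGS of 17 (configurable on newer kernels), the worst case is
 * DESC_NEEDED = 17 + 1 + 4 + 1 = 23 descriptors for a single packet.
 */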
#define ICE_DESC_UNUSED(R) \
	(u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
	      (R)->next_to_clean - (R)->next_to_use - 1)

#define ICE_RX_DESC_UNUSED(R) \
	((((R)->first_desc > (R)->next_to_use) ? 0 : (R)->count) + \
	 (R)->first_desc - (R)->next_to_use - 1)
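
/* Example of the free-count math above: with count = 256, next_to_use = 100
 * and next_to_clean = 40 (clean lags use), ICE_DESC_UNUSED() returns
 * 256 + 40 - 100 - 1 = 195; with next_to_clean = 150 (use has wrapped),
 * it returns 150 - 100 - 1 = 49. The "- 1" keeps one slot permanently
 * unused so a full ring is distinguishable from an empty one.
 */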

#define ICE_RING_QUARTER(R) ((R)->count >> 2)

#define ICE_TX_FLAGS_TSO BIT(0)
#define ICE_TX_FLAGS_HW_VLAN BIT(1)
#define ICE_TX_FLAGS_SW_VLAN BIT(2)
/* Free, was ICE_TX_FLAGS_DUMMY_PKT */
#define ICE_TX_FLAGS_TSYN BIT(4)
#define ICE_TX_FLAGS_IPV4 BIT(5)
#define ICE_TX_FLAGS_IPV6 BIT(6)
#define ICE_TX_FLAGS_TUNNEL BIT(7)
#define ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN BIT(8)

#define ICE_XDP_PASS 0
#define ICE_XDP_CONSUMED BIT(0)
#define ICE_XDP_TX BIT(1)
#define ICE_XDP_REDIR BIT(2)
#define ICE_XDP_EXIT BIT(3)
#define ICE_SKB_CONSUMED ICE_XDP_CONSUMED

#define ICE_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

#define ICE_ETH_PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
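
/* ICE_ETH_PKT_HDR_PAD accounts for L2 overhead on top of the MTU:
 * 14 (Ethernet header) + 4 (FCS) + 2 * 4 (two VLAN tags) = 26 bytes.
 */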

#define ICE_TXD_LAST_DESC_CMD (ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)

/**
 * enum ice_tx_buf_type - type of &ice_tx_buf to act on Tx completion
 * @ICE_TX_BUF_EMPTY: unused OR XSk frame, no action required
 * @ICE_TX_BUF_DUMMY: dummy Flow Director packet, unmap and kfree()
 * @ICE_TX_BUF_FRAG: mapped skb OR &xdp_buff frag, only unmap DMA
 * @ICE_TX_BUF_SKB: &sk_buff, unmap and consume_skb(), update stats
 * @ICE_TX_BUF_XDP_TX: &xdp_buff, unmap and page_frag_free(), stats
 * @ICE_TX_BUF_XDP_XMIT: &xdp_frame, unmap and xdp_return_frame(), stats
 * @ICE_TX_BUF_XSK_TX: &xdp_buff on XSk queue, xsk_buff_free(), stats
 */
enum ice_tx_buf_type {
	ICE_TX_BUF_EMPTY = 0U,
	ICE_TX_BUF_DUMMY,
	ICE_TX_BUF_FRAG,
	ICE_TX_BUF_SKB,
	ICE_TX_BUF_XDP_TX,
	ICE_TX_BUF_XDP_XMIT,
	ICE_TX_BUF_XSK_TX,
};
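
/* Illustrative sketch (not the driver's exact cleanup code) of how the
 * type field is meant to drive Tx completion handling:
 *
 *	switch (tx_buf->type) {
 *	case ICE_TX_BUF_SKB:
 *		// unmap DMA, consume_skb(tx_buf->skb), update stats
 *		break;
 *	case ICE_TX_BUF_XDP_TX:
 *		// unmap DMA, page_frag_free(tx_buf->raw_buf), update stats
 *		break;
 *	// ... and so on per the kernel-doc above
 *	}
 */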

struct ice_tx_buf {
	union {
		struct ice_tx_desc *next_to_watch;
		u32 rs_idx;
	};
	union {
		void *raw_buf;		/* used for XDP_TX and FDir rules */
		struct sk_buff *skb;	/* used for .ndo_start_xmit() */
		struct xdp_frame *xdpf;	/* used for .ndo_xdp_xmit() */
		struct xdp_buff *xdp;	/* used for XDP_TX ZC */
	};
	unsigned int bytecount;
	union {
		unsigned int gso_segs;
		unsigned int nr_frags;	/* used for mbuf XDP */
	};
	u32 tx_flags:12;
	u32 type:4;			/* &ice_tx_buf_type */
	u32 vid:16;
	DEFINE_DMA_UNMAP_LEN(len);
	DEFINE_DMA_UNMAP_ADDR(dma);
};

struct ice_tx_offload_params {
	u64 cd_qw1;
	struct ice_tx_ring *tx_ring;
	u32 td_cmd;
	u32 td_offset;
	u32 td_l2tag1;
	u32 cd_tunnel_params;
	u16 cd_l2tag2;
	u8 header_len;
};

struct ice_rx_buf {
	dma_addr_t dma;
	struct page *page;
	unsigned int page_offset;
	unsigned int pgcnt;
	unsigned int pagecnt_bias;
};

struct ice_q_stats {
	u64 pkts;
	u64 bytes;
};

struct ice_txq_stats {
	u64 restart_q;
	u64 tx_busy;
	u64 tx_linearize;
	int prev_pkt; /* negative if no pending Tx descriptors */
};

struct ice_rxq_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buf_failed;
};

struct ice_ring_stats {
	struct rcu_head rcu;	/* to avoid race on free */
	struct ice_q_stats stats;
	struct u64_stats_sync syncp;
	union {
		struct ice_txq_stats tx_stats;
		struct ice_rxq_stats rx_stats;
	};
};

enum ice_ring_state_t {
	ICE_TX_XPS_INIT_DONE,
	ICE_TX_NBITS,
};

/* This enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere in the manual
 * that mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
 * register but instead is a special value meaning "don't update" ITR0/1/2.
 */
enum ice_dyn_idx_t {
	ICE_IDX_ITR0 = 0,
	ICE_IDX_ITR1 = 1,
	ICE_IDX_ITR2 = 2,
	ICE_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
};

/* Header split modes defined by DTYPE field of Rx RLAN context */
enum ice_rx_dtype {
	ICE_RX_DTYPE_NO_SPLIT = 0,
	ICE_RX_DTYPE_HEADER_SPLIT = 1,
	ICE_RX_DTYPE_SPLIT_ALWAYS = 2,
};

struct ice_pkt_ctx {
	u64 cached_phctime;
	__be16 vlan_proto;
};

struct ice_xdp_buff {
	struct xdp_buff xdp_buff;
	const union ice_32b_rx_flex_desc *eop_desc;
	const struct ice_pkt_ctx *pkt_ctx;
};

/* Required for compatibility with xdp_buffs from xsk_pool */
static_assert(offsetof(struct ice_xdp_buff, xdp_buff) == 0);

/* indices into GLINT_ITR registers */
#define ICE_RX_ITR ICE_IDX_ITR0
#define ICE_TX_ITR ICE_IDX_ITR1
#define ICE_ITR_8K 124
#define ICE_ITR_20K 50
#define ICE_ITR_MAX 8160 /* 0x1FE0 */
#define ICE_DFLT_TX_ITR ICE_ITR_20K
#define ICE_DFLT_RX_ITR ICE_ITR_20K
enum ice_dynamic_itr {
	ITR_STATIC = 0,
	ITR_DYNAMIC = 1
};

#define ITR_IS_DYNAMIC(rc) ((rc)->itr_mode == ITR_DYNAMIC)
#define ICE_ITR_GRAN_S 1 /* ITR granularity is always 2us */
#define ICE_ITR_GRAN_US BIT(ICE_ITR_GRAN_S)
#define ICE_ITR_MASK 0x1FFE /* ITR register value alignment mask */
#define ITR_REG_ALIGN(setting) ((setting) & ICE_ITR_MASK)
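
/* ITR intervals are programmed in 2 us units (ICE_ITR_GRAN_US), which is
 * why ICE_ITR_MASK clears bit 0: e.g. ITR_REG_ALIGN(51) == 50, and
 * ICE_ITR_MAX (8160 us == 0x1FE0) is the largest programmable interval.
 * ICE_ITR_20K (50 us) corresponds to roughly 20k interrupts per second.
 */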

#define ICE_DFLT_INTRL 0
#define ICE_MAX_INTRL 236

#define ICE_IN_WB_ON_ITR_MODE 255
/* Sets WB_ON_ITR and assumes INTENA bit is already cleared, which allows
 * setting the MSK_M bit to tell hardware to ignore the INTENA_M bit. Also,
 * set the write-back latency to the usecs passed in.
 */
#define ICE_GLINT_DYN_CTL_WB_ON_ITR(usecs, itr_idx) \
	((((usecs) << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)) & \
	  GLINT_DYN_CTL_INTERVAL_M) | \
	 (((itr_idx) << GLINT_DYN_CTL_ITR_INDX_S) & \
	  GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M | \
	 GLINT_DYN_CTL_WB_ON_ITR_M)
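
/* Reading the macro above: the usecs argument is scaled into the INTERVAL
 * field in 2 us units (hence the combined shift by
 * GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S), itr_idx selects which ITR the
 * interval applies to, and INTENA_MSK plus WB_ON_ITR are set so the write
 * does not re-enable the interrupt. For example, usecs == 2 ends up as a
 * single 2 us tick in the INTERVAL field.
 */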

/* Legacy or Advanced Mode Queue */
#define ICE_TX_ADVANCED 0
#define ICE_TX_LEGACY 1

/* descriptor ring, associated with a VSI */
struct ice_rx_ring {
	/* CL1 - 1st cacheline starts here */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	struct ice_vsi *vsi;		/* Backreference to associated VSI */
	struct ice_q_vector *q_vector;	/* Backreference to associated vector */
	u8 __iomem *tail;
	u16 q_index;			/* Queue number of ring */

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */
	u16 next_to_alloc;

	union {
		struct ice_rx_buf *rx_buf;
		struct xdp_buff **xdp_buf;
	};
	/* CL2 - 2nd cacheline starts here */
	union {
		struct ice_xdp_buff xdp_ext;
		struct xdp_buff xdp;
	};
	/* CL3 - 3rd cacheline starts here */
	union {
		struct ice_pkt_ctx pkt_ctx;
		struct {
			u64 cached_phctime;
			__be16 vlan_proto;
		};
	};
	struct bpf_prog *xdp_prog;
	u16 rx_offset;

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;
	u16 first_desc;

	/* stats structs */
	struct ice_ring_stats *ring_stats;

	struct rcu_head rcu;		/* to avoid race on free */
	/* CL4 - 4th cacheline starts here */
	struct ice_channel *ch;
	struct ice_tx_ring *xdp_ring;
	struct ice_rx_ring *next;	/* pointer to next ring in q_vector */
	struct xsk_buff_pool *xsk_pool;
	u32 nr_frags;
	u16 max_frame;
	u16 rx_buf_len;
	dma_addr_t dma;			/* physical address of ring */
	u8 dcb_tc;			/* Traffic class of ring */
	u8 ptp_rx;
#define ICE_RX_FLAGS_RING_BUILD_SKB	BIT(1)
#define ICE_RX_FLAGS_CRC_STRIP_DIS	BIT(2)
#define ICE_RX_FLAGS_MULTIDEV		BIT(3)
	u8 flags;
	/* CL5 - 5th cacheline starts here */
	struct xdp_rxq_info xdp_rxq;
} ____cacheline_internodealigned_in_smp;

struct ice_tx_ring {
	/* CL1 - 1st cacheline starts here */
	struct ice_tx_ring *next;	/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	u8 __iomem *tail;
	struct ice_tx_buf *tx_buf;
	struct ice_q_vector *q_vector;	/* Backreference to associated vector */
	struct net_device *netdev;	/* netdev ring maps to */
	struct ice_vsi *vsi;		/* Backreference to associated VSI */
	/* CL2 - 2nd cacheline starts here */
	dma_addr_t dma;			/* physical address of ring */
	struct xsk_buff_pool *xsk_pool;
	u16 next_to_use;
	u16 next_to_clean;
	u16 q_handle;			/* Queue handle per TC */
	u16 reg_idx;			/* HW register index of the ring */
	u16 count;			/* Number of descriptors */
	u16 q_index;			/* Queue number of ring */
	u16 xdp_tx_active;
	/* stats structs */
	struct ice_ring_stats *ring_stats;
	/* CL3 - 3rd cacheline starts here */
	struct rcu_head rcu;		/* to avoid race on free */
	DECLARE_BITMAP(xps_state, ICE_TX_NBITS);	/* XPS Config State */
	struct ice_channel *ch;
	struct ice_ptp_tx *tx_tstamps;
	spinlock_t tx_lock;
	u32 txq_teid;			/* Added Tx queue TEID */
	/* CL4 - 4th cacheline starts here */
#define ICE_TX_FLAGS_RING_XDP		BIT(0)
#define ICE_TX_FLAGS_RING_VLAN_L2TAG1	BIT(1)
#define ICE_TX_FLAGS_RING_VLAN_L2TAG2	BIT(2)
	u8 flags;
	u8 dcb_tc;			/* Traffic class of ring */
	u16 quanta_prof_id;
} ____cacheline_internodealigned_in_smp;

static inline bool ice_ring_uses_build_skb(struct ice_rx_ring *ring)
{
	return !!(ring->flags & ICE_RX_FLAGS_RING_BUILD_SKB);
}

static inline void ice_set_ring_build_skb_ena(struct ice_rx_ring *ring)
{
	ring->flags |= ICE_RX_FLAGS_RING_BUILD_SKB;
}

static inline void ice_clear_ring_build_skb_ena(struct ice_rx_ring *ring)
{
	ring->flags &= ~ICE_RX_FLAGS_RING_BUILD_SKB;
}

static inline bool ice_ring_ch_enabled(struct ice_tx_ring *ring)
{
	return !!ring->ch;
}

static inline bool ice_ring_is_xdp(struct ice_tx_ring *ring)
{
	return !!(ring->flags & ICE_TX_FLAGS_RING_XDP);
}

enum ice_container_type {
	ICE_RX_CONTAINER,
	ICE_TX_CONTAINER,
};

struct ice_ring_container {
	/* head of linked-list of rings */
	union {
		struct ice_rx_ring *rx_ring;
		struct ice_tx_ring *tx_ring;
	};
	struct dim dim;		/* data for net_dim algorithm */
	u16 itr_idx;		/* index in the interrupt vector */
	/* this matches the maximum number of ITR bits, but in usec
	 * values, so it is shifted left one bit (bit zero is ignored)
	 */
	union {
		struct {
			u16 itr_setting:13;
			u16 itr_reserved:2;
			u16 itr_mode:1;
		};
		u16 itr_settings;
	};
	enum ice_container_type type;
};
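
/* itr_settings packing, for reference: itr_setting holds the user-visible
 * ITR in usecs (13 bits, bit 0 effectively ignored due to the 2 us
 * granularity) and itr_mode is ITR_STATIC or ITR_DYNAMIC; the anonymous
 * union lets callers snapshot or restore all 16 bits at once through
 * itr_settings. For example, an Rx container at the default of ICE_ITR_20K
 * would carry itr_setting == 50 with itr_mode == ITR_DYNAMIC.
 */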

struct ice_coalesce_stored {
	u16 itr_tx;
	u16 itr_rx;
	u8 intrl;
	u8 tx_valid;
	u8 rx_valid;
};

/* iterator for handling rings in ring container */
#define ice_for_each_rx_ring(pos, head) \
	for (pos = (head).rx_ring; pos; pos = pos->next)

#define ice_for_each_tx_ring(pos, head) \
	for (pos = (head).tx_ring; pos; pos = pos->next)
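
/* Typical usage (sketch only), assuming a q_vector with "tx" and "rx"
 * ring containers:
 *
 *	struct ice_tx_ring *tx_ring;
 *
 *	ice_for_each_tx_ring(tx_ring, q_vector->tx)
 *		;	// clean or reconfigure tx_ring here
 *
 * which walks the singly linked list headed by the container's tx_ring
 * pointer.
 */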

static inline unsigned int ice_rx_pg_order(struct ice_rx_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring->rx_buf_len > (PAGE_SIZE / 2))
		return 1;
#endif
	return 0;
}

#define ice_rx_pg_size(_ring) (PAGE_SIZE << ice_rx_pg_order(_ring))
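
/* Example: with 4K pages, a 2048-byte Rx buffer fits twice per page, so
 * ice_rx_pg_order() is 0 and ice_rx_pg_size() is 4096; a 3072-byte buffer
 * does not, so order-1 (8K) pages are used instead. On architectures with
 * pages of 8K or more the order is always 0.
 */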

union ice_32b_rx_flex_desc;

bool ice_alloc_rx_bufs(struct ice_rx_ring *rxr, unsigned int cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
u16
ice_select_queue(struct net_device *dev, struct sk_buff *skb,
		 struct net_device *sb_dev);
void ice_clean_tx_ring(struct ice_tx_ring *tx_ring);
void ice_clean_rx_ring(struct ice_rx_ring *rx_ring);
int ice_setup_tx_ring(struct ice_tx_ring *tx_ring);
int ice_setup_rx_ring(struct ice_rx_ring *rx_ring);
void ice_free_tx_ring(struct ice_tx_ring *tx_ring);
void ice_free_rx_ring(struct ice_rx_ring *rx_ring);
int ice_napi_poll(struct napi_struct *napi, int budget);
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
		   u8 *raw_packet);
int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget);
void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring);
#endif /* _ICE_TXRX_H_ */