/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_

#include <net/xdp.h>
#include "i40e_type.h"

/* Interrupt Throttling and Rate Limiting Goodies */
#define I40E_DEFAULT_IRQ_WORK 256

/* The datasheet for the X710 and XL710 indicates that the maximum value for
 * the ITR is 8160 usec, which is then called out as 0xFF0 with a 2 usec
 * resolution. 8160 is 0x1FE0 when written out in hex. So instead of storing
 * the register value, which is divided by 2, let's use the actual values and
 * avoid an excessive amount of translation.
 */
#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
#define I40E_ITR_MASK 0x1FFE /* mask for ITR register value */
#define I40E_MIN_ITR 2 /* reg uses 2 usec resolution */
#define I40E_ITR_20K 50
#define I40E_ITR_8K 122
#define I40E_MAX_ITR 8160 /* maximum value as per datasheet */
#define ITR_TO_REG(setting) ((setting) & ~I40E_ITR_DYNAMIC)
#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~I40E_ITR_MASK)
#define ITR_IS_DYNAMIC(setting) (!!((setting) & I40E_ITR_DYNAMIC))

#define I40E_ITR_RX_DEF (I40E_ITR_20K | I40E_ITR_DYNAMIC)
#define I40E_ITR_TX_DEF (I40E_ITR_20K | I40E_ITR_DYNAMIC)
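
/* Illustrative example (not part of the register interface): the default
 * settings above are I40E_ITR_20K | I40E_ITR_DYNAMIC = 0x8032, so
 *   ITR_IS_DYNAMIC(0x8032) -> true (adaptive ITR enabled)
 *   ITR_TO_REG(0x8032)     -> 0x32 = 50 usec, i.e. roughly 20K ints/sec
 * The usec value is what later gets scaled by the 2 usec register
 * resolution when it is programmed into the hardware.
 */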

/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
 * the value of the rate limit is non-zero
 */
#define INTRL_ENA BIT(6)
#define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */
#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)

/**
 * i40e_intrl_usec_to_reg - convert interrupt rate limit to register
 * @intrl: interrupt rate limit to convert
 *
 * This function converts a decimal interrupt rate limit to the appropriate
 * register format expected by the firmware when setting interrupt rate limit.
 */
static inline u16 i40e_intrl_usec_to_reg(int intrl)
{
	if (intrl >> 2)
		return ((intrl >> 2) | INTRL_ENA);
	else
		return 0;
}
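
/* Worked example (illustrative): a requested limit of 20 usec becomes
 * (20 >> 2) | INTRL_ENA = 0x05 | 0x40 = 0x45, and INTRL_REG_TO_USEC(0x45)
 * recovers (0x45 & ~0x40) << 2 = 20 usec. Requests below 4 usec return 0,
 * leaving rate limiting disabled.
 */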

#define I40E_QUEUE_END_OF_LIST 0x7FF

/* This enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere the manual
 * mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
 * register but instead is a special value meaning "don't update" ITR0/1/2.
 */
enum i40e_dyn_idx {
	I40E_IDX_ITR0 = 0,
	I40E_IDX_ITR1 = 1,
	I40E_IDX_ITR2 = 2,
	I40E_ITR_NONE = 3 /* ITR_NONE must not be used as an index */
};

/* these are indexes into ITRN registers */
#define I40E_RX_ITR I40E_IDX_ITR0
#define I40E_TX_ITR I40E_IDX_ITR1
#define I40E_SW_ITR I40E_IDX_ITR2

/* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
	BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))

#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))

#define i40e_pf_get_default_rss_hena(pf) \
	(test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, (pf)->hw.caps) ? \
	 I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)

/* Supported Rx Buffer Sizes (a multiple of 128) */
#define I40E_RXBUFFER_256 256
#define I40E_RXBUFFER_1536 1536 /* 128B aligned standard Ethernet frame */
#define I40E_RXBUFFER_2048 2048
#define I40E_RXBUFFER_3072 3072 /* Used for large frames w/ padding */
#define I40E_MAX_RXBUFFER 9728 /* largest size for single descriptor */

/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
 * this adds up to 512 bytes of extra data meaning the smallest allocation
 * we could have is 1K.
 * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
 * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
 */
#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
#define I40E_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
#define i40e_rx_desc i40e_16byte_rx_desc

#define I40E_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

/* Attempt to maximize the headroom available for incoming frames. We
 * use a 2K buffer for receives and need 1536/1534 to store the data for
 * the frame. This leaves us with 512 bytes of room. From that we need
 * to deduct the space needed for the shared info and the padding needed
 * to IP align the frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 * up negative. In these cases we should fall back to the legacy
 * receive path.
 */
#if (PAGE_SIZE < 8192)
#define I40E_2K_TOO_SMALL_WITH_PADDING \
((NET_SKB_PAD + I40E_RXBUFFER_1536) > SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048))

static inline int i40e_compute_pad(int rx_buf_len)
{
	int page_size, pad_size;

	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;

	return pad_size;
}

static inline int i40e_skb_pad(void)
{
	int rx_buf_len;

	/* If a 2K buffer cannot handle a standard Ethernet frame then
	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
	 *
	 * For a 3K buffer we need to add enough padding to allow for
	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
	 * cache-line alignment.
	 */
	if (I40E_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = I40E_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = I40E_RXBUFFER_1536;

	/* if needed make room for NET_IP_ALIGN */
	rx_buf_len -= NET_IP_ALIGN;

	return i40e_compute_pad(rx_buf_len);
}

#define I40E_SKB_PAD i40e_skb_pad()
#else
#define I40E_2K_TOO_SMALL_WITH_PADDING false
#define I40E_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif
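
/* Rough illustration (exact numbers depend on kernel config): with 4 KiB
 * pages, 64-byte cache lines and NET_IP_ALIGN == 0, a 2K buffer comfortably
 * fits NET_SKB_PAD + 1536 bytes, so I40E_SKB_PAD evaluates to
 * i40e_compute_pad(1536) = SKB_WITH_OVERHEAD(2048) - 1536, i.e. a couple of
 * hundred bytes of headroom left over after the skb_shared_info overhead.
 */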

/**
 * i40e_test_staterr - tests bits in Rx descriptor status and error fields
 * @rx_desc: pointer to receive descriptor (in le64 format)
 * @stat_err_bits: value to mask
 *
 * This function does some fast chicanery in order to return the
 * value of the mask which is really only used for boolean tests.
 * The status_error_len doesn't need to be shifted because it begins
 * at offset zero.
 */
static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
				     const u64 stat_err_bits)
{
	return !!(rx_desc->wb.qword1.status_error_len &
		  cpu_to_le64(stat_err_bits));
}

/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define I40E_RX_BUFFER_WRITE 32 /* Must be power of 2 */

#define I40E_RX_NEXT_DESC(r, i, n)		\
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			i = 0;			\
		(n) = I40E_RX_DESC((r), (i));	\
	} while (0)

#define I40E_MAX_BUFFER_TXD 8
#define I40E_MIN_TX_LEN 17

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define I40E_MAX_READ_REQ_SIZE 4096
#define I40E_MAX_DATA_PER_TXD (16 * 1024 - 1)
#define I40E_MAX_DATA_PER_TXD_ALIGNED \
	(I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))
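
/* Worked out (illustrative): I40E_MAX_DATA_PER_TXD is 16383 (0x3FFF);
 * clearing the low bits gives I40E_MAX_DATA_PER_TXD_ALIGNED = 12288,
 * i.e. at most 12K of data per descriptor once aligned down to the 4K
 * maximum read request size.
 */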

/**
 * i40e_txd_use_count - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment. For our purposes this is accurate out to 1M which is orders of
 * magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + 1;
 *
 * Since multiplication and division are commutative, we can reorder
 * operations into:
 *     return ((size * 85) >> 20) + 1;
 */
static inline unsigned int i40e_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + 1;
}
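
/* Quick sanity check (illustrative): a 60000 byte TSO payload needs
 * ((60000 * 85) >> 20) + 1 = 4 + 1 = 5 descriptors, matching
 * DIV_ROUND_UP(60000, 12288) = 5, while a 1500 byte frame maps to a
 * single descriptor.
 */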

/* Tx Descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + 6)

#define I40E_TX_FLAGS_HW_VLAN BIT(1)
#define I40E_TX_FLAGS_SW_VLAN BIT(2)
#define I40E_TX_FLAGS_TSO BIT(3)
#define I40E_TX_FLAGS_IPV4 BIT(4)
#define I40E_TX_FLAGS_IPV6 BIT(5)
#define I40E_TX_FLAGS_TSYN BIT(8)
#define I40E_TX_FLAGS_FD_SB BIT(9)
#define I40E_TX_FLAGS_UDP_TUNNEL BIT(10)
#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
#define I40E_TX_FLAGS_VLAN_SHIFT 16

struct i40e_tx_buffer {
	struct i40e_tx_desc *next_to_watch;
	union {
		struct xdp_frame *xdpf;
		struct sk_buff *skb;
		void *raw_buf;
	};
	unsigned int bytecount;
	unsigned short gso_segs;

	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

struct i40e_rx_buffer {
	dma_addr_t dma;
	struct page *page;
	__u32 page_offset;
	__u16 pagecnt_bias;
	__u32 page_count;
};

struct i40e_queue_stats {
	u64 packets;
	u64 bytes;
};

struct i40e_tx_queue_stats {
	u64 restart_queue;
	u64 tx_busy;
	u64 tx_done_old;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 tx_stopped;
	int prev_pkt_ctr;
};

struct i40e_rx_queue_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buff_failed;
	u64 page_reuse_count;
	u64 page_alloc_count;
	u64 page_waive_count;
	u64 page_busy_count;
};

enum i40e_ring_state {
	__I40E_TX_FDIR_INIT_DONE,
	__I40E_TX_XPS_INIT_DONE,
	__I40E_RING_STATE_NBITS /* must be last */
};

/* some useful defines for virtchannel interface, which
 * is the only remaining user of header split
 */
#define I40E_RX_DTYPE_HEADER_SPLIT 1
#define I40E_RX_SPLIT_L2 0x1
#define I40E_RX_SPLIT_IP 0x2
#define I40E_RX_SPLIT_TCP_UDP 0x4
#define I40E_RX_SPLIT_SCTP 0x8

/* struct that defines a descriptor ring, associated with a VSI */
struct i40e_ring {
	struct i40e_ring *next; /* pointer to next ring in q_vector */
	void *desc; /* Descriptor ring memory */
	struct device *dev; /* Used for DMA mapping */
	struct net_device *netdev; /* netdev ring maps to */
	struct bpf_prog *xdp_prog;
	union {
		struct i40e_tx_buffer *tx_bi;
		struct i40e_rx_buffer *rx_bi;
		struct xdp_buff **rx_bi_zc;
	};
	DECLARE_BITMAP(state, __I40E_RING_STATE_NBITS);
	u16 queue_index; /* Queue number of ring */
	u8 dcb_tc; /* Traffic class of ring */
	u8 __iomem *tail;

	/* Storing the xdp_buff on the ring saves the state of a partially
	 * built packet when i40e_clean_rx_ring_irq() must return before it
	 * sees EOP, so that packet building for this ring can resume in the
	 * next call to i40e_clean_rx_ring_irq().
	 */
	struct xdp_buff xdp;

	/* Next descriptor to be processed; next_to_clean is updated only on
	 * processing EOP descriptor
	 */
	u16 next_to_process;
	/* high bit set means dynamic, use accessor routines to read/write.
	 * hardware only supports 2us resolution for the ITR registers.
	 * these values always store the USER setting, and must be converted
	 * before programming to a register.
	 */
	u16 itr_setting;

	u16 count; /* Number of descriptors */
	u16 reg_idx; /* HW register index of the ring */
	u16 rx_buf_len;

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;
	u16 xdp_tx_active;

	u8 atr_sample_rate;
	u8 atr_count;

	bool ring_active; /* is ring online or not */
	bool arm_wb; /* do something to arm write back */
	u8 packet_stride;

	u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED BIT(1)
#define I40E_TXR_FLAGS_XDP BIT(2)

	/* stats structs */
	struct i40e_queue_stats stats;
	struct u64_stats_sync syncp;
	union {
		struct i40e_tx_queue_stats tx_stats;
		struct i40e_rx_queue_stats rx_stats;
	};

	unsigned int size; /* length of descriptor ring in bytes */
	dma_addr_t dma; /* physical address of ring */

	struct i40e_vsi *vsi; /* Backreference to associated VSI */
	struct i40e_q_vector *q_vector; /* Backreference to associated vector */

	struct rcu_head rcu; /* to avoid race on free */
	u16 next_to_alloc;

	struct i40e_channel *ch;
	u16 rx_offset;
	struct xdp_rxq_info xdp_rxq;
	struct xsk_buff_pool *xsk_pool;
} ____cacheline_internodealigned_in_smp;

static inline bool ring_uses_build_skb(struct i40e_ring *ring)
{
	return !!(ring->flags & I40E_RXR_FLAGS_BUILD_SKB_ENABLED);
}

static inline void set_ring_build_skb_enabled(struct i40e_ring *ring)
{
	ring->flags |= I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}

static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring)
{
	ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}

static inline bool ring_is_xdp(struct i40e_ring *ring)
{
	return !!(ring->flags & I40E_TXR_FLAGS_XDP);
}

static inline void set_ring_xdp(struct i40e_ring *ring)
{
	ring->flags |= I40E_TXR_FLAGS_XDP;
}

#define I40E_ITR_ADAPTIVE_MIN_INC 0x0002
#define I40E_ITR_ADAPTIVE_MIN_USECS 0x0002
#define I40E_ITR_ADAPTIVE_MAX_USECS 0x007e
#define I40E_ITR_ADAPTIVE_LATENCY 0x8000
#define I40E_ITR_ADAPTIVE_BULK 0x0000

struct i40e_ring_container {
	struct i40e_ring *ring; /* pointer to linked list of ring(s) */
	unsigned long next_update; /* jiffies value of next update */
	unsigned int total_bytes; /* total bytes processed this int */
	unsigned int total_packets; /* total packets processed this int */
	u16 count;
	u16 target_itr; /* target ITR setting for ring(s) */
	u16 current_itr; /* current ITR setting for ring(s) */
};

/* iterator for handling rings in ring container */
#define i40e_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)

static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring->rx_buf_len > (PAGE_SIZE / 2))
		return 1;
#endif
	return 0;
}

#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring))
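
/* Example (illustrative): with 4 KiB pages, a ring using the 3K buffer size
 * (rx_buf_len > PAGE_SIZE / 2) gets order-1 pages, so i40e_rx_pg_size()
 * evaluates to 8192; the common 2K buffer case stays on order-0, 4 KiB pages.
 */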

bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
u16 i40e_lan_select_queue(struct net_device *netdev, struct sk_buff *skb,
			  struct net_device *sb_dev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring);
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
void i40e_free_tx_resources(struct i40e_ring *tx_ring);
void i40e_free_rx_resources(struct i40e_ring *rx_ring);
int i40e_napi_poll(struct napi_struct *napi, int budget);
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
void i40e_detect_recover_hung(struct i40e_vsi *vsi);
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);
int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		  u32 flags);
bool i40e_is_non_eop(struct i40e_ring *rx_ring,
		     union i40e_rx_desc *rx_desc);

/**
 * i40e_get_head - Retrieve head from head writeback
 * @tx_ring: tx ring to fetch head of
 *
 * Returns value of Tx ring head based on value stored
 * in head write-back location
 **/
static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
{
	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;

	return le32_to_cpu(*(volatile __le32 *)head);
}

/**
 * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
 * @skb: send buffer
 *
 * Returns the number of data descriptors needed for this skb. Returns 0 to
 * indicate there are not enough descriptors available in this ring, since we
 * need at least one descriptor.
 **/
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += i40e_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}
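
/* Worked example (illustrative): an skb with a 1500 byte linear head and two
 * 32 KiB page fragments counts 1 + 3 + 3 = 7 data descriptors, since each
 * 32 KiB fragment spans three 12K-aligned chunks.
 */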

/**
 * i40e_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the number of free descriptors we want to assure is available
 *
 * Returns 0 if stop is not needed
 **/
static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __i40e_maybe_stop_tx(tx_ring, size);
}

/**
 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb: send buffer
 * @count: number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 **/
static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < I40E_MAX_BUFFER_TXD))
		return false;

	if (skb_is_gso(skb))
		return __i40e_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != I40E_MAX_BUFFER_TXD;
}
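
/* Illustrative use in a transmit path (a sketch, not necessarily the exact
 * i40e_xmit_frame_ring() logic): callers typically combine the helpers above
 * along the lines of
 *
 *	count = i40e_xmit_descriptor_count(skb);
 *	if (i40e_chk_linearize(skb, count)) {
 *		if (__skb_linearize(skb))
 *			goto out_drop;
 *		count = i40e_txd_use_count(skb->len);
 *	}
 *	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1))
 *		return NETDEV_TX_BUSY;
 *
 * where the extra descriptors reserved by the stop check cover the context
 * descriptor and the gap kept around the head write-back cache line.
 */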

/**
 * txring_txq - Find the netdev Tx ring based on the i40e Tx ring
 * @ring: Tx ring to find the netdev equivalent of
 **/
static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
#endif /* _I40E_TXRX_H_ */