// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* The driver transmit and receive code */

#include <linux/prefetch.h>
#include <linux/mm.h>
#include "ice.h"
#include "ice_dcb_lib.h"

#define ICE_RX_HDR_SIZE 256

/**
 * ice_unmap_and_free_tx_buf - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buf: the buffer to free
 */
static void
ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
{
	if (tx_buf->skb) {
		dev_kfree_skb_any(tx_buf->skb);
		if (dma_unmap_len(tx_buf, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buf, dma),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buf, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buf, dma),
			       dma_unmap_len(tx_buf, len),
			       DMA_TO_DEVICE);
	}

	tx_buf->next_to_watch = NULL;
	tx_buf->skb = NULL;
	dma_unmap_len_set(tx_buf, len, 0);
	/* tx_buf must be completely set up in the transmit path */
}

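/**
 * txring_txq - helper to convert from a ring to a queue
 * @ring: Tx ring to find the netdev equivalent of
 */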
static struct netdev_queue *txring_txq(const struct ice_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->q_index);
}

/**
 * ice_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 */
void ice_clean_tx_ring(struct ice_ring *tx_ring)
{
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buf)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);

	memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * ice_free_tx_ring - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
void ice_free_tx_ring(struct ice_ring *tx_ring)
{
	ice_clean_tx_ring(tx_ring);
	devm_kfree(tx_ring->dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;

	if (tx_ring->desc) {
		dmam_free_coherent(tx_ring->dev, tx_ring->size,
				   tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * ice_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (i.e. the clean is finished)
 */
static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
{
	unsigned int total_bytes = 0, total_pkts = 0;
	unsigned int budget = ICE_DFLT_IRQ_WORK;
	struct ice_vsi *vsi = tx_ring->vsi;
	s16 i = tx_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	prefetch(&vsi->state);

	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		smp_rmb();	/* prevent any other reads prior to eop_desc */

		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_pkts += tx_buf->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buf data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_buf;
				tx_desc = ICE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.pkts += total_pkts;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_pkts += total_pkts;

	netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
				  total_bytes);

#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
		     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->q_index) &&
		    !test_bit(__ICE_DOWN, vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->q_index);
			++tx_ring->tx_stats.restart_q;
		}
	}

	return !!budget;
}

/**
 * ice_setup_tx_ring - Allocate the Tx descriptors
 * @tx_ring: the Tx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_tx_ring(struct ice_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_buf);
	tx_ring->tx_buf =
		devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
			     GFP_KERNEL);
	if (!tx_ring->tx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
			      PAGE_SIZE);
	tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
					    GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->tx_stats.prev_pkt = -1;
	return 0;

err:
	devm_kfree(dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 */
void ice_clean_rx_ring(struct ice_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_buf)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

		if (rx_buf->skb) {
			dev_kfree_skb(rx_buf->skb);
			rx_buf->skb = NULL;
		}
		if (!rx_buf->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(dev, rx_buf->dma,
					      rx_buf->page_offset,
					      ICE_RXBUF_2048, DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(dev, rx_buf->dma, PAGE_SIZE,
				     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);

		rx_buf->page = NULL;
		rx_buf->page_offset = 0;
	}

	memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * ice_free_rx_ring - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
void ice_free_rx_ring(struct ice_ring *rx_ring)
{
	ice_clean_rx_ring(rx_ring);
	devm_kfree(rx_ring->dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;

	if (rx_ring->desc) {
		dmam_free_coherent(rx_ring->dev, rx_ring->size,
				   rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * ice_setup_rx_ring - Allocate the Rx descriptors
 * @rx_ring: the Rx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_rx_ring(struct ice_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_buf);
	rx_ring->rx_buf =
		devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
			     GFP_KERNEL);
	if (!rx_ring->rx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
			      PAGE_SIZE);
	rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
					    GFP_KERNEL);
	if (!rx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			rx_ring->size);
		goto err;
	}

	rx_ring->next_to_use = 0;
	rx_ring->next_to_clean = 0;
	return 0;

err:
	devm_kfree(dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 */
static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
{
	u16 prev_ntu = rx_ring->next_to_use;

	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* QRX_TAIL will be updated with any tail value, but hardware ignores
	 * the lower 3 bits. This makes it so we only bump tail on meaningful
	 * boundaries. Also, this allows us to bump tail on intervals of 8 up to
	 * the budget depending on the current traffic load.
	 */
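	/* e.g. (illustrative values) if next_to_use moved from 8 to 13, the
	 * masked value below is still 8 and the tail write is skipped; once
	 * it reaches 16, tail is bumped to 16
	 */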
	val &= ~0x7;
	if (prev_ntu != val) {
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(val, rx_ring->tail);
	}
}

/**
 * ice_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buf struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 */
static bool
ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page)) {
		rx_ring->rx_stats.page_reuse_count++;
		return true;
	}

	/* alloc new page for storage */
	page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
				 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, 0);
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;
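	/* Claim nearly the full page reference count up front and track how
	 * many references the driver still owns in pagecnt_bias; handing a
	 * half page to the stack then costs a plain decrement instead of an
	 * atomic page_ref_inc() per buffer
	 */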
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;

	return true;
}

/**
 * ice_alloc_rx_bufs - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail. Returning
 * true signals to the caller that we didn't replace cleaned_count buffers and
 * there is more work to do.
 *
 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
 * buffers. Then bump tail at most one time. Grouping like this lets us avoid
 * multiple tail writes per call.
 */
bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
{
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	struct ice_rx_buf *bi;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return false;

	/* get the Rx descriptor and buffer based on next_to_use */
	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_buf[ntu];

	do {
		/* if we fail here, we have work remaining */
		if (!ice_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 ICE_RXBUF_2048,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = ICE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buf;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.status_error0 = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	return !!cleaned_count;
}

/**
 * ice_page_is_reserved - check if reuse is possible
 * @page: page struct to check
 */
static bool ice_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}

/**
 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
 * @rx_buf: Rx buffer to adjust
 * @size: Size of adjustment
 *
 * Update the offset within page so that Rx buf will be ready to be reused.
 * For systems with PAGE_SIZE < 8192 this function will flip the page offset
 * so the second half of page assigned to Rx buffer will be used, otherwise
 * the offset is moved by the @size bytes
 */
static void
ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
{
#if (PAGE_SIZE < 8192)
	/* flip page offset to other buffer */
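	/* (e.g. 2048 byte buffers in a 4K page alternate between offsets
	 * 0 and 2048)
	 */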
	rx_buf->page_offset ^= size;
#else
	/* move offset up to the next cache line */
	rx_buf->page_offset += size;
#endif
}

/**
 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
 * @rx_buf: buffer containing the page
 *
 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
 * which will assign the current buffer to the buffer that next_to_alloc is
 * pointing to; otherwise, the DMA mapping needs to be destroyed and
 * page freed
 */
static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
{
#if (PAGE_SIZE >= 8192)
	unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048;
#endif
	unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
	struct page *page = rx_buf->page;

	/* avoid re-using remote pages */
	if (unlikely(ice_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((page_count(page) - pagecnt_bias) > 1))
		return false;
#else
	if (rx_buf->page_offset > last_offset)
		return false;
#endif /* PAGE_SIZE < 8192 */

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buf->pagecnt_bias = USHRT_MAX;
	}

	return true;
}

/**
 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
 * @rx_buf: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: packet length from rx_desc
 *
 * This function will add the data contained in rx_buf->page to the skb.
 * It will just attach the page as a frag to the skb.
 * The function will then update the page offset.
 */
static void
ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
		unsigned int size)
{
#if (PAGE_SIZE >= 8192)
	unsigned int truesize = SKB_DATA_ALIGN(size);
#else
	unsigned int truesize = ICE_RXBUF_2048;
#endif

	if (!size)
		return;

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
			rx_buf->page_offset, size, truesize);

	/* page is being used so we must update the page offset */
	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
}

/**
 * ice_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: Rx descriptor ring to store buffers on
 * @old_buf: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void
ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
{
	u16 nta = rx_ring->next_to_alloc;
	struct ice_rx_buf *new_buf;

	new_buf = &rx_ring->rx_buf[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls and unnecessary copy of skb.
	 */
	new_buf->dma = old_buf->dma;
	new_buf->page = old_buf->page;
	new_buf->page_offset = old_buf->page_offset;
	new_buf->pagecnt_bias = old_buf->pagecnt_bias;
}

/**
 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
 * @rx_ring: Rx descriptor ring to transact packets on
 * @skb: skb to be used
 * @size: size of buffer to add to skb
 *
 * This function will pull an Rx buffer from the ring and synchronize it
 * for use by the CPU.
 */
static struct ice_rx_buf *
ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
	       const unsigned int size)
{
	struct ice_rx_buf *rx_buf;

	rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
	prefetchw(rx_buf->page);
	*skb = rx_buf->skb;

	if (!size)
		return rx_buf;

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
				      rx_buf->page_offset, size,
				      DMA_FROM_DEVICE);

	/* We have pulled a buffer for use, so decrement pagecnt_bias */
	rx_buf->pagecnt_bias--;

	return rx_buf;
}

/**
 * ice_construct_skb - Allocate skb and populate it
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @size: the length of the packet
 *
 * This function allocates an skb. It then populates it with the page
 * data from the current receive descriptor, taking care to set up the
 * skb correctly.
 */
static struct sk_buff *
ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
		  unsigned int size)
{
	void *va = page_address(rx_buf->page) + rx_buf->page_offset;
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	prefetch(va);
#if L1_CACHE_BYTES < 128
	prefetch((u8 *)va + L1_CACHE_BYTES);
#endif /* L1_CACHE_BYTES */

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_record_rx_queue(skb, rx_ring->q_index);
	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > ICE_RX_HDR_SIZE)
		headlen = eth_get_headlen(skb->dev, va, ICE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));

	/* if we exhaust the linear part then add what is left as a frag */
	size -= headlen;
	if (size) {
#if (PAGE_SIZE >= 8192)
		unsigned int truesize = SKB_DATA_ALIGN(size);
#else
		unsigned int truesize = ICE_RXBUF_2048;
#endif
		skb_add_rx_frag(skb, 0, rx_buf->page,
				rx_buf->page_offset + headlen, size, truesize);
		/* buffer is used by skb, update page_offset */
		ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
	} else {
		/* buffer is unused, reset bias back to rx_buf; data was copied
		 * onto skb's linear part so there's no need for adjusting
		 * page offset and we can reuse this buffer as-is
		 */
		rx_buf->pagecnt_bias++;
	}

	return skb;
}

/**
 * ice_put_rx_buf - Clean up used buffer and either recycle or free
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 *
 * This function will clean up the contents of the rx_buf. It will
 * either recycle the buffer or unmap it and free the associated resources.
 */
static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
	if (!rx_buf)
		return;

	if (ice_can_reuse_rx_page(rx_buf)) {
		/* hand second half of page back to the ring */
		ice_reuse_rx_page(rx_ring, rx_buf);
		rx_ring->rx_stats.page_reuse_count++;
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, PAGE_SIZE,
				     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
	}

	/* clear contents of buffer_info */
	rx_buf->page = NULL;
	rx_buf->skb = NULL;
}

/**
 * ice_cleanup_headers - Correct empty headers
 * @skb: pointer to current skb being fixed
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 */
static bool ice_cleanup_headers(struct sk_buff *skb)
{
	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

/**
 * ice_test_staterr - tests bits in Rx descriptor status and error fields
 * @rx_desc: pointer to receive descriptor (in le64 format)
 * @stat_err_bits: value to mask
 *
 * This function does some fast chicanery in order to return the
 * value of the mask which is really only used for boolean tests.
 * The status_error_len doesn't need to be shifted because it begins
 * at offset zero.
 */
static bool
ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits)
{
	return !!(rx_desc->wb.status_error0 &
		  cpu_to_le16(stat_err_bits));
}

/**
 * ice_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * This function updates next to clean. If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 */
static bool
ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
	       struct sk_buff *skb)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(ICE_RX_DESC(rx_ring, ntc));

	/* if we are the last buffer then there is nothing else to do */
#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
	if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
		return false;

	/* place skb in next buffer to be received */
	rx_ring->rx_buf[ntc].skb = skb;
	rx_ring->rx_stats.non_eop_descs++;

	return true;
}

/**
 * ice_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 */
static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)
{
	return PKT_HASH_TYPE_NONE;
}

/**
 * ice_rx_hash - set the hash value in the skb
 * @rx_ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: pointer to current skb
 * @rx_ptype: the ptype value from the descriptor
 */
static void
ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
	    struct sk_buff *skb, u8 rx_ptype)
{
	struct ice_32b_rx_flex_desc_nic *nic_mdid;
	u32 hash;

	if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
		return;

	if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
		return;

	nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
	hash = le32_to_cpu(nic_mdid->rss_hash);
	skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
}

/**
 * ice_rx_csum - Indicate in skb if checksum is good
 * @ring: the ring we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 * @ptype: the packet type decoded by hardware
 *
 * skb->protocol must be set before this function is called
 */
static void
ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
	    union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
{
	struct ice_rx_ptype_decoded decoded;
	u32 rx_error, rx_status;
	bool ipv4, ipv6;

	rx_status = le16_to_cpu(rx_desc->wb.status_error0);
	rx_error = rx_status;

	decoded = ice_decode_rx_desc_ptype(ptype);

	/* Start with CHECKSUM_NONE and by default csum_level = 0 */
	skb->ip_summed = CHECKSUM_NONE;
	skb_checksum_none_assert(skb);

	/* check if Rx checksum is enabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* check if HW has decoded the packet and checksum */
	if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
		return;

	if (!(decoded.known && decoded.outer_ip))
		return;

	ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
	ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);

	if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
				 BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
		goto checksum_fail;
	else if (ipv6 && (rx_status &
			  (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
		goto checksum_fail;

	/* check for L4 errors and handle packets that were not able to be
	 * checksummed due to arrival speed
	 */
	if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
		goto checksum_fail;

	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
	switch (decoded.inner_prot) {
	case ICE_RX_PTYPE_INNER_PROT_TCP:
	case ICE_RX_PTYPE_INNER_PROT_UDP:
	case ICE_RX_PTYPE_INNER_PROT_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	default:
		break;
	}
	return;

checksum_fail:
	ring->vsi->back->hw_csum_rx_error++;
}

/**
 * ice_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: Rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 * @ptype: the packet type decoded by hardware
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, protocol, and
 * other fields within the skb.
 */
static void
ice_process_skb_fields(struct ice_ring *rx_ring,
		       union ice_32b_rx_flex_desc *rx_desc,
		       struct sk_buff *skb, u8 ptype)
{
	ice_rx_hash(rx_ring, rx_desc, skb, ptype);

	/* modifies the skb - consumes the enet header */
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);

	ice_rx_csum(rx_ring, skb, rx_desc, ptype);
}

/**
 * ice_receive_skb - Send a completed packet up the stack
 * @rx_ring: Rx ring in play
 * @skb: packet to send up
 * @vlan_tag: VLAN tag for packet
 *
 * This function sends the completed packet (via skb) up the stack using
 * gro receive functions (with/without VLAN tag)
 */
static void
ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
{
	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (vlan_tag & VLAN_VID_MASK))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
	napi_gro_receive(&rx_ring->q_vector->napi, skb);
}

/**
 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @rx_ring: Rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing. The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 */
static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
	u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
	bool failure;

	/* start the loop to process Rx packets bounded by 'budget' */
	while (likely(total_rx_pkts < (unsigned int)budget)) {
		union ice_32b_rx_flex_desc *rx_desc;
		struct ice_rx_buf *rx_buf;
		struct sk_buff *skb;
		unsigned int size;
		u16 stat_err_bits;
		u16 vlan_tag = 0;
		u8 rx_ptype;

		/* get the Rx desc from Rx ring based on 'next_to_clean' */
		rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);

		/* status_error_len will always be zero for unused descriptors
		 * because it's cleared in cleanup, and overlaps with hdr_addr
		 * which is always zero because packet split isn't used, if the
		 * hardware wrote DD then it will be non-zero
		 */
		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
		if (!ice_test_staterr(rx_desc, stat_err_bits))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
		 */
		dma_rmb();

		size = le16_to_cpu(rx_desc->wb.pkt_len) &
			ICE_RX_FLX_DESC_PKT_LEN_M;

		/* retrieve a buffer from the ring */
		rx_buf = ice_get_rx_buf(rx_ring, &skb, size);

		if (skb)
			ice_add_rx_frag(rx_buf, skb, size);
		else
			skb = ice_construct_skb(rx_ring, rx_buf, size);

		/* exit if we failed to retrieve a buffer */
		if (!skb) {
			rx_ring->rx_stats.alloc_buf_failed++;
			if (rx_buf)
				rx_buf->pagecnt_bias++;
			break;
		}

		ice_put_rx_buf(rx_ring, rx_buf);
		cleaned_count++;

		/* skip if it is NOP desc */
		if (ice_is_non_eop(rx_ring, rx_desc, skb))
			continue;

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
		if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) {
			dev_kfree_skb_any(skb);
			continue;
		}

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
		if (ice_test_staterr(rx_desc, stat_err_bits))
			vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);

		/* correct empty headers and pad skb if needed (to make a
		 * valid Ethernet frame)
		 */
		if (ice_cleanup_headers(skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		/* populate checksum, VLAN, and protocol */
		rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
			ICE_RX_FLEX_DESC_PTYPE_M;

		ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);

		/* send completed skb up the stack */
		ice_receive_skb(rx_ring, skb, vlan_tag);

		/* update budget accounting */
		total_rx_pkts++;
	}

	/* return up to cleaned_count buffers to hardware */
	failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);

	/* update queue and vector specific stats */
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.pkts += total_rx_pkts;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	rx_ring->q_vector->rx.total_pkts += total_rx_pkts;
	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;

	/* guarantee a trip back through this routine if there was a failure */
	return failure ? budget : (int)total_rx_pkts;
}

/**
 * ice_adjust_itr_by_size_and_speed - Adjust ITR based on current traffic
 * @port_info: port_info structure containing the current link speed
 * @avg_pkt_size: average size of Tx or Rx packets based on clean routine
 * @itr: ITR value to update
 *
 * Calculate how big of an increment should be applied to the ITR value passed
 * in based on wmem_default, SKB overhead, Ethernet overhead, and the current
 * link speed.
 *
 * The following is a calculation derived from:
 *  wmem_default / (size + overhead) = desired_pkts_per_int
 *  rate / bits_per_byte / (size + Ethernet overhead) = pkt_rate
 *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
 *
 * Assuming wmem_default is 212992 and overhead is 640 bytes per
 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
 * formula down to:
 *
 *	 wmem_default * bits_per_byte * usecs_per_sec   pkt_size + 24
 * ITR = -------------------------------------------- * --------------
 *			     rate			pkt_size + 640
 */
static unsigned int
ice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info,
				 unsigned int avg_pkt_size,
				 unsigned int itr)
{
	switch (port_info->phy.link_info.link_speed) {
	case ICE_AQ_LINK_SPEED_100GB:
		itr += DIV_ROUND_UP(17 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_50GB:
		itr += DIV_ROUND_UP(34 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_40GB:
		itr += DIV_ROUND_UP(43 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_25GB:
		itr += DIV_ROUND_UP(68 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_20GB:
		itr += DIV_ROUND_UP(85 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	case ICE_AQ_LINK_SPEED_10GB:
		/* fall through */
	default:
		itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24),
				    avg_pkt_size + 640);
		break;
	}

	if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
		itr &= ICE_ITR_ADAPTIVE_LATENCY;
		itr += ICE_ITR_ADAPTIVE_MAX_USECS;
	}

	return itr;
}
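
/* Worked example (illustrative numbers only): at 25GB link speed with an
 * average packet size of 1500 bytes, the increment above is
 * DIV_ROUND_UP(68 * 1524, 2140) = 49 usecs, while 64 byte packets add only
 * DIV_ROUND_UP(68 * 88, 704) = 9 usecs.
 */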

/**
 * ice_update_itr - update the adaptive ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @rc: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt. The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern. Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 */
static void
ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc)
{
	unsigned long next_update = jiffies;
	unsigned int packets, bytes, itr;
	bool container_is_rx;

	if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting))
		return;

	/* If itr_countdown is set it means we programmed an ITR within
	 * the last 4 interrupt cycles. This has a side effect of us
	 * potentially firing an early interrupt. In order to work around
	 * this we need to throw out any data received for a few
	 * interrupts following the update.
	 */
	if (q_vector->itr_countdown) {
		itr = rc->target_itr;
		goto clear_counts;
	}

	container_is_rx = (&q_vector->rx == rc);
	/* For Rx we want to push the delay up and default to low latency.
	 * for Tx we want to pull the delay down and default to high latency.
	 */
	itr = container_is_rx ?
		ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY :
		ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY;

	/* If we didn't update within up to 1 - 2 jiffies we can assume
	 * that either packets are coming in so slow there hasn't been
	 * any work, or that there is so much work that NAPI is dealing
	 * with interrupt moderation and we don't need to do anything.
	 */
	if (time_after(next_update, rc->next_update))
		goto clear_counts;

	prefetch(q_vector->vsi->port_info);

	packets = rc->total_pkts;
	bytes = rc->total_bytes;

	if (container_is_rx) {
		/* If Rx there are 1 to 4 packets and bytes are less than
		 * 9000 assume insufficient data to use bulk rate limiting
		 * approach unless Tx is already in bulk rate limiting. We
		 * are likely latency driven.
		 */
		if (packets && packets < 4 && bytes < 9000 &&
		    (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) {
			itr = ICE_ITR_ADAPTIVE_LATENCY;
			goto adjust_by_size_and_speed;
		}
	} else if (packets < 4) {
		/* If we have Tx and Rx ITR maxed and Tx ITR is running in
		 * bulk mode and we are receiving 4 or fewer packets just
		 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
		 * that the Rx can relax.
		 */
		if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS &&
		    (q_vector->rx.target_itr & ICE_ITR_MASK) ==
		    ICE_ITR_ADAPTIVE_MAX_USECS)
			goto clear_counts;
	} else if (packets > 32) {
		/* If we have processed over 32 packets in a single interrupt
		 * for Tx assume we need to switch over to "bulk" mode.
		 */
		rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY;
	}

	/* We have no packets to actually measure against. This means
	 * either one of the other queues on this vector is active or
	 * we are a Tx queue doing TSO with too high of an interrupt rate.
	 *
	 * Between 4 and 56 we can assume that our current interrupt delay
	 * is only slightly too low. As such we should increase it by a small
	 * fixed amount.
	 */
	if (packets < 56) {
		itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC;
		if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
			itr &= ICE_ITR_ADAPTIVE_LATENCY;
			itr += ICE_ITR_ADAPTIVE_MAX_USECS;
		}
		goto clear_counts;
	}

	if (packets <= 256) {
		itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
		itr &= ICE_ITR_MASK;

		/* Between 56 and 112 is our "goldilocks" zone where we are
		 * working out "just right". Just report that our current
		 * ITR is good for us.
		 */
		if (packets <= 112)
			goto clear_counts;

		/* If packet count is 128 or greater we are likely looking
		 * at a slight overrun of the delay we want. Try halving
		 * our delay to see if that will cut the number of packets
		 * in half per interrupt.
		 */
		itr >>= 1;
		itr &= ICE_ITR_MASK;
		if (itr < ICE_ITR_ADAPTIVE_MIN_USECS)
			itr = ICE_ITR_ADAPTIVE_MIN_USECS;

		goto clear_counts;
	}

	/* The paths below assume we are dealing with a bulk ITR since
	 * number of packets is greater than 256. We are just going to have
	 * to compute a value and try to bring the count under control,
	 * though for smaller packet sizes there isn't much we can do as
	 * NAPI polling will likely be kicking in sooner rather than later.
	 */
	itr = ICE_ITR_ADAPTIVE_BULK;

adjust_by_size_and_speed:

	/* based on checks above packets cannot be 0 so division is safe */
	itr = ice_adjust_itr_by_size_and_speed(q_vector->vsi->port_info,
					       bytes / packets, itr);

clear_counts:
	/* write back value */
	rc->target_itr = itr;

	/* next update should occur within next jiffy */
	rc->next_update = next_update + 1;

	rc->total_bytes = 0;
	rc->total_pkts = 0;
}

/**
 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
 * @itr_idx: interrupt throttling index
 * @itr: interrupt throttling value in usecs
 */
static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
{
	/* The ITR value is reported in microseconds, and the register value is
	 * recorded in 2 microsecond units. For this reason we only need to
	 * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
	 * granularity as a shift instead of division. The mask makes sure the
	 * ITR value is never odd so we don't accidentally write into the field
	 * prior to the ITR field.
	 */
	itr &= ICE_ITR_MASK;

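	/* e.g. an (even) ITR of 50 usecs lands in the INTERVAL field as 25
	 * two-usec units
	 */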
	return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
		(itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
		(itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
}

/* The act of updating the ITR will cause it to immediately trigger. In order
 * to prevent this from throwing off adaptive update statistics we defer the
 * update so that it can only happen so often. So after either Tx or Rx are
 * updated we make the adaptive scheme wait until either the ITR completely
 * expires via the next_update expiration or we have been through at least
 * 3 interrupts.
 */
#define ITR_COUNTDOWN_START 3

/**
 * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt
 * @q_vector: q_vector for which ITR is being updated and interrupt enabled
 */
static void ice_update_ena_itr(struct ice_q_vector *q_vector)
{
	struct ice_ring_container *tx = &q_vector->tx;
	struct ice_ring_container *rx = &q_vector->rx;
	struct ice_vsi *vsi = q_vector->vsi;
	u32 itr_val;

	/* when exiting WB_ON_ITR lets set a low ITR value and trigger
	 * interrupts to expire right away in case we have more work ready to go
	 * already
	 */
	if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) {
		itr_val = ice_buildreg_itr(rx->itr_idx, ICE_WB_ON_ITR_USECS);
		wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
		/* set target back to last user set value */
		rx->target_itr = rx->itr_setting;
		/* set current to what we just wrote and dynamic if needed */
		rx->current_itr = ICE_WB_ON_ITR_USECS |
			(rx->itr_setting & ICE_ITR_DYNAMIC);
		/* allow normal interrupt flow to start */
		q_vector->itr_countdown = 0;
		return;
	}

	/* This will do nothing if dynamic updates are not enabled */
	ice_update_itr(q_vector, tx);
	ice_update_itr(q_vector, rx);

	/* This block of logic allows us to get away with only updating
	 * one ITR value with each interrupt. The idea is to perform a
	 * pseudo-lazy update with the following criteria.
	 *
	 * 1. Rx is given higher priority than Tx if both are in same state
	 * 2. If we must reduce an ITR that is given highest priority.
	 * 3. We then give priority to increasing ITR based on amount.
	 */
	if (rx->target_itr < rx->current_itr) {
		/* Rx ITR needs to be reduced, this is highest priority */
		itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
		rx->current_itr = rx->target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else if ((tx->target_itr < tx->current_itr) ||
		   ((rx->target_itr - rx->current_itr) <
		    (tx->target_itr - tx->current_itr))) {
		/* Tx ITR needs to be reduced, this is second priority
		 * Tx ITR needs to be increased more than Rx, fourth priority
		 */
		itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr);
		tx->current_itr = tx->target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else if (rx->current_itr != rx->target_itr) {
		/* Rx ITR needs to be increased, third priority */
		itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
		rx->current_itr = rx->target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else {
		/* Still have to re-enable the interrupts */
		itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
		if (q_vector->itr_countdown)
			q_vector->itr_countdown--;
	}

	if (!test_bit(__ICE_DOWN, q_vector->vsi->state))
		wr32(&q_vector->vsi->back->hw,
		     GLINT_DYN_CTL(q_vector->reg_idx),
		     itr_val);
}

/**
 * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
 * @q_vector: q_vector to set WB_ON_ITR on
 *
 * We need to tell hardware to write-back completed descriptors even when
 * interrupts are disabled. Descriptors will be written back on cache line
 * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR
 * descriptors may not be written back if they don't fill a cache line until the
 * next interrupt.
 *
 * This sets the write-back frequency to 2 microseconds as that is the minimum
 * value that's not 0 due to ITR granularity. Also, set the INTENA_MSK bit to
 * make sure hardware knows we aren't meddling with the INTENA_M bit.
 */
static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
{
	struct ice_vsi *vsi = q_vector->vsi;

	/* already in WB_ON_ITR mode no need to change it */
	if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE)
		return;

	if (q_vector->num_ring_rx)
		wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
		     ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
						 ICE_RX_ITR));

	if (q_vector->num_ring_tx)
		wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
		     ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
						 ICE_TX_ITR));

	q_vector->itr_countdown = ICE_IN_WB_ON_ITR_MODE;
}

/**
 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 */
int ice_napi_poll(struct napi_struct *napi, int budget)
{
	struct ice_q_vector *q_vector =
		container_of(napi, struct ice_q_vector, napi);
	bool clean_complete = true;
	struct ice_ring *ring;
	int budget_per_ring;
	int work_done = 0;

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	ice_for_each_ring(ring, q_vector->tx)
		if (!ice_clean_tx_irq(ring, budget))
			clean_complete = false;

	/* Handle case where we are called by netpoll with a budget of 0 */
	if (unlikely(budget <= 0))
		return budget;

	/* normally we have 1 Rx ring per q_vector */
	if (unlikely(q_vector->num_ring_rx > 1))
		/* We attempt to distribute budget to each Rx queue fairly, but
		 * don't allow the budget to go below 1 because that would exit
		 * polling early.
		 */
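		/* (e.g. a budget of 64 over two Rx rings gives 32 per ring) */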
		budget_per_ring = max(budget / q_vector->num_ring_rx, 1);
	else
		/* Max of 1 Rx ring in this q_vector so give it the budget */
		budget_per_ring = budget;

	ice_for_each_ring(ring, q_vector->rx) {
		int cleaned;

		cleaned = ice_clean_rx_irq(ring, budget_per_ring);
		work_done += cleaned;
		/* if we clean as many as budgeted, we must not be done */
		if (cleaned >= budget_per_ring)
			clean_complete = false;
	}

	/* If work not completed, return budget and polling will return */
	if (!clean_complete)
		return budget;

	/* Exit the polling mode, but don't re-enable interrupts if stack might
	 * poll us due to busy-polling
	 */
	if (likely(napi_complete_done(napi, work_done)))
		ice_update_ena_itr(q_vector);
	else
		ice_set_wb_on_itr(q_vector);

	return min_t(int, work_done, budget - 1);
}

/* helper function for building cmd/type/offset */
static __le64
build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
{
	return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
			   (td_cmd << ICE_TXD_QW1_CMD_S) |
			   (td_offset << ICE_TXD_QW1_OFFSET_S) |
			   ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
			   (td_tag << ICE_TXD_QW1_L2TAG1_S));
}
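
/* Usage sketch: ice_tx_map() below writes each data descriptor's second
 * quadword via build_ctob(); the DTYPE, CMD, OFFSET, BUF_SZ, and L2TAG1
 * fields are all packed into this single little-endian quadword.
 */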

/**
 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 */
static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
	/* Memory barrier before checking head and tail */
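	/* This pairs with the smp_mb() in ice_clean_tx_irq() so that either
	 * this CPU sees the newly freed descriptors or the cleaner sees the
	 * stopped queue and wakes it
	 */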
	smp_mb();

	/* Check again in a case another CPU has just made room available. */
	if (likely(ICE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_subqueue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
	++tx_ring->tx_stats.restart_q;
	return 0;
}

/**
 * ice_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 */
static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
{
	if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
		return 0;

	return __ice_maybe_stop_tx(tx_ring, size);
}

/**
 * ice_tx_map - Build the Tx descriptor
 * @tx_ring: ring to send buffer on
 * @first: first buffer info buffer to use
 * @off: pointer to struct that holds offload parameters
 *
 * This function loops over the skb data pointed to by *first
 * and gets a physical address for each memory location and programs
 * it and the length into the transmit descriptor.
 */
static void
ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
	   struct ice_tx_offload_params *off)
{
	u64 td_offset, td_tag, td_cmd;
	u16 i = tx_ring->next_to_use;
	skb_frag_t *frag;
	unsigned int data_len, size;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	struct sk_buff *skb;
	dma_addr_t dma;

	td_tag = off->td_l2tag1;
	td_cmd = off->td_cmd;
	td_offset = off->td_offset;
	skb = first->skb;

	data_len = skb->data_len;
	size = skb_headlen(skb);

	tx_desc = ICE_TX_DESC(tx_ring, i);

	if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
		td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
		td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
			ICE_TX_FLAGS_VLAN_S;
	}

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buf = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;

		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buf, len, size);
		dma_unmap_addr_set(tx_buf, dma, dma);

		/* grow max_data so this chunk ends on a read request
		 * (ICE_MAX_READ_REQ_SIZE) boundary
		 */
		max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
		tx_desc->buf_addr = cpu_to_le64(dma);

		/* account for data chunks larger than the hardware
		 * can handle
		 */
		while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				build_ctob(td_cmd, td_offset, max_data, td_tag);

			tx_desc++;
			i++;

			if (i == tx_ring->count) {
				tx_desc = ICE_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += max_data;
			size -= max_data;

			max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
			tx_desc->buf_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
							  size, td_tag);

		tx_desc++;
		i++;

		if (i == tx_ring->count) {
			tx_desc = ICE_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_buf = &tx_ring->tx_buf[i];
	}

	/* record bytecount for BQL */
	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* record SW timestamp if HW timestamp is not available */
	skb_tx_timestamp(first->skb);

	i++;
	if (i == tx_ring->count)
		i = 0;

	/* write last descriptor with RS and EOP bits */
	td_cmd |= (u64)(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS);
	tx_desc->cmd_type_offset_bsz =
		build_ctob(td_cmd, td_offset, size, td_tag);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 *
	 * We also use this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	tx_ring->next_to_use = i;

	ice_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* notify HW of packet */
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
		writel(i, tx_ring->tail);

	return;

dma_error:
	/* clear DMA mappings for failed tx_buf map */
	for (;;) {
		tx_buf = &tx_ring->tx_buf[i];
		ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
		if (tx_buf == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}

/**
 * ice_tx_csum - Enable Tx checksum offloads
 * @first: pointer to the first descriptor
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise.
 */
static
int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	u32 l4_len = 0, l3_len = 0, l2_len = 0;
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		unsigned char *hdr;
	} l4;
	__be16 frag_off, protocol;
	unsigned char *exthdr;
	u32 offset, cmd = 0;
	u8 l4_proto = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* compute outer L2 header size */
	l2_len = ip.hdr - skb->data;
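	/* MACLEN is programmed in 2 byte words, hence the l2_len / 2 below;
	 * e.g. a plain 14 byte Ethernet header yields a MACLEN of 7. IPLEN
	 * and L4LEN further down are in 4 byte words.
	 */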
1767 offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
1768
1769 if (skb->encapsulation)
1770 return -1;
1771
1772 /* Enable IP checksum offloads */
1773 protocol = vlan_get_protocol(skb);
1774 if (protocol == htons(ETH_P_IP)) {
1775 l4_proto = ip.v4->protocol;
1776 /* the stack computes the IP header already, the only time we
1777 * need the hardware to recompute it is in the case of TSO.
1778 */
1779 if (first->tx_flags & ICE_TX_FLAGS_TSO)
1780 cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
1781 else
1782 cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
1783
1784 } else if (protocol == htons(ETH_P_IPV6)) {
1785 cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
1786 exthdr = ip.hdr + sizeof(*ip.v6);
1787 l4_proto = ip.v6->nexthdr;
1788 if (l4.hdr != exthdr)
1789 ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
1790 &frag_off);
1791 } else {
1792 return -1;
1793 }
1794
1795 /* compute inner L3 header size */
1796 l3_len = l4.hdr - ip.hdr;
1797 offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;
1798
1799 /* Enable L4 checksum offloads */
1800 switch (l4_proto) {
1801 case IPPROTO_TCP:
1802 /* enable checksum offloads */
1803 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
1804 l4_len = l4.tcp->doff;
1805 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1806 break;
1807 case IPPROTO_UDP:
1808 /* enable UDP checksum offload */
1809 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
1810 l4_len = (sizeof(struct udphdr) >> 2);
1811 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1812 break;
1813 case IPPROTO_SCTP:
1814 /* enable SCTP checksum offload */
1815 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
1816 l4_len = sizeof(struct sctphdr) >> 2;
1817 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1818 break;
1819
1820 default:
1821 if (first->tx_flags & ICE_TX_FLAGS_TSO)
1822 return -1;
1823 skb_checksum_help(skb);
1824 return 0;
1825 }
1826
1827 off->td_cmd |= cmd;
1828 off->td_offset |= offset;
1829 return 1;
1830}
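
/* Worked example of the offset field built above, assuming a plain
 * non-TSO TCPv4 frame with no options: MACLEN = 14 / 2 = 7 (2-byte
 * words), IPLEN = 20 / 4 = 5 (4-byte words) and L4LEN = doff = 5
 * (4-byte words), each shifted into its field. l2_len and l3_len are
 * divided down to word counts before the shift, while TCP's doff is
 * already a dword count.
 */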
1831
1832/**
1833 * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
1834 * @tx_ring: ring to send buffer on
1835 * @first: pointer to struct ice_tx_buf
1836 *
1837 * Checks the skb and sets up the generic transmit flags related to
1838 * VLAN tagging for the HW, such as VLAN, DCB, etc.
1839 *
1840 * Returns an error code indicating that the frame should be dropped on
1841 * failure, otherwise returns 0 to indicate the flags have been set properly.
1842 */
1843static int
1844ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
1845{
1846 struct sk_buff *skb = first->skb;
1847 __be16 protocol = skb->protocol;
1848
1849 if (protocol == htons(ETH_P_8021Q) &&
1850 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
1851 /* when HW VLAN acceleration is turned off by the user the
1852 * stack sets the protocol to 8021q so that the driver
1853 * can take any steps required to support the SW only
1854 * VLAN handling. In our case the driver doesn't need
1855 * to take any further steps so just set the protocol
1856 * to the encapsulated ethertype.
1857 */
1858 skb->protocol = vlan_get_protocol(skb);
1859 return 0;
1860 }
1861
1862 /* if we have a HW VLAN tag being added, default to the HW one */
1863 if (skb_vlan_tag_present(skb)) {
1864 first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
1865 first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
1866 } else if (protocol == htons(ETH_P_8021Q)) {
1867 struct vlan_hdr *vhdr, _vhdr;
1868
1869 /* for SW VLAN, check the next protocol and store the tag */
1870 vhdr = (struct vlan_hdr *)skb_header_pointer(skb, ETH_HLEN,
1871 sizeof(_vhdr),
1872 &_vhdr);
1873 if (!vhdr)
1874 return -EINVAL;
1875
1876 first->tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
1877 ICE_TX_FLAGS_VLAN_S;
1878 first->tx_flags |= ICE_TX_FLAGS_SW_VLAN;
1879 }
1880
1881 return ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
1882}
1883
1884/**
1885 * ice_tso - computes mss and TSO length to prepare for TSO
1886 * @first: pointer to struct ice_tx_buf
1887 * @off: pointer to struct that holds offload parameters
1888 *
1889 * Returns 0 or error (negative) if TSO can't happen, 1 otherwise.
1890 */
1891static
1892int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1893{
1894 struct sk_buff *skb = first->skb;
1895 union {
1896 struct iphdr *v4;
1897 struct ipv6hdr *v6;
1898 unsigned char *hdr;
1899 } ip;
1900 union {
1901 struct tcphdr *tcp;
1902 unsigned char *hdr;
1903 } l4;
1904 u64 cd_mss, cd_tso_len;
1905 u32 paylen, l4_start;
1906 int err;
1907
1908 if (skb->ip_summed != CHECKSUM_PARTIAL)
1909 return 0;
1910
1911 if (!skb_is_gso(skb))
1912 return 0;
1913
1914 err = skb_cow_head(skb, 0);
1915 if (err < 0)
1916 return err;
1917
1918 /* cppcheck-suppress unreadVariable */
1919 ip.hdr = skb_network_header(skb);
1920 l4.hdr = skb_transport_header(skb);
1921
1922 /* initialize outer IP header fields */
1923 if (ip.v4->version == 4) {
1924 ip.v4->tot_len = 0;
1925 ip.v4->check = 0;
1926 } else {
1927 ip.v6->payload_len = 0;
1928 }
1929
1930 /* determine offset of transport header */
1931 l4_start = l4.hdr - skb->data;
1932
1933 /* remove payload length from checksum */
1934 paylen = skb->len - l4_start;
1935 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
1936
1937 /* compute length of segmentation header */
1938 off->header_len = (l4.tcp->doff * 4) + l4_start;
1939
1940 /* update gso_segs and bytecount */
1941 first->gso_segs = skb_shinfo(skb)->gso_segs;
1942 first->bytecount += (first->gso_segs - 1) * off->header_len;
1943
1944 cd_tso_len = skb->len - off->header_len;
1945 cd_mss = skb_shinfo(skb)->gso_size;
1946
1947 /* record cdesc_qw1 with TSO parameters */
1948 off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
1949 (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
1950 (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
1951 (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
1952 first->tx_flags |= ICE_TX_FLAGS_TSO;
1953 return 1;
1954}
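
/* Worked example of the bytecount adjustment above, assuming 54 bytes
 * of Ethernet + IPv4 + TCP headers: a 9000-byte payload with
 * gso_size = 1448 yields gso_segs = 7, so bytecount grows by
 * (7 - 1) * 54 = 324 bytes for the headers that hardware replicates
 * into every segment after the first.
 */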
1955
1956/**
1957 * ice_txd_use_count - estimate the number of descriptors needed for Tx
1958 * @size: transmit request size in bytes
1959 *
1960 * Due to hardware alignment restrictions (4K alignment), we need to
1961 * assume that we can have no more than 12K of data per descriptor, even
1962 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
1963 * Thus, we need to divide by 12K. But division is slow! Instead,
1964 * we decompose the operation into shifts and one relatively cheap
1965 * multiply operation.
1966 *
1967 * To divide by 12K, we first divide by 4K, then divide by 3:
1968 * To divide by 4K, shift right by 12 bits
1969 * To divide by 3, multiply by 85, then divide by 256
1970 * (Divide by 256 is done by shifting right by 8 bits)
1971 * Finally, we add one to round up. Because 256 isn't an exact multiple of
1972 * 3, we'll underestimate near each multiple of 12K. This is actually more
1973 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
1974 * segment. For our purposes this is accurate out to 1M which is orders of
1975 * magnitude greater than our largest possible GSO size.
1976 *
1977 * This would then be implemented as:
1978 * return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
1979 *
1980 * Since multiplication and division are commutative, we can reorder
1981 * operations into:
1982 * return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
1983 */
1984static unsigned int ice_txd_use_count(unsigned int size)
1985{
1986 return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
1987}
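
/* Sanity check of the arithmetic above, taking the "+1" to be
 * ICE_DESCS_FOR_SKB_DATA_PTR: a 64K send gives (65536 * 85) >> 20 = 5,
 * plus one for 6 total, matching the exact ceiling of 65536 / 12288.
 * Just past a 12K multiple, e.g. 12289 bytes, the estimate returns 1
 * rather than 2; that is the deliberate underestimate described above,
 * absorbed by the 16K - 1 bytes the last descriptor can actually hold.
 */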
1988
1989/**
1990 * ice_xmit_desc_count - calculate number of Tx descriptors needed
1991 * @skb: send buffer
1992 *
1993 * Returns number of data descriptors needed for this skb.
1994 */
1995static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
1996{
1997 const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
1998 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
1999 unsigned int count = 0, size = skb_headlen(skb);
2000
2001 for (;;) {
2002 count += ice_txd_use_count(size);
2003
2004 if (!nr_frags--)
2005 break;
2006
2007 size = skb_frag_size(frag++);
2008 }
2009
2010 return count;
2011}
2012
2013/**
2014 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
2015 * @skb: send buffer
2016 *
2017 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
2018 * and so we need to figure out the cases where we need to linearize the skb.
2019 *
2020 * For TSO we need to count the TSO header and segment payload separately.
2021 * As such we need to check cases where we have 7 fragments or more as we
2022 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
2023 * the segment payload in the first descriptor, and another 7 for the
2024 * fragments.
2025 */
2026static bool __ice_chk_linearize(struct sk_buff *skb)
2027{
2028 const skb_frag_t *frag, *stale;
2029 int nr_frags, sum;
2030
2031 /* no need to check if number of frags is less than 7 */
2032 nr_frags = skb_shinfo(skb)->nr_frags;
2033 if (nr_frags < (ICE_MAX_BUF_TXD - 1))
2034 return false;
2035
2036 /* We need to walk through the list and validate that each group
2037 * of 6 fragments totals at least gso_size.
2038 */
2039 nr_frags -= ICE_MAX_BUF_TXD - 2;
2040 frag = &skb_shinfo(skb)->frags[0];
2041
2042 /* Initialize size to the negative value of gso_size minus 1. We
2043 * use this as the worst case scenario in which the frag ahead
2044 * of us only provides one byte which is why we are limited to 6
2045 * descriptors for a single transmit as the header and previous
2046 * fragment are already consuming 2 descriptors.
2047 */
2048 sum = 1 - skb_shinfo(skb)->gso_size;
2049
2050 /* Add size of frags 0 through 4 to create our initial sum */
2051 sum += skb_frag_size(frag++);
2052 sum += skb_frag_size(frag++);
2053 sum += skb_frag_size(frag++);
2054 sum += skb_frag_size(frag++);
2055 sum += skb_frag_size(frag++);
2056
2057 /* Walk through fragments adding latest fragment, testing it, and
2058 * then removing stale fragments from the sum.
2059 */
2060 stale = &skb_shinfo(skb)->frags[0];
2061 for (;;) {
2062 sum += skb_frag_size(frag++);
2063
2064 /* if sum is negative we failed to make sufficient progress */
2065 if (sum < 0)
2066 return true;
2067
2068 if (!nr_frags--)
2069 break;
2070
2071 sum -= skb_frag_size(stale++);
2072 }
2073
2074 return false;
2075}
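
/* Worked example of the sliding window above: every six consecutive
 * frags must cover at least gso_size - 1 bytes. With gso_size = 4096
 * and eight 512-byte frags the sum after six frags is
 * 1 - 4096 + 6 * 512 = -1023, so the skb is linearized; with
 * gso_size = 2048 the same layout passes, since 6 * 512 >= 2048.
 */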
2076
2077/**
2078 * ice_chk_linearize - Check if there are more than 8 fragments per packet
2079 * @skb: send buffer
2080 * @count: number of buffers used
2081 *
2082 * Note: Our HW can't scatter-gather more than 8 fragments to build
2083 * a packet on the wire and so we need to figure out the cases where we
2084 * need to linearize the skb.
2085 */
2086static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
2087{
2088 /* Both TSO and single send will work if count is less than 8 */
2089 if (likely(count < ICE_MAX_BUF_TXD))
2090 return false;
2091
2092 if (skb_is_gso(skb))
2093 return __ice_chk_linearize(skb);
2094
2095 /* we can support up to 8 data buffers for a single send */
2096 return count != ICE_MAX_BUF_TXD;
2097}
2098
2099/**
2100 * ice_xmit_frame_ring - Sends buffer on Tx ring
2101 * @skb: send buffer
2102 * @tx_ring: ring to send buffer on
2103 *
2104 * Returns NETDEV_TX_OK if sent, else an error code
2105 */
2106static netdev_tx_t
2107ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
2108{
2109 struct ice_tx_offload_params offload = { 0 };
2110 struct ice_vsi *vsi = tx_ring->vsi;
2111 struct ice_tx_buf *first;
2112 unsigned int count;
2113 int tso, csum;
2114
2115 count = ice_xmit_desc_count(skb);
2116 if (ice_chk_linearize(skb, count)) {
2117 if (__skb_linearize(skb))
2118 goto out_drop;
2119 count = ice_txd_use_count(skb->len);
2120 tx_ring->tx_stats.tx_linearize++;
2121 }
2122
2123 /* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
2124 * + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
2125 * + 4 desc gap to avoid the cache line where head is,
2126 * + 1 desc for context descriptor,
2127 * otherwise try next time
2128 */
2129 if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
2130 ICE_DESCS_FOR_CTX_DESC)) {
2131 tx_ring->tx_stats.tx_busy++;
2132 return NETDEV_TX_BUSY;
2133 }
2134
2135 offload.tx_ring = tx_ring;
2136
2137 /* record the location of the first descriptor for this packet */
2138 first = &tx_ring->tx_buf[tx_ring->next_to_use];
2139 first->skb = skb;
2140 first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
2141 first->gso_segs = 1;
2142 first->tx_flags = 0;
2143
2144 /* prepare the VLAN tagging flags for Tx */
2145 if (ice_tx_prepare_vlan_flags(tx_ring, first))
2146 goto out_drop;
2147
2148 /* set up TSO offload */
2149 tso = ice_tso(first, &offload);
2150 if (tso < 0)
2151 goto out_drop;
2152
2153 /* always set up Tx checksum offload */
2154 csum = ice_tx_csum(first, &offload);
2155 if (csum < 0)
2156 goto out_drop;
2157
2158 /* allow CONTROL frames egress from main VSI if FW LLDP disabled */
2159 if (unlikely(skb->priority == TC_PRIO_CONTROL &&
2160 vsi->type == ICE_VSI_PF &&
2161 vsi->port_info->is_sw_lldp))
2162 offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2163 ICE_TX_CTX_DESC_SWTCH_UPLINK <<
2164 ICE_TXD_CTX_QW1_CMD_S);
2165
2166 if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
2167 struct ice_tx_ctx_desc *cdesc;
2168 int i = tx_ring->next_to_use;
2169
2170 /* grab the next descriptor */
2171 cdesc = ICE_TX_CTX_DESC(tx_ring, i);
2172 i++;
2173 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2174
2175 /* setup context descriptor */
2176 cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
2177 cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
2178 cdesc->rsvd = cpu_to_le16(0);
2179 cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
2180 }
2181
2182 ice_tx_map(tx_ring, first, &offload);
2183 return NETDEV_TX_OK;
2184
2185out_drop:
2186 dev_kfree_skb_any(skb);
2187 return NETDEV_TX_OK;
2188}
2189
2190/**
2191 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
2192 * @skb: send buffer
2193 * @netdev: network interface device structure
2194 *
2195 * Returns NETDEV_TX_OK if sent, else an error code
2196 */
2197netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2198{
2199 struct ice_netdev_priv *np = netdev_priv(netdev);
2200 struct ice_vsi *vsi = np->vsi;
2201 struct ice_ring *tx_ring;
2202
2203 tx_ring = vsi->tx_rings[skb->queue_mapping];
2204
2205 /* hardware can't handle really short frames, hardware padding works
2206 * beyond this point
2207 */
2208 if (skb_put_padto(skb, ICE_MIN_TX_LEN))
2209 return NETDEV_TX_OK;
2210
2211 return ice_xmit_frame_ring(skb, tx_ring);
2212}
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2018, Intel Corporation. */
3
4/* The driver transmit and receive code */
5
6#include <linux/prefetch.h>
7#include <linux/mm.h>
8#include <linux/bpf_trace.h>
9#include <net/xdp.h>
10#include "ice_txrx_lib.h"
11#include "ice_lib.h"
12#include "ice.h"
13#include "ice_trace.h"
14#include "ice_dcb_lib.h"
15#include "ice_xsk.h"
16
17#define ICE_RX_HDR_SIZE 256
18
19#define FDIR_DESC_RXDID 0x40
20#define ICE_FDIR_CLEAN_DELAY 10
21
22/**
23 * ice_prgm_fdir_fltr - Program a Flow Director filter
24 * @vsi: VSI to send dummy packet
25 * @fdir_desc: flow director descriptor
26 * @raw_packet: allocated buffer for flow director
27 */
28int
29ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
30 u8 *raw_packet)
31{
32 struct ice_tx_buf *tx_buf, *first;
33 struct ice_fltr_desc *f_desc;
34 struct ice_tx_desc *tx_desc;
35 struct ice_ring *tx_ring;
36 struct device *dev;
37 dma_addr_t dma;
38 u32 td_cmd;
39 u16 i;
40
41 /* VSI and Tx ring */
42 if (!vsi)
43 return -ENOENT;
44 tx_ring = vsi->tx_rings[0];
45 if (!tx_ring || !tx_ring->desc)
46 return -ENOENT;
47 dev = tx_ring->dev;
48
49 /* we are using two descriptors to add/del a filter and we can wait */
50 for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {
51 if (!i)
52 return -EAGAIN;
53 msleep_interruptible(1);
54 }
55
56 dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE,
57 DMA_TO_DEVICE);
58
59 if (dma_mapping_error(dev, dma))
60 return -EINVAL;
61
62 /* grab the next descriptor */
63 i = tx_ring->next_to_use;
64 first = &tx_ring->tx_buf[i];
65 f_desc = ICE_TX_FDIRDESC(tx_ring, i);
66 memcpy(f_desc, fdir_desc, sizeof(*f_desc));
67
68 i++;
69 i = (i < tx_ring->count) ? i : 0;
70 tx_desc = ICE_TX_DESC(tx_ring, i);
71 tx_buf = &tx_ring->tx_buf[i];
72
73 i++;
74 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
75
76 memset(tx_buf, 0, sizeof(*tx_buf));
77 dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE);
78 dma_unmap_addr_set(tx_buf, dma, dma);
79
80 tx_desc->buf_addr = cpu_to_le64(dma);
81 td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
82 ICE_TX_DESC_CMD_RE;
83
84 tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT;
85 tx_buf->raw_buf = raw_packet;
86
87 tx_desc->cmd_type_offset_bsz =
88 ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0);
89
90 /* Force memory write to complete before letting h/w know
91 * there are new descriptors to fetch.
92 */
93 wmb();
94
95 /* mark the data descriptor to be watched */
96 first->next_to_watch = tx_desc;
97
98 writel(tx_ring->next_to_use, tx_ring->tail);
99
100 return 0;
101}
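
/* Programming one filter thus consumes two ring entries: the filter
 * descriptor copied into the ring above, followed by a data descriptor
 * pointing at the dummy raw packet. This is why the availability loop
 * waits for at least two unused descriptors before proceeding.
 */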
102
103/**
104 * ice_unmap_and_free_tx_buf - Release a Tx buffer
105 * @ring: the ring that owns the buffer
106 * @tx_buf: the buffer to free
107 */
108static void
109ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
110{
111 if (tx_buf->skb) {
112 if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
113 devm_kfree(ring->dev, tx_buf->raw_buf);
114 else if (ice_ring_is_xdp(ring))
115 page_frag_free(tx_buf->raw_buf);
116 else
117 dev_kfree_skb_any(tx_buf->skb);
118 if (dma_unmap_len(tx_buf, len))
119 dma_unmap_single(ring->dev,
120 dma_unmap_addr(tx_buf, dma),
121 dma_unmap_len(tx_buf, len),
122 DMA_TO_DEVICE);
123 } else if (dma_unmap_len(tx_buf, len)) {
124 dma_unmap_page(ring->dev,
125 dma_unmap_addr(tx_buf, dma),
126 dma_unmap_len(tx_buf, len),
127 DMA_TO_DEVICE);
128 }
129
130 tx_buf->next_to_watch = NULL;
131 tx_buf->skb = NULL;
132 dma_unmap_len_set(tx_buf, len, 0);
133 /* tx_buf must be completely set up in the transmit path */
134}
135
136static struct netdev_queue *txring_txq(const struct ice_ring *ring)
137{
138 return netdev_get_tx_queue(ring->netdev, ring->q_index);
139}
140
141/**
142 * ice_clean_tx_ring - Free any empty Tx buffers
143 * @tx_ring: ring to be cleaned
144 */
145void ice_clean_tx_ring(struct ice_ring *tx_ring)
146{
147 u16 i;
148
149 if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
150 ice_xsk_clean_xdp_ring(tx_ring);
151 goto tx_skip_free;
152 }
153
154 /* ring already cleared, nothing to do */
155 if (!tx_ring->tx_buf)
156 return;
157
158 /* Free all the Tx ring sk_buffs */
159 for (i = 0; i < tx_ring->count; i++)
160 ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);
161
162tx_skip_free:
163 memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);
164
165 /* Zero out the descriptor ring */
166 memset(tx_ring->desc, 0, tx_ring->size);
167
168 tx_ring->next_to_use = 0;
169 tx_ring->next_to_clean = 0;
170
171 if (!tx_ring->netdev)
172 return;
173
174 /* cleanup Tx queue statistics */
175 netdev_tx_reset_queue(txring_txq(tx_ring));
176}
177
178/**
179 * ice_free_tx_ring - Free Tx resources per queue
180 * @tx_ring: Tx descriptor ring for a specific queue
181 *
182 * Free all transmit software resources
183 */
184void ice_free_tx_ring(struct ice_ring *tx_ring)
185{
186 ice_clean_tx_ring(tx_ring);
187 devm_kfree(tx_ring->dev, tx_ring->tx_buf);
188 tx_ring->tx_buf = NULL;
189
190 if (tx_ring->desc) {
191 dmam_free_coherent(tx_ring->dev, tx_ring->size,
192 tx_ring->desc, tx_ring->dma);
193 tx_ring->desc = NULL;
194 }
195}
196
197/**
198 * ice_clean_tx_irq - Reclaim resources after transmit completes
199 * @tx_ring: Tx ring to clean
200 * @napi_budget: Used to determine if we are in netpoll
201 *
202 * Returns true if there's any budget left (i.e. the clean is finished)
203 */
204static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
205{
206 unsigned int total_bytes = 0, total_pkts = 0;
207 unsigned int budget = ICE_DFLT_IRQ_WORK;
208 struct ice_vsi *vsi = tx_ring->vsi;
209 s16 i = tx_ring->next_to_clean;
210 struct ice_tx_desc *tx_desc;
211 struct ice_tx_buf *tx_buf;
212
213 tx_buf = &tx_ring->tx_buf[i];
214 tx_desc = ICE_TX_DESC(tx_ring, i);
215 i -= tx_ring->count;
216
217 prefetch(&vsi->state);
218
219 do {
220 struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
221
222 /* if next_to_watch is not set then there is no work pending */
223 if (!eop_desc)
224 break;
225
226 smp_rmb(); /* prevent any other reads prior to eop_desc */
227
228 ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
229 /* if the descriptor isn't done, no work yet to do */
230 if (!(eop_desc->cmd_type_offset_bsz &
231 cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
232 break;
233
234 /* clear next_to_watch to prevent false hangs */
235 tx_buf->next_to_watch = NULL;
236
237 /* update the statistics for this packet */
238 total_bytes += tx_buf->bytecount;
239 total_pkts += tx_buf->gso_segs;
240
241 if (ice_ring_is_xdp(tx_ring))
242 page_frag_free(tx_buf->raw_buf);
243 else
244 /* free the skb */
245 napi_consume_skb(tx_buf->skb, napi_budget);
246
247 /* unmap skb header data */
248 dma_unmap_single(tx_ring->dev,
249 dma_unmap_addr(tx_buf, dma),
250 dma_unmap_len(tx_buf, len),
251 DMA_TO_DEVICE);
252
253 /* clear tx_buf data */
254 tx_buf->skb = NULL;
255 dma_unmap_len_set(tx_buf, len, 0);
256
257 /* unmap remaining buffers */
258 while (tx_desc != eop_desc) {
259 ice_trace(clean_tx_irq_unmap, tx_ring, tx_desc, tx_buf);
260 tx_buf++;
261 tx_desc++;
262 i++;
263 if (unlikely(!i)) {
264 i -= tx_ring->count;
265 tx_buf = tx_ring->tx_buf;
266 tx_desc = ICE_TX_DESC(tx_ring, 0);
267 }
268
269 /* unmap any remaining paged data */
270 if (dma_unmap_len(tx_buf, len)) {
271 dma_unmap_page(tx_ring->dev,
272 dma_unmap_addr(tx_buf, dma),
273 dma_unmap_len(tx_buf, len),
274 DMA_TO_DEVICE);
275 dma_unmap_len_set(tx_buf, len, 0);
276 }
277 }
278 ice_trace(clean_tx_irq_unmap_eop, tx_ring, tx_desc, tx_buf);
279
280 /* move us one more past the eop_desc for start of next pkt */
281 tx_buf++;
282 tx_desc++;
283 i++;
284 if (unlikely(!i)) {
285 i -= tx_ring->count;
286 tx_buf = tx_ring->tx_buf;
287 tx_desc = ICE_TX_DESC(tx_ring, 0);
288 }
289
290 prefetch(tx_desc);
291
292 /* update budget accounting */
293 budget--;
294 } while (likely(budget));
295
296 i += tx_ring->count;
297 tx_ring->next_to_clean = i;
298
299 ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
300
301 if (ice_ring_is_xdp(tx_ring))
302 return !!budget;
303
304 netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
305 total_bytes);
306
307#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
308 if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
309 (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
310 /* Make sure that anybody stopping the queue after this
311 * sees the new next_to_clean.
312 */
313 smp_mb();
314 if (__netif_subqueue_stopped(tx_ring->netdev,
315 tx_ring->q_index) &&
316 !test_bit(ICE_VSI_DOWN, vsi->state)) {
317 netif_wake_subqueue(tx_ring->netdev,
318 tx_ring->q_index);
319 ++tx_ring->tx_stats.restart_q;
320 }
321 }
322
323 return !!budget;
324}
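
/* The TX_WAKE_THRESHOLD test above adds hysteresis: the queue is only
 * restarted once at least twice DESC_NEEDED descriptors are free, so a
 * queue stopped under load does not bounce between the stopped and
 * running states on every completion.
 */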
325
326/**
327 * ice_setup_tx_ring - Allocate the Tx descriptors
328 * @tx_ring: the Tx ring to set up
329 *
330 * Return 0 on success, negative on error
331 */
332int ice_setup_tx_ring(struct ice_ring *tx_ring)
333{
334 struct device *dev = tx_ring->dev;
335
336 if (!dev)
337 return -ENOMEM;
338
339 /* warn if we are about to overwrite the pointer */
340 WARN_ON(tx_ring->tx_buf);
341 tx_ring->tx_buf =
342 devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
343 GFP_KERNEL);
344 if (!tx_ring->tx_buf)
345 return -ENOMEM;
346
347 /* round up to nearest page */
348 tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
349 PAGE_SIZE);
350 tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
351 GFP_KERNEL);
352 if (!tx_ring->desc) {
353 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
354 tx_ring->size);
355 goto err;
356 }
357
358 tx_ring->next_to_use = 0;
359 tx_ring->next_to_clean = 0;
360 tx_ring->tx_stats.prev_pkt = -1;
361 return 0;
362
363err:
364 devm_kfree(dev, tx_ring->tx_buf);
365 tx_ring->tx_buf = NULL;
366 return -ENOMEM;
367}
368
369/**
370 * ice_clean_rx_ring - Free Rx buffers
371 * @rx_ring: ring to be cleaned
372 */
373void ice_clean_rx_ring(struct ice_ring *rx_ring)
374{
375 struct device *dev = rx_ring->dev;
376 u16 i;
377
378 /* ring already cleared, nothing to do */
379 if (!rx_ring->rx_buf)
380 return;
381
382 if (rx_ring->skb) {
383 dev_kfree_skb(rx_ring->skb);
384 rx_ring->skb = NULL;
385 }
386
387 if (rx_ring->xsk_pool) {
388 ice_xsk_clean_rx_ring(rx_ring);
389 goto rx_skip_free;
390 }
391
392 /* Free all the Rx ring sk_buffs */
393 for (i = 0; i < rx_ring->count; i++) {
394 struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
395
396 if (!rx_buf->page)
397 continue;
398
399 /* Invalidate cache lines that may have been written to by
400 * device so that we avoid corrupting memory.
401 */
402 dma_sync_single_range_for_cpu(dev, rx_buf->dma,
403 rx_buf->page_offset,
404 rx_ring->rx_buf_len,
405 DMA_FROM_DEVICE);
406
407 /* free resources associated with mapping */
408 dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
409 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
410 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
411
412 rx_buf->page = NULL;
413 rx_buf->page_offset = 0;
414 }
415
416rx_skip_free:
417 memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);
418
419 /* Zero out the descriptor ring */
420 memset(rx_ring->desc, 0, rx_ring->size);
421
422 rx_ring->next_to_alloc = 0;
423 rx_ring->next_to_clean = 0;
424 rx_ring->next_to_use = 0;
425}
426
427/**
428 * ice_free_rx_ring - Free Rx resources
429 * @rx_ring: ring to clean the resources from
430 *
431 * Free all receive software resources
432 */
433void ice_free_rx_ring(struct ice_ring *rx_ring)
434{
435 ice_clean_rx_ring(rx_ring);
436 if (rx_ring->vsi->type == ICE_VSI_PF)
437 if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
438 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
439 rx_ring->xdp_prog = NULL;
440 devm_kfree(rx_ring->dev, rx_ring->rx_buf);
441 rx_ring->rx_buf = NULL;
442
443 if (rx_ring->desc) {
444 dmam_free_coherent(rx_ring->dev, rx_ring->size,
445 rx_ring->desc, rx_ring->dma);
446 rx_ring->desc = NULL;
447 }
448}
449
450/**
451 * ice_setup_rx_ring - Allocate the Rx descriptors
452 * @rx_ring: the Rx ring to set up
453 *
454 * Return 0 on success, negative on error
455 */
456int ice_setup_rx_ring(struct ice_ring *rx_ring)
457{
458 struct device *dev = rx_ring->dev;
459
460 if (!dev)
461 return -ENOMEM;
462
463 /* warn if we are about to overwrite the pointer */
464 WARN_ON(rx_ring->rx_buf);
465 rx_ring->rx_buf =
466 devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
467 GFP_KERNEL);
468 if (!rx_ring->rx_buf)
469 return -ENOMEM;
470
471 /* round up to nearest page */
472 rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
473 PAGE_SIZE);
474 rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
475 GFP_KERNEL);
476 if (!rx_ring->desc) {
477 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
478 rx_ring->size);
479 goto err;
480 }
481
482 rx_ring->next_to_use = 0;
483 rx_ring->next_to_clean = 0;
484
485 if (ice_is_xdp_ena_vsi(rx_ring->vsi))
486 WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
487
488 if (rx_ring->vsi->type == ICE_VSI_PF &&
489 !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
490 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
491 rx_ring->q_index, rx_ring->q_vector->napi.napi_id))
492 goto err;
493 return 0;
494
495err:
496 devm_kfree(dev, rx_ring->rx_buf);
497 rx_ring->rx_buf = NULL;
498 return -ENOMEM;
499}
500
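/**
 * ice_rx_frame_truesize - compute the truesize of a received frame
 * @rx_ring: Rx ring the frame arrived on
 * @size: size of the frame, used only when PAGE_SIZE >= 8192
 *
 * With 4K pages the Rx buffers are fixed power-of-2 half pages, so the
 * truesize is constant; with larger pages it is derived from the frame
 * size itself, plus the Rx offset and skb_shared_info overhead when a
 * headroom offset is in use.
 */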
501static unsigned int
502ice_rx_frame_truesize(struct ice_ring *rx_ring, unsigned int __maybe_unused size)
503{
504 unsigned int truesize;
505
506#if (PAGE_SIZE < 8192)
507 truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
508#else
509 truesize = rx_ring->rx_offset ?
510 SKB_DATA_ALIGN(rx_ring->rx_offset + size) +
511 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
512 SKB_DATA_ALIGN(size);
513#endif
514 return truesize;
515}
516
517/**
518 * ice_run_xdp - Executes an XDP program on initialized xdp_buff
519 * @rx_ring: Rx ring
520 * @xdp: xdp_buff used as input to the XDP program
521 * @xdp_prog: XDP program to run
522 *
523 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
524 */
525static int
526ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
527 struct bpf_prog *xdp_prog)
528{
529 struct ice_ring *xdp_ring;
530 int err, result;
531 u32 act;
532
533 act = bpf_prog_run_xdp(xdp_prog, xdp);
534 switch (act) {
535 case XDP_PASS:
536 return ICE_XDP_PASS;
537 case XDP_TX:
538 xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
539 result = ice_xmit_xdp_buff(xdp, xdp_ring);
540 if (result == ICE_XDP_CONSUMED)
541 goto out_failure;
542 return result;
543 case XDP_REDIRECT:
544 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
545 if (err)
546 goto out_failure;
547 return ICE_XDP_REDIR;
548 default:
549 bpf_warn_invalid_xdp_action(act);
550 fallthrough;
551 case XDP_ABORTED:
552out_failure:
553 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
554 fallthrough;
555 case XDP_DROP:
556 return ICE_XDP_CONSUMED;
557 }
558}
559
560/**
561 * ice_xdp_xmit - submit packets to XDP ring for transmission
562 * @dev: netdev
563 * @n: number of XDP frames to be transmitted
564 * @frames: XDP frames to be transmitted
565 * @flags: transmit flags
566 *
567 * Returns the number of frames successfully sent. Failed frames
568 * will be freed by the XDP core.
569 * For error cases, a negative errno code is returned and no frames
570 * are transmitted (the caller must handle freeing the frames).
571 */
572int
573ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
574 u32 flags)
575{
576 struct ice_netdev_priv *np = netdev_priv(dev);
577 unsigned int queue_index = smp_processor_id();
578 struct ice_vsi *vsi = np->vsi;
579 struct ice_ring *xdp_ring;
580 int nxmit = 0, i;
581
582 if (test_bit(ICE_VSI_DOWN, vsi->state))
583 return -ENETDOWN;
584
585 if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
586 return -ENXIO;
587
588 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
589 return -EINVAL;
590
591 xdp_ring = vsi->xdp_rings[queue_index];
592 for (i = 0; i < n; i++) {
593 struct xdp_frame *xdpf = frames[i];
594 int err;
595
596 err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
597 if (err != ICE_XDP_TX)
598 break;
599 nxmit++;
600 }
601
602 if (unlikely(flags & XDP_XMIT_FLUSH))
603 ice_xdp_ring_update_tail(xdp_ring);
604
605 return nxmit;
606}
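
/* On a mid-batch failure the loop above simply stops: nxmit reports
 * how many frames were actually queued and, per the kernel-doc above,
 * the XDP core frees the remainder.
 */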
607
608/**
609 * ice_alloc_mapped_page - recycle or make a new page
610 * @rx_ring: ring to use
611 * @bi: rx_buf struct to modify
612 *
613 * Returns true if the page was successfully allocated or
614 * reused.
615 */
616static bool
617ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
618{
619 struct page *page = bi->page;
620 dma_addr_t dma;
621
622 /* since we are recycling buffers we should seldom need to alloc */
623 if (likely(page))
624 return true;
625
626 /* alloc new page for storage */
627 page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
628 if (unlikely(!page)) {
629 rx_ring->rx_stats.alloc_page_failed++;
630 return false;
631 }
632
633 /* map page for use */
634 dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
635 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
636
637 /* if mapping failed free memory back to system since
638 * there isn't much point in holding memory we can't use
639 */
640 if (dma_mapping_error(rx_ring->dev, dma)) {
641 __free_pages(page, ice_rx_pg_order(rx_ring));
642 rx_ring->rx_stats.alloc_page_failed++;
643 return false;
644 }
645
646 bi->dma = dma;
647 bi->page = page;
648 bi->page_offset = rx_ring->rx_offset;
649 page_ref_add(page, USHRT_MAX - 1);
650 bi->pagecnt_bias = USHRT_MAX;
651
652 return true;
653}
654
655/**
656 * ice_alloc_rx_bufs - Replace used receive buffers
657 * @rx_ring: ring to place buffers on
658 * @cleaned_count: number of buffers to replace
659 *
660 * Returns false if all allocations were successful, true if any fail. Returning
661 * true signals to the caller that we didn't replace cleaned_count buffers and
662 * there is more work to do.
663 *
664 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
665 * buffers. Then bump tail at most one time. Grouping like this lets us avoid
666 * multiple tail writes per call.
667 */
668bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
669{
670 union ice_32b_rx_flex_desc *rx_desc;
671 u16 ntu = rx_ring->next_to_use;
672 struct ice_rx_buf *bi;
673
674 /* do nothing if no valid netdev defined */
675 if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
676 !cleaned_count)
677 return false;
678
679 /* get the Rx descriptor and buffer based on next_to_use */
680 rx_desc = ICE_RX_DESC(rx_ring, ntu);
681 bi = &rx_ring->rx_buf[ntu];
682
683 do {
684 /* if we fail here, we have work remaining */
685 if (!ice_alloc_mapped_page(rx_ring, bi))
686 break;
687
688 /* sync the buffer for use by the device */
689 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
690 bi->page_offset,
691 rx_ring->rx_buf_len,
692 DMA_FROM_DEVICE);
693
694 /* Refresh the desc even if buffer_addrs didn't change
695 * because each write-back erases this info.
696 */
697 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
698
699 rx_desc++;
700 bi++;
701 ntu++;
702 if (unlikely(ntu == rx_ring->count)) {
703 rx_desc = ICE_RX_DESC(rx_ring, 0);
704 bi = rx_ring->rx_buf;
705 ntu = 0;
706 }
707
708 /* clear the status bits for the next_to_use descriptor */
709 rx_desc->wb.status_error0 = 0;
710
711 cleaned_count--;
712 } while (cleaned_count);
713
714 if (rx_ring->next_to_use != ntu)
715 ice_release_rx_desc(rx_ring, ntu);
716
717 return !!cleaned_count;
718}
719
720/**
721 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
722 * @rx_buf: Rx buffer to adjust
723 * @size: Size of adjustment
724 *
725 * Update the offset within page so that Rx buf will be ready to be reused.
726 * For systems with PAGE_SIZE < 8192 this function will flip the page offset
727 * so the second half of page assigned to Rx buffer will be used, otherwise
728 * the offset is moved by "size" bytes
729 */
730static void
731ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
732{
733#if (PAGE_SIZE < 8192)
734 /* flip page offset to other buffer */
735 rx_buf->page_offset ^= size;
736#else
737 /* move offset up to the next cache line */
738 rx_buf->page_offset += size;
739#endif
740}
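
/* Example: with 4K pages split into two 2K buffers (and ignoring any
 * configured headroom offset), the XOR above toggles page_offset
 * between 0 and 2048, ping-ponging between the two halves of the page
 * on every reuse.
 */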
741
742/**
743 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
744 * @rx_buf: buffer containing the page
745 * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call
746 *
747 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
748 * which will assign the current buffer to the buffer that next_to_alloc is
749 * pointing to; otherwise, the DMA mapping needs to be destroyed and
750 * page freed
751 */
752static bool
753ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
754{
755 unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
756 struct page *page = rx_buf->page;
757
758 /* avoid re-using remote and pfmemalloc pages */
759 if (!dev_page_is_reusable(page))
760 return false;
761
762#if (PAGE_SIZE < 8192)
763 /* if we are only owner of page we can reuse it */
764 if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
765 return false;
766#else
767#define ICE_LAST_OFFSET \
768 (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
769 if (rx_buf->page_offset > ICE_LAST_OFFSET)
770 return false;
771#endif /* PAGE_SIZE < 8192) */
772
773 /* If we have drained the page fragment pool we need to update
774 * the pagecnt_bias and page count so that we fully restock the
775 * number of references the driver holds.
776 */
777 if (unlikely(pagecnt_bias == 1)) {
778 page_ref_add(page, USHRT_MAX - 1);
779 rx_buf->pagecnt_bias = USHRT_MAX;
780 }
781
782 return true;
783}
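
/* The test above leans on the pagecnt_bias bookkeeping: the driver
 * front-loads the page refcount to USHRT_MAX and pays for each buffer
 * it hands out by decrementing the bias rather than touching the
 * atomic refcount. page_count() minus pagecnt_bias is therefore the
 * number of buffers still in flight; a difference greater than one
 * means some earlier consumer has not yet released the page, so
 * flipping back onto it could overwrite live data.
 */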
784
785/**
786 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
787 * @rx_ring: Rx descriptor ring to transact packets on
788 * @rx_buf: buffer containing page to add
789 * @skb: sk_buff to place the data into
790 * @size: packet length from rx_desc
791 *
792 * This function will add the data contained in rx_buf->page to the skb.
793 * It will just attach the page as a frag to the skb.
794 * The function will then update the page offset.
795 */
796static void
797ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
798 struct sk_buff *skb, unsigned int size)
799{
800#if (PAGE_SIZE >= 8192)
801 unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset);
802#else
803 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
804#endif
805
806 if (!size)
807 return;
808 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
809 rx_buf->page_offset, size, truesize);
810
811 /* page is being used so we must update the page offset */
812 ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
813}
814
815/**
816 * ice_reuse_rx_page - page flip buffer and store it back on the ring
817 * @rx_ring: Rx descriptor ring to store buffers on
818 * @old_buf: donor buffer to have page reused
819 *
820 * Synchronizes page for reuse by the adapter
821 */
822static void
823ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
824{
825 u16 nta = rx_ring->next_to_alloc;
826 struct ice_rx_buf *new_buf;
827
828 new_buf = &rx_ring->rx_buf[nta];
829
830 /* update, and store next to alloc */
831 nta++;
832 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
833
834 /* Transfer page from old buffer to new buffer.
835 * Move each member individually to avoid possible store
836 * forwarding stalls and unnecessary copy of skb.
837 */
838 new_buf->dma = old_buf->dma;
839 new_buf->page = old_buf->page;
840 new_buf->page_offset = old_buf->page_offset;
841 new_buf->pagecnt_bias = old_buf->pagecnt_bias;
842}
843
844/**
845 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
846 * @rx_ring: Rx descriptor ring to transact packets on
847 * @size: size of buffer to add to skb
848 * @rx_buf_pgcnt: rx_buf page refcount
849 *
850 * This function will pull an Rx buffer from the ring and synchronize it
851 * for use by the CPU.
852 */
853static struct ice_rx_buf *
854ice_get_rx_buf(struct ice_ring *rx_ring, const unsigned int size,
855 int *rx_buf_pgcnt)
856{
857 struct ice_rx_buf *rx_buf;
858
859 rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
860 *rx_buf_pgcnt =
861#if (PAGE_SIZE < 8192)
862 page_count(rx_buf->page);
863#else
864 0;
865#endif
866 prefetchw(rx_buf->page);
867
868 if (!size)
869 return rx_buf;
870 /* we are reusing so sync this buffer for CPU use */
871 dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
872 rx_buf->page_offset, size,
873 DMA_FROM_DEVICE);
874
875 /* We have pulled a buffer for use, so decrement pagecnt_bias */
876 rx_buf->pagecnt_bias--;
877
878 return rx_buf;
879}
880
881/**
882 * ice_build_skb - Build skb around an existing buffer
883 * @rx_ring: Rx descriptor ring to transact packets on
884 * @rx_buf: Rx buffer to pull data from
885 * @xdp: xdp_buff pointing to the data
886 *
887 * This function builds an skb around an existing Rx buffer, taking care
888 * to set up the skb correctly and avoid any memcpy overhead.
889 */
890static struct sk_buff *
891ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
892 struct xdp_buff *xdp)
893{
894 u8 metasize = xdp->data - xdp->data_meta;
895#if (PAGE_SIZE < 8192)
896 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
897#else
898 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
899 SKB_DATA_ALIGN(xdp->data_end -
900 xdp->data_hard_start);
901#endif
902 struct sk_buff *skb;
903
904 /* Prefetch first cache line of first page. If xdp->data_meta
905 * is unused, this points exactly at xdp->data, otherwise we
906 * likely have a consumer accessing first few bytes of meta
907 * data, and then actual data.
908 */
909 net_prefetch(xdp->data_meta);
910 /* build an skb around the page buffer */
911 skb = build_skb(xdp->data_hard_start, truesize);
912 if (unlikely(!skb))
913 return NULL;
914
915	/* must record the Rx queue, otherwise OS features such as
916	 * symmetric queues won't work
917 */
918 skb_record_rx_queue(skb, rx_ring->q_index);
919
920 /* update pointers within the skb to store the data */
921 skb_reserve(skb, xdp->data - xdp->data_hard_start);
922 __skb_put(skb, xdp->data_end - xdp->data);
923 if (metasize)
924 skb_metadata_set(skb, metasize);
925
926 /* buffer is used by skb, update page_offset */
927 ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
928
929 return skb;
930}
931
932/**
933 * ice_construct_skb - Allocate skb and populate it
934 * @rx_ring: Rx descriptor ring to transact packets on
935 * @rx_buf: Rx buffer to pull data from
936 * @xdp: xdp_buff pointing to the data
937 *
938 * This function allocates an skb. It then populates it with the page
939 * data from the current receive descriptor, taking care to set up the
940 * skb correctly.
941 */
942static struct sk_buff *
943ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
944 struct xdp_buff *xdp)
945{
946 unsigned int size = xdp->data_end - xdp->data;
947 unsigned int headlen;
948 struct sk_buff *skb;
949
950 /* prefetch first cache line of first page */
951 net_prefetch(xdp->data);
952
953 /* allocate a skb to store the frags */
954 skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
955 GFP_ATOMIC | __GFP_NOWARN);
956 if (unlikely(!skb))
957 return NULL;
958
959 skb_record_rx_queue(skb, rx_ring->q_index);
960 /* Determine available headroom for copy */
961 headlen = size;
962 if (headlen > ICE_RX_HDR_SIZE)
963 headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);
964
965 /* align pull length to size of long to optimize memcpy performance */
966 memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
967 sizeof(long)));
968
969 /* if we exhaust the linear part then add what is left as a frag */
970 size -= headlen;
971 if (size) {
972#if (PAGE_SIZE >= 8192)
973 unsigned int truesize = SKB_DATA_ALIGN(size);
974#else
975 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
976#endif
977 skb_add_rx_frag(skb, 0, rx_buf->page,
978 rx_buf->page_offset + headlen, size, truesize);
979 /* buffer is used by skb, update page_offset */
980 ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
981 } else {
982 /* buffer is unused, reset bias back to rx_buf; data was copied
983 * onto skb's linear part so there's no need for adjusting
984 * page offset and we can reuse this buffer as-is
985 */
986 rx_buf->pagecnt_bias++;
987 }
988
989 return skb;
990}
991
992/**
993 * ice_put_rx_buf - Clean up used buffer and either recycle or free
994 * @rx_ring: Rx descriptor ring to transact packets on
995 * @rx_buf: Rx buffer to pull data from
996 * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect()
997 *
998 * This function will update next_to_clean and then clean up the contents
999 * of the rx_buf. It will either recycle the buffer or unmap it and free
1000 * the associated resources.
1001 */
1002static void
1003ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
1004 int rx_buf_pgcnt)
1005{
1006 u16 ntc = rx_ring->next_to_clean + 1;
1007
1008 /* fetch, update, and store next to clean */
1009 ntc = (ntc < rx_ring->count) ? ntc : 0;
1010 rx_ring->next_to_clean = ntc;
1011
1012 if (!rx_buf)
1013 return;
1014
1015 if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) {
1016 /* hand second half of page back to the ring */
1017 ice_reuse_rx_page(rx_ring, rx_buf);
1018 } else {
1019 /* we are not reusing the buffer so unmap it */
1020 dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
1021 ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
1022 ICE_RX_DMA_ATTR);
1023 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
1024 }
1025
1026 /* clear contents of buffer_info */
1027 rx_buf->page = NULL;
1028}
1029
1030/**
1031 * ice_is_non_eop - process handling of non-EOP buffers
1032 * @rx_ring: Rx ring being processed
1033 * @rx_desc: Rx descriptor for current buffer
1034 *
1035 * If the buffer is an EOP buffer, this function exits returning false;
1036 * otherwise it returns true, indicating that this is in fact a non-EOP buffer.
1037 */
1038static bool
1039ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
1040{
1041 /* if we are the last buffer then there is nothing else to do */
1042#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
1043 if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
1044 return false;
1045
1046 rx_ring->rx_stats.non_eop_descs++;
1047
1048 return true;
1049}
1050
1051/**
1052 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
1053 * @rx_ring: Rx descriptor ring to transact packets on
1054 * @budget: Total limit on number of packets to process
1055 *
1056 * This function provides a "bounce buffer" approach to Rx interrupt
1057 * processing. The advantage to this is that on systems that have
1058 * expensive overhead for IOMMU access this provides a means of avoiding
1059 * it by maintaining the mapping of the page to the system.
1060 *
1061 * Returns amount of work completed
1062 */
1063int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
1064{
1065 unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0;
1066 u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
1067 unsigned int offset = rx_ring->rx_offset;
1068 unsigned int xdp_res, xdp_xmit = 0;
1069 struct sk_buff *skb = rx_ring->skb;
1070 struct bpf_prog *xdp_prog = NULL;
1071 struct xdp_buff xdp;
1072 bool failure;
1073
1074	/* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
1075#if (PAGE_SIZE < 8192)
1076 frame_sz = ice_rx_frame_truesize(rx_ring, 0);
1077#endif
1078 xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
1079
1080 /* start the loop to process Rx packets bounded by 'budget' */
1081 while (likely(total_rx_pkts < (unsigned int)budget)) {
1082 union ice_32b_rx_flex_desc *rx_desc;
1083 struct ice_rx_buf *rx_buf;
1084 unsigned char *hard_start;
1085 unsigned int size;
1086 u16 stat_err_bits;
1087 int rx_buf_pgcnt;
1088 u16 vlan_tag = 0;
1089 u16 rx_ptype;
1090
1091 /* get the Rx desc from Rx ring based on 'next_to_clean' */
1092 rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
1093
1094 /* status_error_len will always be zero for unused descriptors
1095 * because it's cleared in cleanup, and overlaps with hdr_addr
1096 * which is always zero because packet split isn't used. If the
1097 * hardware wrote DD then it will be non-zero
1098 */
1099 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
1100 if (!ice_test_staterr(rx_desc, stat_err_bits))
1101 break;
1102
1103 /* This memory barrier is needed to keep us from reading
1104 * any other fields out of the rx_desc until we know the
1105 * DD bit is set.
1106 */
1107 dma_rmb();
1108
1109 ice_trace(clean_rx_irq, rx_ring, rx_desc);
1110 if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
1111 struct ice_vsi *ctrl_vsi = rx_ring->vsi;
1112
1113 if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&
1114 ctrl_vsi->vf_id != ICE_INVAL_VFID)
1115 ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
1116 ice_put_rx_buf(rx_ring, NULL, 0);
1117 cleaned_count++;
1118 continue;
1119 }
1120
1121 size = le16_to_cpu(rx_desc->wb.pkt_len) &
1122 ICE_RX_FLX_DESC_PKT_LEN_M;
1123
1124 /* retrieve a buffer from the ring */
1125 rx_buf = ice_get_rx_buf(rx_ring, size, &rx_buf_pgcnt);
1126
1127 if (!size) {
1128 xdp.data = NULL;
1129 xdp.data_end = NULL;
1130 xdp.data_hard_start = NULL;
1131 xdp.data_meta = NULL;
1132 goto construct_skb;
1133 }
1134
1135 hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
1136 offset;
1137 xdp_prepare_buff(&xdp, hard_start, offset, size, true);
1138#if (PAGE_SIZE > 4096)
1139		/* At larger PAGE_SIZE, frame_sz depends on the frame size */
1140 xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
1141#endif
1142
1143 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
1144 if (!xdp_prog)
1145 goto construct_skb;
1146
1147 xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog);
1148 if (!xdp_res)
1149 goto construct_skb;
1150 if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
1151 xdp_xmit |= xdp_res;
1152 ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz);
1153 } else {
1154 rx_buf->pagecnt_bias++;
1155 }
1156 total_rx_bytes += size;
1157 total_rx_pkts++;
1158
1159 cleaned_count++;
1160 ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
1161 continue;
1162construct_skb:
1163 if (skb) {
1164 ice_add_rx_frag(rx_ring, rx_buf, skb, size);
1165 } else if (likely(xdp.data)) {
1166 if (ice_ring_uses_build_skb(rx_ring))
1167 skb = ice_build_skb(rx_ring, rx_buf, &xdp);
1168 else
1169 skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
1170 }
1171 /* exit if we failed to retrieve a buffer */
1172 if (!skb) {
1173 rx_ring->rx_stats.alloc_buf_failed++;
1174 if (rx_buf)
1175 rx_buf->pagecnt_bias++;
1176 break;
1177 }
1178
1179 ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
1180 cleaned_count++;
1181
1182 /* skip if it is NOP desc */
1183 if (ice_is_non_eop(rx_ring, rx_desc))
1184 continue;
1185
1186 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
1187 if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) {
1188 dev_kfree_skb_any(skb);
1189 continue;
1190 }
1191
1192 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
1193 if (ice_test_staterr(rx_desc, stat_err_bits))
1194 vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);
1195
1196 /* pad the skb if needed, to make a valid ethernet frame */
1197 if (eth_skb_pad(skb)) {
1198 skb = NULL;
1199 continue;
1200 }
1201
1202 /* probably a little skewed due to removing CRC */
1203 total_rx_bytes += skb->len;
1204
1205 /* populate checksum, VLAN, and protocol */
1206 rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
1207 ICE_RX_FLEX_DESC_PTYPE_M;
1208
1209 ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
1210
1211 ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb);
1212 /* send completed skb up the stack */
1213 ice_receive_skb(rx_ring, skb, vlan_tag);
1214 skb = NULL;
1215
1216 /* update budget accounting */
1217 total_rx_pkts++;
1218 }
1219
1220 /* return up to cleaned_count buffers to hardware */
1221 failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
1222
1223 if (xdp_prog)
1224 ice_finalize_xdp_rx(rx_ring, xdp_xmit);
1225 rx_ring->skb = skb;
1226
1227 ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes);
1228
1229 /* guarantee a trip back through this routine if there was a failure */
1230 return failure ? budget : (int)total_rx_pkts;
1231}
1232
1233/**
1234 * ice_net_dim - Update net DIM algorithm
1235 * @q_vector: the vector associated with the interrupt
1236 *
1237 * Create a DIM sample and notify net_dim() so that it can possibly decide
1238 * a new ITR value based on incoming packets, bytes, and interrupts.
1239 *
1240 * This function is a no-op if the ring is not configured to dynamic ITR.
1241 */
1242static void ice_net_dim(struct ice_q_vector *q_vector)
1243{
1244 struct ice_ring_container *tx = &q_vector->tx;
1245 struct ice_ring_container *rx = &q_vector->rx;
1246
1247 if (ITR_IS_DYNAMIC(tx)) {
1248 struct dim_sample dim_sample = {};
1249 u64 packets = 0, bytes = 0;
1250 struct ice_ring *ring;
1251
1252 ice_for_each_ring(ring, q_vector->tx) {
1253 packets += ring->stats.pkts;
1254 bytes += ring->stats.bytes;
1255 }
1256
1257 dim_update_sample(q_vector->total_events, packets, bytes,
1258 &dim_sample);
1259
1260 net_dim(&tx->dim, dim_sample);
1261 }
1262
1263 if (ITR_IS_DYNAMIC(rx)) {
1264 struct dim_sample dim_sample = {};
1265 u64 packets = 0, bytes = 0;
1266 struct ice_ring *ring;
1267
1268 ice_for_each_ring(ring, q_vector->rx) {
1269 packets += ring->stats.pkts;
1270 bytes += ring->stats.bytes;
1271 }
1272
1273 dim_update_sample(q_vector->total_events, packets, bytes,
1274 &dim_sample);
1275
1276 net_dim(&rx->dim, dim_sample);
1277 }
1278}
1279
1280/**
1281 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
1282 * @itr_idx: interrupt throttling index
1283 * @itr: interrupt throttling value in usecs
1284 */
1285static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
1286{
1287 /* The ITR value is reported in microseconds, and the register value is
1288 * recorded in 2 microsecond units. For this reason we only need to
1289 * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
1290 * granularity as a shift instead of division. The mask makes sure the
1291 * ITR value is never odd so we don't accidentally write into the field
1292 * prior to the ITR field.
1293 */
1294 itr &= ICE_ITR_MASK;
1295
1296 return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
1297 (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
1298 (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
1299}
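
/* Example: a 50 usec ITR is already even, so the mask leaves it alone
 * and the shift records 50 >> 1 = 25 in the 2 usec units the register
 * expects; an odd value such as 51 is first rounded down to 50 so the
 * low bit cannot spill into the adjacent register field.
 */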
1300
1301/**
1302 * ice_update_ena_itr - Update ITR moderation and re-enable MSI-X interrupt
1303 * @q_vector: the vector associated with the interrupt to enable
1304 *
1305 * Update the net_dim() algorithm and re-enable the interrupt associated with
1306 * this vector.
1307 *
1308 * If the VSI is down, the interrupt will not be re-enabled.
1309 */
1310static void ice_update_ena_itr(struct ice_q_vector *q_vector)
1311{
1312 struct ice_vsi *vsi = q_vector->vsi;
1313 bool wb_en = q_vector->wb_on_itr;
1314 u32 itr_val;
1315
1316 if (test_bit(ICE_DOWN, vsi->state))
1317 return;
1318
1319 /* When exiting WB_ON_ITR, let ITR resume its normal
1320 * interrupts-enabled path.
1321 */
1322 if (wb_en)
1323 q_vector->wb_on_itr = false;
1324
1325 /* This will do nothing if dynamic updates are not enabled. */
1326 ice_net_dim(q_vector);
1327
1328 /* net_dim() updates ITR out-of-band using a work item */
1329 itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
1330 /* trigger an immediate software interrupt when exiting
1331 * busy poll, to make sure to catch any pending cleanups
1332 * that might have been missed due to interrupt state
1333 * transition.
1334 */
1335 if (wb_en) {
1336 itr_val |= GLINT_DYN_CTL_SWINT_TRIG_M |
1337 GLINT_DYN_CTL_SW_ITR_INDX_M |
1338 GLINT_DYN_CTL_SW_ITR_INDX_ENA_M;
1339 }
1340 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
1341}
1342
1343/**
1344 * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
1345 * @q_vector: q_vector to set WB_ON_ITR on
1346 *
1347 * We need to tell hardware to write-back completed descriptors even when
1348 * interrupts are disabled. Descriptors will be written back on cache line
1349 * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR
1350 * descriptors may not be written back if they don't fill a cache line until
1351 * the next interrupt.
1352 *
1353 * This sets the write-back frequency to whatever was set previously for the
1354 * ITR indices. Also, set the INTENA_MSK bit to make sure hardware knows we
1355 * aren't meddling with the INTENA_M bit.
1356 */
1357static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
1358{
1359 struct ice_vsi *vsi = q_vector->vsi;
1360
1361 /* already in wb_on_itr mode no need to change it */
1362 if (q_vector->wb_on_itr)
1363 return;
1364
1365 /* use previously set ITR values for all of the ITR indices by
1366 * specifying ICE_ITR_NONE, which will vary in adaptive (AIM) mode and
1367 * be static in non-adaptive mode (user configured)
1368 */
1369 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
1370 ((ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) &
1371 GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M |
1372 GLINT_DYN_CTL_WB_ON_ITR_M);
1373
1374 q_vector->wb_on_itr = true;
1375}
1376
1377/**
1378 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
1379 * @napi: napi struct with our devices info in it
1380 * @budget: amount of work driver is allowed to do this pass, in packets
1381 *
1382 * This function will clean all queues associated with a q_vector.
1383 *
1384 * Returns the amount of work done
1385 */
int ice_napi_poll(struct napi_struct *napi, int budget)
{
	struct ice_q_vector *q_vector =
				container_of(napi, struct ice_q_vector, napi);
	bool clean_complete = true;
	struct ice_ring *ring;
	int budget_per_ring;
	int work_done = 0;

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	ice_for_each_ring(ring, q_vector->tx) {
		bool wd = ring->xsk_pool ?
			  ice_clean_tx_irq_zc(ring, budget) :
			  ice_clean_tx_irq(ring, budget);

		if (!wd)
			clean_complete = false;
	}

	/* Handle case where we are called by netpoll with a budget of 0 */
	if (unlikely(budget <= 0))
		return budget;

	/* normally we have 1 Rx ring per q_vector */
	if (unlikely(q_vector->num_ring_rx > 1))
		/* We attempt to distribute budget to each Rx queue fairly, but
		 * don't allow the budget to go below 1 because that would exit
		 * polling early.
		 */
		budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1);
	else
		/* Max of 1 Rx ring in this q_vector so give it the budget */
		budget_per_ring = budget;

	ice_for_each_ring(ring, q_vector->rx) {
		int cleaned;

		/* A dedicated path for zero-copy allows making a single
		 * comparison in the irq context instead of many inside the
		 * ice_clean_rx_irq function and makes the codebase cleaner.
		 */
		cleaned = ring->xsk_pool ?
			  ice_clean_rx_irq_zc(ring, budget_per_ring) :
			  ice_clean_rx_irq(ring, budget_per_ring);
		work_done += cleaned;
		/* if we clean as many as budgeted, we must not be done */
		if (cleaned >= budget_per_ring)
			clean_complete = false;
	}

	/* If work is not complete, return the budget so polling continues */
	if (!clean_complete) {
		/* Enable WB_ON_ITR so partially filled cache lines of
		 * completed descriptors are still written back while we poll.
		 */
		ice_set_wb_on_itr(q_vector);
		return budget;
	}

	/* Exit the polling mode, but don't re-enable interrupts if stack might
	 * poll us due to busy-polling
	 */
	if (likely(napi_complete_done(napi, work_done)))
		ice_update_ena_itr(q_vector);
	else
		ice_set_wb_on_itr(q_vector);

	return min_t(int, work_done, budget - 1);
}

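/* Illustrative arithmetic, assuming the typical netdev NAPI budget of 64:
 * with four Rx rings on one q_vector, each ring is polled with
 * budget_per_ring = max(64 / 4, 1) = 16; in the common single-ring case the
 * ring simply receives the full budget of 64.
 */
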
/**
 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: number of descriptors we want to assure are available
 *
 * Returns -EBUSY if a stop is needed, else 0
 */
static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in case another CPU has just made room available. */
	if (likely(ICE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_subqueue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
	++tx_ring->tx_stats.restart_q;
	return 0;
}

/**
 * ice_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: number of descriptors we want to assure are available
 *
 * Returns 0 if stop is not needed
 */
static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
{
	if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
		return 0;

	return __ice_maybe_stop_tx(tx_ring, size);
}

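/* Usage note: the two-level split keeps the hot path cheap. The check in
 * ice_maybe_stop_tx() costs a single descriptor-count comparison per
 * transmit; only when the ring is nearly full does __ice_maybe_stop_tx()
 * stop the queue, issue a barrier, and re-check, so a Tx clean racing on
 * another CPU cannot leave the queue stopped while descriptors are free.
 */
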
/**
 * ice_tx_map - Build the Tx descriptor
 * @tx_ring: ring to send buffer on
 * @first: Tx buffer for the first descriptor of this packet
 * @off: pointer to struct that holds offload parameters
 *
 * This function loops over the skb data pointed to by *first
 * and gets a physical address for each memory location and programs
 * it and the length into the transmit descriptor.
 */
static void
ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
	   struct ice_tx_offload_params *off)
{
	u64 td_offset, td_tag, td_cmd;
	u16 i = tx_ring->next_to_use;
	unsigned int data_len, size;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	struct sk_buff *skb;
	skb_frag_t *frag;
	dma_addr_t dma;

	td_tag = off->td_l2tag1;
	td_cmd = off->td_cmd;
	td_offset = off->td_offset;
	skb = first->skb;

	data_len = skb->data_len;
	size = skb_headlen(skb);

	tx_desc = ICE_TX_DESC(tx_ring, i);

	if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
		td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
		td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
			 ICE_TX_FLAGS_VLAN_S;
	}

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buf = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;

		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buf, len, size);
		dma_unmap_addr_set(tx_buf, dma, dma);

		/* align size to end of page */
		max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
		tx_desc->buf_addr = cpu_to_le64(dma);

		/* account for data chunks larger than the hardware
		 * can handle
		 */
		while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				ice_build_ctob(td_cmd, td_offset, max_data,
					       td_tag);

			tx_desc++;
			i++;

			if (i == tx_ring->count) {
				tx_desc = ICE_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += max_data;
			size -= max_data;

			max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
			tx_desc->buf_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset,
							      size, td_tag);

		tx_desc++;
		i++;

		if (i == tx_ring->count) {
			tx_desc = ICE_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_buf = &tx_ring->tx_buf[i];
	}

	/* record bytecount for BQL */
	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* record SW timestamp if HW timestamp is not available */
	skb_tx_timestamp(first->skb);

	i++;
	if (i == tx_ring->count)
		i = 0;

	/* write last descriptor with RS and EOP bits */
	td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD;
	tx_desc->cmd_type_offset_bsz =
		ice_build_ctob(td_cmd, td_offset, size, td_tag);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 *
	 * We also use this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	tx_ring->next_to_use = i;

	ice_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* notify HW of packet */
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
		writel(i, tx_ring->tail);

	return;

dma_error:
	/* clear DMA mappings for failed tx_buf map */
	for (;;) {
		tx_buf = &tx_ring->tx_buf[i];
		ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
		if (tx_buf == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}

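/* Worked example: a 30000-byte mapping whose DMA address sits 512 bytes
 * below a 4K boundary. With ICE_MAX_DATA_PER_TXD == 16K - 1 and
 * ICE_MAX_DATA_PER_TXD_ALIGNED == 12K, the loop above emits a first chunk
 * of 12288 + 512 = 12800 bytes (ending exactly on a 4K boundary), then
 * 12288 bytes, leaving 4912 bytes for the final descriptor: three
 * descriptors for a single fragment.
 */
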
/**
 * ice_tx_csum - Enable Tx checksum offloads
 * @first: Tx buffer for the first descriptor of this packet
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise.
 */
static
int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	u32 l4_len = 0, l3_len = 0, l2_len = 0;
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		unsigned char *hdr;
	} l4;
	__be16 frag_off, protocol;
	unsigned char *exthdr;
	u32 offset, cmd = 0;
	u8 l4_proto = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* compute outer L2 header size */
	l2_len = ip.hdr - skb->data;
	offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;

	protocol = vlan_get_protocol(skb);

	if (protocol == htons(ETH_P_IP))
		first->tx_flags |= ICE_TX_FLAGS_IPV4;
	else if (protocol == htons(ETH_P_IPV6))
		first->tx_flags |= ICE_TX_FLAGS_IPV6;

	if (skb->encapsulation) {
		bool gso_ena = false;
		u32 tunnel = 0;

		/* define outer network header type */
		if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
			tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ?
				  ICE_TX_CTX_EIPT_IPV4 :
				  ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
			l4_proto = ip.v4->protocol;
		} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
			int ret;

			tunnel |= ICE_TX_CTX_EIPT_IPV6;
			exthdr = ip.hdr + sizeof(*ip.v6);
			l4_proto = ip.v6->nexthdr;
			ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
					       &l4_proto, &frag_off);
			if (ret < 0)
				return -1;
		}

		/* define outer transport */
		switch (l4_proto) {
		case IPPROTO_UDP:
			tunnel |= ICE_TXD_CTX_UDP_TUNNELING;
			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
			break;
		case IPPROTO_GRE:
			tunnel |= ICE_TXD_CTX_GRE_TUNNELING;
			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
			break;
		case IPPROTO_IPIP:
		case IPPROTO_IPV6:
			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
			l4.hdr = skb_inner_network_header(skb);
			break;
		default:
			if (first->tx_flags & ICE_TX_FLAGS_TSO)
				return -1;

			skb_checksum_help(skb);
			return 0;
		}

		/* compute outer L3 header size */
		tunnel |= ((l4.hdr - ip.hdr) / 4) <<
			  ICE_TXD_CTX_QW0_EIPLEN_S;

		/* switch IP header pointer from outer to inner header */
		ip.hdr = skb_inner_network_header(skb);

		/* compute tunnel header size */
		tunnel |= ((ip.hdr - l4.hdr) / 2) <<
			  ICE_TXD_CTX_QW0_NATLEN_S;

		gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
		/* indicate if we need to offload outer UDP header */
		if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena &&
		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
			tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M;

		/* record tunnel offload values */
		off->cd_tunnel_params |= tunnel;

		/* set DTYP=1 to indicate that it's a Tx context descriptor
		 * in IPsec tunnel mode with Tx offloads in Quad word 1
		 */
		off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX;

		/* switch L4 header pointer from outer to inner */
		l4.hdr = skb_inner_transport_header(skb);
		l4_proto = 0;

		/* reset type as we transition from outer to inner headers */
		first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6);
		if (ip.v4->version == 4)
			first->tx_flags |= ICE_TX_FLAGS_IPV4;
		if (ip.v6->version == 6)
			first->tx_flags |= ICE_TX_FLAGS_IPV6;
	}

	/* Enable IP checksum offloads */
	if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
		l4_proto = ip.v4->protocol;
		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (first->tx_flags & ICE_TX_FLAGS_TSO)
			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
		else
			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;

	} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
		cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
		exthdr = ip.hdr + sizeof(*ip.v6);
		l4_proto = ip.v6->nexthdr;
		if (l4.hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
					 &frag_off);
	} else {
		return -1;
	}

	/* compute inner L3 header size */
	l3_len = l4.hdr - ip.hdr;
	offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;

	/* Enable L4 checksum offloads */
	switch (l4_proto) {
	case IPPROTO_TCP:
		/* enable checksum offloads */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
		l4_len = l4.tcp->doff;
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case IPPROTO_UDP:
		/* enable UDP checksum offload */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
		l4_len = (sizeof(struct udphdr) >> 2);
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case IPPROTO_SCTP:
		/* enable SCTP checksum offload */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
		l4_len = sizeof(struct sctphdr) >> 2;
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;

	default:
		if (first->tx_flags & ICE_TX_FLAGS_TSO)
			return -1;
		skb_checksum_help(skb);
		return 0;
	}

	off->td_cmd |= cmd;
	off->td_offset |= offset;
	return 1;
}

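/* Illustrative packing, given MACLEN in units of 2 bytes and IPLEN/L4LEN in
 * 4-byte words as above: a plain TCP/IPv4 frame with a 14-byte Ethernet
 * header, a 20-byte IP header and a 20-byte TCP header yields
 *	offset = (7 << ICE_TX_DESC_LEN_MACLEN_S) |
 *		 (5 << ICE_TX_DESC_LEN_IPLEN_S) |
 *		 (5 << ICE_TX_DESC_LEN_L4_LEN_S);
 */
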
/**
 * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
 * @tx_ring: ring to send buffer on
 * @first: pointer to struct ice_tx_buf
 *
 * Checks the skb and sets up the generic transmit flags related to VLAN
 * tagging for the HW (e.g. VLAN, DCB).
 */
static void
ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
{
	struct sk_buff *skb = first->skb;

	/* nothing left to do, software offloaded VLAN */
	if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol))
		return;

	/* currently, we always assume 802.1Q for VLAN insertion as VLAN
	 * insertion for 802.1AD is not supported
	 */
	if (skb_vlan_tag_present(skb)) {
		first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
		first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
	}

	ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
}

/**
 * ice_tso - computes mss and TSO length to prepare for TSO
 * @first: pointer to struct ice_tx_buf
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 0 or error (negative) if TSO can't happen, 1 otherwise.
 */
static
int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		unsigned char *hdr;
	} l4;
	u64 cd_mss, cd_tso_len;
	u32 paylen;
	u8 l4_start;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	/* cppcheck-suppress unreadVariable */
	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		ip.v4->tot_len = 0;
		ip.v4->check = 0;
	} else {
		ip.v6->payload_len = 0;
	}

	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
					 SKB_GSO_GRE_CSUM |
					 SKB_GSO_IPXIP4 |
					 SKB_GSO_IPXIP6 |
					 SKB_GSO_UDP_TUNNEL |
					 SKB_GSO_UDP_TUNNEL_CSUM)) {
		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
			l4.udp->len = 0;

			/* determine offset of outer transport header */
			l4_start = (u8)(l4.hdr - skb->data);

			/* remove payload length from outer checksum */
			paylen = skb->len - l4_start;
			csum_replace_by_diff(&l4.udp->check,
					     (__force __wsum)htonl(paylen));
		}

		/* reset pointers to inner headers */

		/* cppcheck-suppress unreadVariable */
		ip.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);

		/* initialize inner IP header fields */
		if (ip.v4->version == 4) {
			ip.v4->tot_len = 0;
			ip.v4->check = 0;
		} else {
			ip.v6->payload_len = 0;
		}
	}

	/* determine offset of transport header */
	l4_start = (u8)(l4.hdr - skb->data);

	/* remove payload length from checksum */
	paylen = skb->len - l4_start;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
		csum_replace_by_diff(&l4.udp->check,
				     (__force __wsum)htonl(paylen));
		/* compute length of UDP segmentation header; use the size of
		 * the UDP header itself, not the size of the union pointer
		 */
		off->header_len = (u8)(sizeof(struct udphdr) + l4_start);
	} else {
		csum_replace_by_diff(&l4.tcp->check,
				     (__force __wsum)htonl(paylen));
		/* compute length of TCP segmentation header */
		off->header_len = (u8)((l4.tcp->doff * 4) + l4_start);
	}

	/* update gso_segs and bytecount */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * off->header_len;

	cd_tso_len = skb->len - off->header_len;
	cd_mss = skb_shinfo(skb)->gso_size;

	/* record cdesc_qw1 with TSO parameters */
	off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
			     (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
			     (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
			     (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
	first->tx_flags |= ICE_TX_FLAGS_TSO;
	return 1;
}

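/* Worked example: a TCP TSO skb with 66 bytes of headers and 45 segments of
 * 1448 bytes gives header_len = 66, cd_tso_len = skb->len - 66 = 65160 and
 * cd_mss = 1448; bytecount grows by (45 - 1) * 66 bytes so BQL accounts for
 * the headers hardware will replicate in front of every segment.
 */
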
/**
 * ice_txd_use_count - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment. For our purposes this is accurate out to 1M which is orders of
 * magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
 *
 * Since multiplication and division are commutative, we can reorder
 * operations into:
 *     return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
 */
static unsigned int ice_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
}

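/* Worked arithmetic: size = 32768 gives (32768 * 85) >> 20 = 2, plus
 * ICE_DESCS_FOR_SKB_DATA_PTR (1) for a total of 3 descriptors, matching
 * ceil(32768 / 12288); a 1500-byte frame gives (1500 * 85) >> 20 = 0, i.e.
 * a single descriptor.
 */
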
/**
 * ice_xmit_desc_count - calculate number of Tx descriptors needed
 * @skb: send buffer
 *
 * Returns number of data descriptors needed for this skb.
 */
static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += ice_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}

/**
 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
 * @skb: send buffer
 *
 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
 * and so we need to figure out the cases where we need to linearize the skb.
 *
 * For TSO we need to count the TSO header and segment payload separately.
 * As such we need to check cases where we have 7 fragments or more as we
 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
 * the segment payload in the first descriptor, and another 7 for the
 * fragments.
 */
static bool __ice_chk_linearize(struct sk_buff *skb)
{
	const skb_frag_t *frag, *stale;
	int nr_frags, sum;

	/* no need to check if number of frags is less than 7 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags < (ICE_MAX_BUF_TXD - 1))
		return false;

	/* We need to walk through the list and validate that each group
	 * of 6 fragments totals at least gso_size.
	 */
	nr_frags -= ICE_MAX_BUF_TXD - 2;
	frag = &skb_shinfo(skb)->frags[0];

	/* Initialize size to the negative value of gso_size minus 1. We
	 * use this as the worst case scenario in which the frag ahead
	 * of us only provides one byte which is why we are limited to 6
	 * descriptors for a single transmit as the header and previous
	 * fragment are already consuming 2 descriptors.
	 */
	sum = 1 - skb_shinfo(skb)->gso_size;

	/* Add size of frags 0 through 4 to create our initial sum */
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);

	/* Walk through fragments adding latest fragment, testing it, and
	 * then removing stale fragments from the sum.
	 */
	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
		int stale_size = skb_frag_size(stale);

		sum += skb_frag_size(frag++);

		/* The stale fragment may present us with a smaller
		 * descriptor than the actual fragment size. To account
		 * for that we need to remove all the data on the front and
		 * figure out what the remainder would be in the last
		 * descriptor associated with the fragment.
		 */
		if (stale_size > ICE_MAX_DATA_PER_TXD) {
			int align_pad = -(skb_frag_off(stale)) &
					(ICE_MAX_READ_REQ_SIZE - 1);

			sum -= align_pad;
			stale_size -= align_pad;

			do {
				sum -= ICE_MAX_DATA_PER_TXD_ALIGNED;
				stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED;
			} while (stale_size > ICE_MAX_DATA_PER_TXD);
		}

		/* if sum is negative we failed to make sufficient progress */
		if (sum < 0)
			return true;

		if (!nr_frags--)
			break;

		sum -= stale_size;
	}

	return false;
}

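/* Illustrative failure case: gso_size = 2000 with eight 300-byte fragments.
 * sum starts at 1 - 2000 = -1999, fragments 0-4 raise it to -499 and
 * fragment 5 to -199, still negative on the first test: no window of six
 * fragments covers a full segment, so the skb must be linearized.
 */
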
/**
 * ice_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb: send buffer
 * @count: number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 */
static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < ICE_MAX_BUF_TXD))
		return false;

	if (skb_is_gso(skb))
		return __ice_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != ICE_MAX_BUF_TXD;
}

/**
 * ice_tstamp - set up context descriptor for hardware timestamp
 * @tx_ring: pointer to the Tx ring to send buffer on
 * @skb: pointer to the SKB we're sending
 * @first: Tx buffer
 * @off: Tx offload parameters
 */
static void
ice_tstamp(struct ice_ring *tx_ring, struct sk_buff *skb,
	   struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	s8 idx;

	/* only timestamp the outbound packet if the user has requested it */
	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
		return;

	if (!tx_ring->ptp_tx)
		return;

	/* Tx timestamps cannot be sampled when doing TSO */
	if (first->tx_flags & ICE_TX_FLAGS_TSO)
		return;

	/* Grab an open timestamp slot */
	idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb);
	if (idx < 0)
		return;

	off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
			     (ICE_TX_CTX_DESC_TSYN << ICE_TXD_CTX_QW1_CMD_S) |
			     ((u64)idx << ICE_TXD_CTX_QW1_TSO_LEN_S));
	first->tx_flags |= ICE_TX_FLAGS_TSYN;
}

/**
 * ice_xmit_frame_ring - Sends buffer on Tx ring
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
static netdev_tx_t
ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
{
	struct ice_tx_offload_params offload = { 0 };
	struct ice_vsi *vsi = tx_ring->vsi;
	struct ice_tx_buf *first;
	struct ethhdr *eth;
	unsigned int count;
	int tso, csum;

	ice_trace(xmit_frame_ring, tx_ring, skb);

	count = ice_xmit_desc_count(skb);
	if (ice_chk_linearize(skb, count)) {
		if (__skb_linearize(skb))
			goto out_drop;
		count = ice_txd_use_count(skb->len);
		tx_ring->tx_stats.tx_linearize++;
	}

	/* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
	 *       + 4 desc gap to avoid the cache line where head is,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
			      ICE_DESCS_FOR_CTX_DESC)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	offload.tx_ring = tx_ring;

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buf[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
	first->gso_segs = 1;
	first->tx_flags = 0;

	/* prepare the VLAN tagging flags for Tx */
	ice_tx_prepare_vlan_flags(tx_ring, first);

	/* set up TSO offload */
	tso = ice_tso(first, &offload);
	if (tso < 0)
		goto out_drop;

	/* always set up Tx checksum offload */
	csum = ice_tx_csum(first, &offload);
	if (csum < 0)
		goto out_drop;

	/* allow CONTROL frames egress from main VSI if FW LLDP disabled */
	eth = (struct ethhdr *)skb_mac_header(skb);
	if (unlikely((skb->priority == TC_PRIO_CONTROL ||
		      eth->h_proto == htons(ETH_P_LLDP)) &&
		     vsi->type == ICE_VSI_PF &&
		     vsi->port_info->qos_cfg.is_sw_lldp))
		offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
					ICE_TX_CTX_DESC_SWTCH_UPLINK <<
					ICE_TXD_CTX_QW1_CMD_S);

	ice_tstamp(tx_ring, skb, first, &offload);

	if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
		struct ice_tx_ctx_desc *cdesc;
		u16 i = tx_ring->next_to_use;

		/* grab the next descriptor */
		cdesc = ICE_TX_CTX_DESC(tx_ring, i);
		i++;
		tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

		/* setup context descriptor */
		cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
		cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
		cdesc->rsvd = cpu_to_le16(0);
		cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
	}

	ice_tx_map(tx_ring, first, &offload);
	return NETDEV_TX_OK;

out_drop:
	ice_trace(xmit_frame_ring_drop, tx_ring, skb);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

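/* Illustrative headroom check, assuming ICE_DESCS_PER_CACHE_LINE == 4 (four
 * 16-byte descriptors per 64-byte cache line) and ICE_DESCS_FOR_CTX_DESC ==
 * 1: a small linear frame has count == 1, so the transmit above proceeds
 * only while at least 6 descriptors are free.
 */
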
/**
 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_ring *tx_ring;

	tx_ring = vsi->tx_rings[skb->queue_mapping];

	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
	if (skb_put_padto(skb, ICE_MIN_TX_LEN))
		return NETDEV_TX_OK;

	return ice_xmit_frame_ring(skb, tx_ring);
}

/**
 * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue
 * @tx_ring: tx_ring to clean
 */
void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring)
{
	struct ice_vsi *vsi = tx_ring->vsi;
	s16 i = tx_ring->next_to_clean;
	int budget = ICE_DFLT_IRQ_WORK;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no pending work */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		/* if the descriptor isn't done, no work to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;
		tx_desc->buf_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;

		/* move past filter desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		/* unmap the data header */
		if (dma_unmap_len(tx_buf, len))
			dma_unmap_single(tx_ring->dev,
					 dma_unmap_addr(tx_buf, dma),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
		if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
			devm_kfree(tx_ring->dev, tx_buf->raw_buf);

		/* reset the buffer and clear next_to_watch to prevent
		 * false hangs
		 */
		tx_buf->raw_buf = NULL;
		tx_buf->tx_flags = 0;
		tx_buf->next_to_watch = NULL;
		dma_unmap_len_set(tx_buf, len, 0);
		tx_desc->buf_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;

		/* move past eop_desc for start of next FD desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	/* re-enable interrupt if needed */
	ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]);
}