1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2018, Intel Corporation. */
3
4/* The driver transmit and receive code */
5
6#include <linux/prefetch.h>
7#include <linux/mm.h>
8#include <linux/bpf_trace.h>
9#include <net/xdp.h>
10#include "ice_txrx_lib.h"
11#include "ice_lib.h"
12#include "ice.h"
13#include "ice_dcb_lib.h"
14#include "ice_xsk.h"
15
16#define ICE_RX_HDR_SIZE 256
17
18#define FDIR_DESC_RXDID 0x40
19#define ICE_FDIR_CLEAN_DELAY 10
20
21/**
22 * ice_prgm_fdir_fltr - Program a Flow Director filter
23 * @vsi: VSI to send dummy packet
24 * @fdir_desc: flow director descriptor
25 * @raw_packet: allocated buffer for flow director
26 */
27int
28ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
29 u8 *raw_packet)
30{
31 struct ice_tx_buf *tx_buf, *first;
32 struct ice_fltr_desc *f_desc;
33 struct ice_tx_desc *tx_desc;
34 struct ice_ring *tx_ring;
35 struct device *dev;
36 dma_addr_t dma;
37 u32 td_cmd;
38 u16 i;
39
40 /* VSI and Tx ring */
41 if (!vsi)
42 return -ENOENT;
43 tx_ring = vsi->tx_rings[0];
44 if (!tx_ring || !tx_ring->desc)
45 return -ENOENT;
46 dev = tx_ring->dev;
47
48 /* we are using two descriptors to add/del a filter and we can wait */
49 for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {
50 if (!i)
51 return -EAGAIN;
52 msleep_interruptible(1);
53 }
54
55 dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE,
56 DMA_TO_DEVICE);
57
58 if (dma_mapping_error(dev, dma))
59 return -EINVAL;
60
61 /* grab the next descriptor */
62 i = tx_ring->next_to_use;
63 first = &tx_ring->tx_buf[i];
64 f_desc = ICE_TX_FDIRDESC(tx_ring, i);
65 memcpy(f_desc, fdir_desc, sizeof(*f_desc));
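	/* the filter programming descriptor just copied is followed by a data
	 * descriptor that points at the dummy packet in raw_packet
	 */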
66
67 i++;
68 i = (i < tx_ring->count) ? i : 0;
69 tx_desc = ICE_TX_DESC(tx_ring, i);
70 tx_buf = &tx_ring->tx_buf[i];
71
72 i++;
73 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
74
75 memset(tx_buf, 0, sizeof(*tx_buf));
76 dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE);
77 dma_unmap_addr_set(tx_buf, dma, dma);
78
79 tx_desc->buf_addr = cpu_to_le64(dma);
80 td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
81 ICE_TX_DESC_CMD_RE;
82
83 tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT;
84 tx_buf->raw_buf = raw_packet;
85
86 tx_desc->cmd_type_offset_bsz =
87 ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0);
88
89 /* Force memory write to complete before letting h/w know
90 * there are new descriptors to fetch.
91 */
92 wmb();
93
94 /* mark the data descriptor to be watched */
95 first->next_to_watch = tx_desc;
96
97 writel(tx_ring->next_to_use, tx_ring->tail);
98
99 return 0;
100}
101
102/**
103 * ice_unmap_and_free_tx_buf - Release a Tx buffer
104 * @ring: the ring that owns the buffer
105 * @tx_buf: the buffer to free
106 */
107static void
108ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
109{
110 if (tx_buf->skb) {
111 if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
112 devm_kfree(ring->dev, tx_buf->raw_buf);
113 else if (ice_ring_is_xdp(ring))
114 page_frag_free(tx_buf->raw_buf);
115 else
116 dev_kfree_skb_any(tx_buf->skb);
117 if (dma_unmap_len(tx_buf, len))
118 dma_unmap_single(ring->dev,
119 dma_unmap_addr(tx_buf, dma),
120 dma_unmap_len(tx_buf, len),
121 DMA_TO_DEVICE);
122 } else if (dma_unmap_len(tx_buf, len)) {
123 dma_unmap_page(ring->dev,
124 dma_unmap_addr(tx_buf, dma),
125 dma_unmap_len(tx_buf, len),
126 DMA_TO_DEVICE);
127 }
128
129 tx_buf->next_to_watch = NULL;
130 tx_buf->skb = NULL;
131 dma_unmap_len_set(tx_buf, len, 0);
132 /* tx_buf must be completely set up in the transmit path */
133}
134
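/**
 * txring_txq - Return the netdev Tx queue backing a given Tx ring
 * @ring: Tx ring to find the netdev queue for
 */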
135static struct netdev_queue *txring_txq(const struct ice_ring *ring)
136{
137 return netdev_get_tx_queue(ring->netdev, ring->q_index);
138}
139
140/**
141 * ice_clean_tx_ring - Free any empty Tx buffers
142 * @tx_ring: ring to be cleaned
143 */
144void ice_clean_tx_ring(struct ice_ring *tx_ring)
145{
146 u16 i;
147
148 if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_umem) {
149 ice_xsk_clean_xdp_ring(tx_ring);
150 goto tx_skip_free;
151 }
152
153 /* ring already cleared, nothing to do */
154 if (!tx_ring->tx_buf)
155 return;
156
157 /* Free all the Tx ring sk_buffs */
158 for (i = 0; i < tx_ring->count; i++)
159 ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);
160
161tx_skip_free:
162 memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);
163
164 /* Zero out the descriptor ring */
165 memset(tx_ring->desc, 0, tx_ring->size);
166
167 tx_ring->next_to_use = 0;
168 tx_ring->next_to_clean = 0;
169
170 if (!tx_ring->netdev)
171 return;
172
173 /* cleanup Tx queue statistics */
174 netdev_tx_reset_queue(txring_txq(tx_ring));
175}
176
177/**
178 * ice_free_tx_ring - Free Tx resources per queue
179 * @tx_ring: Tx descriptor ring for a specific queue
180 *
181 * Free all transmit software resources
182 */
183void ice_free_tx_ring(struct ice_ring *tx_ring)
184{
185 ice_clean_tx_ring(tx_ring);
186 devm_kfree(tx_ring->dev, tx_ring->tx_buf);
187 tx_ring->tx_buf = NULL;
188
189 if (tx_ring->desc) {
190 dmam_free_coherent(tx_ring->dev, tx_ring->size,
191 tx_ring->desc, tx_ring->dma);
192 tx_ring->desc = NULL;
193 }
194}
195
196/**
197 * ice_clean_tx_irq - Reclaim resources after transmit completes
198 * @tx_ring: Tx ring to clean
199 * @napi_budget: Used to determine if we are in netpoll
200 *
201 * Returns true if there's any budget left (i.e. the clean is finished)
202 */
203static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
204{
205 unsigned int total_bytes = 0, total_pkts = 0;
206 unsigned int budget = ICE_DFLT_IRQ_WORK;
207 struct ice_vsi *vsi = tx_ring->vsi;
208 s16 i = tx_ring->next_to_clean;
209 struct ice_tx_desc *tx_desc;
210 struct ice_tx_buf *tx_buf;
211
212 tx_buf = &tx_ring->tx_buf[i];
213 tx_desc = ICE_TX_DESC(tx_ring, i);
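	/* i is biased negative (index minus ring count) so wraparound can be
	 * detected with a cheap (!i) test below; the ring count is added back
	 * before next_to_clean is written
	 */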
214 i -= tx_ring->count;
215
216 prefetch(&vsi->state);
217
218 do {
219 struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
220
221 /* if next_to_watch is not set then there is no work pending */
222 if (!eop_desc)
223 break;
224
225 smp_rmb(); /* prevent any other reads prior to eop_desc */
226
227 /* if the descriptor isn't done, no work yet to do */
228 if (!(eop_desc->cmd_type_offset_bsz &
229 cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
230 break;
231
232 /* clear next_to_watch to prevent false hangs */
233 tx_buf->next_to_watch = NULL;
234
235 /* update the statistics for this packet */
236 total_bytes += tx_buf->bytecount;
237 total_pkts += tx_buf->gso_segs;
238
239 if (ice_ring_is_xdp(tx_ring))
240 page_frag_free(tx_buf->raw_buf);
241 else
242 /* free the skb */
243 napi_consume_skb(tx_buf->skb, napi_budget);
244
245 /* unmap skb header data */
246 dma_unmap_single(tx_ring->dev,
247 dma_unmap_addr(tx_buf, dma),
248 dma_unmap_len(tx_buf, len),
249 DMA_TO_DEVICE);
250
251 /* clear tx_buf data */
252 tx_buf->skb = NULL;
253 dma_unmap_len_set(tx_buf, len, 0);
254
255 /* unmap remaining buffers */
256 while (tx_desc != eop_desc) {
257 tx_buf++;
258 tx_desc++;
259 i++;
260 if (unlikely(!i)) {
261 i -= tx_ring->count;
262 tx_buf = tx_ring->tx_buf;
263 tx_desc = ICE_TX_DESC(tx_ring, 0);
264 }
265
266 /* unmap any remaining paged data */
267 if (dma_unmap_len(tx_buf, len)) {
268 dma_unmap_page(tx_ring->dev,
269 dma_unmap_addr(tx_buf, dma),
270 dma_unmap_len(tx_buf, len),
271 DMA_TO_DEVICE);
272 dma_unmap_len_set(tx_buf, len, 0);
273 }
274 }
275
276 /* move us one more past the eop_desc for start of next pkt */
277 tx_buf++;
278 tx_desc++;
279 i++;
280 if (unlikely(!i)) {
281 i -= tx_ring->count;
282 tx_buf = tx_ring->tx_buf;
283 tx_desc = ICE_TX_DESC(tx_ring, 0);
284 }
285
286 prefetch(tx_desc);
287
288 /* update budget accounting */
289 budget--;
290 } while (likely(budget));
291
292 i += tx_ring->count;
293 tx_ring->next_to_clean = i;
294
295 ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
296
297 if (ice_ring_is_xdp(tx_ring))
298 return !!budget;
299
300 netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
301 total_bytes);
302
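/* only wake the queue once there is room for at least two worst-case frames
 * (DESC_NEEDED descriptors each) so it is not stopped again right away
 */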
303#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
304 if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
305 (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
306 /* Make sure that anybody stopping the queue after this
307 * sees the new next_to_clean.
308 */
309 smp_mb();
310 if (__netif_subqueue_stopped(tx_ring->netdev,
311 tx_ring->q_index) &&
312 !test_bit(__ICE_DOWN, vsi->state)) {
313 netif_wake_subqueue(tx_ring->netdev,
314 tx_ring->q_index);
315 ++tx_ring->tx_stats.restart_q;
316 }
317 }
318
319 return !!budget;
320}
321
322/**
323 * ice_setup_tx_ring - Allocate the Tx descriptors
324 * @tx_ring: the Tx ring to set up
325 *
326 * Return 0 on success, negative on error
327 */
328int ice_setup_tx_ring(struct ice_ring *tx_ring)
329{
330 struct device *dev = tx_ring->dev;
331
332 if (!dev)
333 return -ENOMEM;
334
335 /* warn if we are about to overwrite the pointer */
336 WARN_ON(tx_ring->tx_buf);
337 tx_ring->tx_buf =
338 devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
339 GFP_KERNEL);
340 if (!tx_ring->tx_buf)
341 return -ENOMEM;
342
343 /* round up to nearest page */
344 tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
345 PAGE_SIZE);
346 tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
347 GFP_KERNEL);
348 if (!tx_ring->desc) {
349 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
350 tx_ring->size);
351 goto err;
352 }
353
354 tx_ring->next_to_use = 0;
355 tx_ring->next_to_clean = 0;
356 tx_ring->tx_stats.prev_pkt = -1;
357 return 0;
358
359err:
360 devm_kfree(dev, tx_ring->tx_buf);
361 tx_ring->tx_buf = NULL;
362 return -ENOMEM;
363}
364
365/**
366 * ice_clean_rx_ring - Free Rx buffers
367 * @rx_ring: ring to be cleaned
368 */
369void ice_clean_rx_ring(struct ice_ring *rx_ring)
370{
371 struct device *dev = rx_ring->dev;
372 u16 i;
373
374 /* ring already cleared, nothing to do */
375 if (!rx_ring->rx_buf)
376 return;
377
378 if (rx_ring->xsk_umem) {
379 ice_xsk_clean_rx_ring(rx_ring);
380 goto rx_skip_free;
381 }
382
383 /* Free all the Rx ring sk_buffs */
384 for (i = 0; i < rx_ring->count; i++) {
385 struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
386
387 if (rx_buf->skb) {
388 dev_kfree_skb(rx_buf->skb);
389 rx_buf->skb = NULL;
390 }
391 if (!rx_buf->page)
392 continue;
393
394 /* Invalidate cache lines that may have been written to by
395 * device so that we avoid corrupting memory.
396 */
397 dma_sync_single_range_for_cpu(dev, rx_buf->dma,
398 rx_buf->page_offset,
399 rx_ring->rx_buf_len,
400 DMA_FROM_DEVICE);
401
402 /* free resources associated with mapping */
403 dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
404 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
405 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
406
407 rx_buf->page = NULL;
408 rx_buf->page_offset = 0;
409 }
410
411rx_skip_free:
412 memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);
413
414 /* Zero out the descriptor ring */
415 memset(rx_ring->desc, 0, rx_ring->size);
416
417 rx_ring->next_to_alloc = 0;
418 rx_ring->next_to_clean = 0;
419 rx_ring->next_to_use = 0;
420}
421
422/**
423 * ice_free_rx_ring - Free Rx resources
424 * @rx_ring: ring to clean the resources from
425 *
426 * Free all receive software resources
427 */
428void ice_free_rx_ring(struct ice_ring *rx_ring)
429{
430 ice_clean_rx_ring(rx_ring);
431 if (rx_ring->vsi->type == ICE_VSI_PF)
432 if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
433 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
434 rx_ring->xdp_prog = NULL;
435 devm_kfree(rx_ring->dev, rx_ring->rx_buf);
436 rx_ring->rx_buf = NULL;
437
438 if (rx_ring->desc) {
439 dmam_free_coherent(rx_ring->dev, rx_ring->size,
440 rx_ring->desc, rx_ring->dma);
441 rx_ring->desc = NULL;
442 }
443}
444
445/**
446 * ice_setup_rx_ring - Allocate the Rx descriptors
447 * @rx_ring: the Rx ring to set up
448 *
449 * Return 0 on success, negative on error
450 */
451int ice_setup_rx_ring(struct ice_ring *rx_ring)
452{
453 struct device *dev = rx_ring->dev;
454
455 if (!dev)
456 return -ENOMEM;
457
458 /* warn if we are about to overwrite the pointer */
459 WARN_ON(rx_ring->rx_buf);
460 rx_ring->rx_buf =
461 devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
462 GFP_KERNEL);
463 if (!rx_ring->rx_buf)
464 return -ENOMEM;
465
466 /* round up to nearest page */
467 rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
468 PAGE_SIZE);
469 rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
470 GFP_KERNEL);
471 if (!rx_ring->desc) {
472 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
473 rx_ring->size);
474 goto err;
475 }
476
477 rx_ring->next_to_use = 0;
478 rx_ring->next_to_clean = 0;
479
480 if (ice_is_xdp_ena_vsi(rx_ring->vsi))
481 WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
482
483 if (rx_ring->vsi->type == ICE_VSI_PF &&
484 !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
485 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
486 rx_ring->q_index))
487 goto err;
488 return 0;
489
490err:
491 devm_kfree(dev, rx_ring->rx_buf);
492 rx_ring->rx_buf = NULL;
493 return -ENOMEM;
494}
495
496/**
497 * ice_rx_offset - Return expected offset into page to access data
498 * @rx_ring: Ring we are requesting offset of
499 *
500 * Returns the offset value for ring into the data buffer.
501 */
502static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
503{
504 if (ice_ring_uses_build_skb(rx_ring))
505 return ICE_SKB_PAD;
506 else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
507 return XDP_PACKET_HEADROOM;
508
509 return 0;
510}
511
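/**
 * ice_rx_frame_truesize - estimate the truesize of a received frame
 * @rx_ring: ring the frame arrived on
 * @size: packet length, only used when PAGE_SIZE >= 8192
 *
 * Used to populate xdp_buff::frame_sz; on 4K pages a fixed half-page buffer
 * is assumed, on larger pages the estimate follows the actual frame length.
 */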
512static unsigned int
513ice_rx_frame_truesize(struct ice_ring *rx_ring, unsigned int __maybe_unused size)
514{
515 unsigned int truesize;
516
517#if (PAGE_SIZE < 8192)
518 truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
519#else
520 truesize = ice_rx_offset(rx_ring) ?
521 SKB_DATA_ALIGN(ice_rx_offset(rx_ring) + size) +
522 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
523 SKB_DATA_ALIGN(size);
524#endif
525 return truesize;
526}
527
528/**
529 * ice_run_xdp - Executes an XDP program on initialized xdp_buff
530 * @rx_ring: Rx ring
531 * @xdp: xdp_buff used as input to the XDP program
532 * @xdp_prog: XDP program to run
533 *
534 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
535 */
536static int
537ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
538 struct bpf_prog *xdp_prog)
539{
540 int err, result = ICE_XDP_PASS;
541 struct ice_ring *xdp_ring;
542 u32 act;
543
544 act = bpf_prog_run_xdp(xdp_prog, xdp);
545 switch (act) {
546 case XDP_PASS:
547 break;
548 case XDP_TX:
549 xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
550 result = ice_xmit_xdp_buff(xdp, xdp_ring);
551 break;
552 case XDP_REDIRECT:
553 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
554 result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
555 break;
556 default:
557 bpf_warn_invalid_xdp_action(act);
558 fallthrough;
559 case XDP_ABORTED:
560 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
561 fallthrough;
562 case XDP_DROP:
563 result = ICE_XDP_CONSUMED;
564 break;
565 }
566
567 return result;
568}
569
570/**
571 * ice_xdp_xmit - submit packets to XDP ring for transmission
572 * @dev: netdev
573 * @n: number of XDP frames to be transmitted
574 * @frames: XDP frames to be transmitted
575 * @flags: transmit flags
576 *
577 * Returns number of frames successfully sent. Frames that fail are
578 * freed via the XDP return API.
579 * For error cases, a negative errno code is returned and no frames
580 * are transmitted (caller must handle freeing frames).
581 */
582int
583ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
584 u32 flags)
585{
586 struct ice_netdev_priv *np = netdev_priv(dev);
587 unsigned int queue_index = smp_processor_id();
588 struct ice_vsi *vsi = np->vsi;
589 struct ice_ring *xdp_ring;
590 int drops = 0, i;
591
592 if (test_bit(__ICE_DOWN, vsi->state))
593 return -ENETDOWN;
594
595 if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
596 return -ENXIO;
597
598 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
599 return -EINVAL;
600
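	/* each CPU owns a dedicated XDP Tx ring (bounds-checked against
	 * num_xdp_txq above), so frames can be posted without locking
	 */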
601 xdp_ring = vsi->xdp_rings[queue_index];
602 for (i = 0; i < n; i++) {
603 struct xdp_frame *xdpf = frames[i];
604 int err;
605
606 err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
607 if (err != ICE_XDP_TX) {
608 xdp_return_frame_rx_napi(xdpf);
609 drops++;
610 }
611 }
612
613 if (unlikely(flags & XDP_XMIT_FLUSH))
614 ice_xdp_ring_update_tail(xdp_ring);
615
616 return n - drops;
617}
618
619/**
620 * ice_alloc_mapped_page - recycle or make a new page
621 * @rx_ring: ring to use
622 * @bi: rx_buf struct to modify
623 *
624 * Returns true if the page was successfully allocated or
625 * reused.
626 */
627static bool
628ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
629{
630 struct page *page = bi->page;
631 dma_addr_t dma;
632
633 /* since we are recycling buffers we should seldom need to alloc */
634 if (likely(page))
635 return true;
636
637 /* alloc new page for storage */
638 page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
639 if (unlikely(!page)) {
640 rx_ring->rx_stats.alloc_page_failed++;
641 return false;
642 }
643
644 /* map page for use */
645 dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
646 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
647
648 /* if mapping failed free memory back to system since
649 * there isn't much point in holding memory we can't use
650 */
651 if (dma_mapping_error(rx_ring->dev, dma)) {
652 __free_pages(page, ice_rx_pg_order(rx_ring));
653 rx_ring->rx_stats.alloc_page_failed++;
654 return false;
655 }
656
657 bi->dma = dma;
658 bi->page = page;
659 bi->page_offset = ice_rx_offset(rx_ring);
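	/* take a large up-front page reference and hand references back via
	 * pagecnt_bias so the page can be reused many times without an
	 * atomic refcount update per frame
	 */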
660 page_ref_add(page, USHRT_MAX - 1);
661 bi->pagecnt_bias = USHRT_MAX;
662
663 return true;
664}
665
666/**
667 * ice_alloc_rx_bufs - Replace used receive buffers
668 * @rx_ring: ring to place buffers on
669 * @cleaned_count: number of buffers to replace
670 *
671 * Returns false if all allocations were successful, true if any fail. Returning
672 * true signals to the caller that we didn't replace cleaned_count buffers and
673 * there is more work to do.
674 *
675 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
676 * buffers. Then bump tail at most one time. Grouping like this lets us avoid
677 * multiple tail writes per call.
678 */
679bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
680{
681 union ice_32b_rx_flex_desc *rx_desc;
682 u16 ntu = rx_ring->next_to_use;
683 struct ice_rx_buf *bi;
684
685 /* do nothing if no valid netdev defined */
686 if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
687 !cleaned_count)
688 return false;
689
690 /* get the Rx descriptor and buffer based on next_to_use */
691 rx_desc = ICE_RX_DESC(rx_ring, ntu);
692 bi = &rx_ring->rx_buf[ntu];
693
694 do {
695 /* if we fail here, we have work remaining */
696 if (!ice_alloc_mapped_page(rx_ring, bi))
697 break;
698
699 /* sync the buffer for use by the device */
700 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
701 bi->page_offset,
702 rx_ring->rx_buf_len,
703 DMA_FROM_DEVICE);
704
705 /* Refresh the desc even if buffer_addrs didn't change
706 * because each write-back erases this info.
707 */
708 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
709
710 rx_desc++;
711 bi++;
712 ntu++;
713 if (unlikely(ntu == rx_ring->count)) {
714 rx_desc = ICE_RX_DESC(rx_ring, 0);
715 bi = rx_ring->rx_buf;
716 ntu = 0;
717 }
718
719 /* clear the status bits for the next_to_use descriptor */
720 rx_desc->wb.status_error0 = 0;
721
722 cleaned_count--;
723 } while (cleaned_count);
724
725 if (rx_ring->next_to_use != ntu)
726 ice_release_rx_desc(rx_ring, ntu);
727
728 return !!cleaned_count;
729}
730
731/**
732 * ice_page_is_reserved - check if reuse is possible
733 * @page: page struct to check
734 */
735static bool ice_page_is_reserved(struct page *page)
736{
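	/* pages from a remote NUMA node or from the pfmemalloc emergency
	 * reserves are not worth holding on to for reuse
	 */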
737 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
738}
739
740/**
741 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
742 * @rx_buf: Rx buffer to adjust
743 * @size: Size of adjustment
744 *
745 * Update the offset within the page so that the Rx buffer will be ready to be
746 * reused. For systems with PAGE_SIZE < 8192 this function will flip the page
747 * offset so the second half of the page assigned to the Rx buffer is used;
748 * otherwise the offset is simply moved by "size" bytes.
749 */
750static void
751ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
752{
753#if (PAGE_SIZE < 8192)
754 /* flip page offset to other buffer */
755 rx_buf->page_offset ^= size;
756#else
757 /* move offset up to the next cache line */
758 rx_buf->page_offset += size;
759#endif
760}
761
762/**
763 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
764 * @rx_buf: buffer containing the page
765 *
766 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
767 * which will assign the current buffer to the buffer that next_to_alloc is
768 * pointing to; otherwise, the DMA mapping needs to be destroyed and
769 * page freed
770 */
771static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
772{
773 unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
774 struct page *page = rx_buf->page;
775
776 /* avoid re-using remote pages */
777 if (unlikely(ice_page_is_reserved(page)))
778 return false;
779
780#if (PAGE_SIZE < 8192)
781 /* if we are the only owner of the page we can reuse it */
782 if (unlikely((page_count(page) - pagecnt_bias) > 1))
783 return false;
784#else
785#define ICE_LAST_OFFSET \
786 (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
787 if (rx_buf->page_offset > ICE_LAST_OFFSET)
788 return false;
789#endif /* PAGE_SIZE < 8192) */
790
791 /* If we have drained the page fragment pool we need to update
792 * the pagecnt_bias and page count so that we fully restock the
793 * number of references the driver holds.
794 */
795 if (unlikely(pagecnt_bias == 1)) {
796 page_ref_add(page, USHRT_MAX - 1);
797 rx_buf->pagecnt_bias = USHRT_MAX;
798 }
799
800 return true;
801}
802
803/**
804 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
805 * @rx_ring: Rx descriptor ring to transact packets on
806 * @rx_buf: buffer containing page to add
807 * @skb: sk_buff to place the data into
808 * @size: packet length from rx_desc
809 *
810 * This function will add the data contained in rx_buf->page to the skb.
811 * It will just attach the page as a frag to the skb.
812 * The function will then update the page offset.
813 */
814static void
815ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
816 struct sk_buff *skb, unsigned int size)
817{
818#if (PAGE_SIZE >= 8192)
819 unsigned int truesize = SKB_DATA_ALIGN(size + ice_rx_offset(rx_ring));
820#else
821 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
822#endif
823
824 if (!size)
825 return;
826 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
827 rx_buf->page_offset, size, truesize);
828
829 /* page is being used so we must update the page offset */
830 ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
831}
832
833/**
834 * ice_reuse_rx_page - page flip buffer and store it back on the ring
835 * @rx_ring: Rx descriptor ring to store buffers on
836 * @old_buf: donor buffer to have page reused
837 *
838 * Synchronizes page for reuse by the adapter
839 */
840static void
841ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
842{
843 u16 nta = rx_ring->next_to_alloc;
844 struct ice_rx_buf *new_buf;
845
846 new_buf = &rx_ring->rx_buf[nta];
847
848 /* update, and store next to alloc */
849 nta++;
850 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
851
852 /* Transfer page from old buffer to new buffer.
853 * Move each member individually to avoid possible store
854 * forwarding stalls and unnecessary copy of skb.
855 */
856 new_buf->dma = old_buf->dma;
857 new_buf->page = old_buf->page;
858 new_buf->page_offset = old_buf->page_offset;
859 new_buf->pagecnt_bias = old_buf->pagecnt_bias;
860}
861
862/**
863 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
864 * @rx_ring: Rx descriptor ring to transact packets on
865 * @skb: skb to be used
866 * @size: size of buffer to add to skb
867 *
868 * This function will pull an Rx buffer from the ring and synchronize it
869 * for use by the CPU.
870 */
871static struct ice_rx_buf *
872ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
873 const unsigned int size)
874{
875 struct ice_rx_buf *rx_buf;
876
877 rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
878 prefetchw(rx_buf->page);
879 *skb = rx_buf->skb;
880
881 if (!size)
882 return rx_buf;
883 /* we are reusing so sync this buffer for CPU use */
884 dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
885 rx_buf->page_offset, size,
886 DMA_FROM_DEVICE);
887
888 /* We have pulled a buffer for use, so decrement pagecnt_bias */
889 rx_buf->pagecnt_bias--;
890
891 return rx_buf;
892}
893
894/**
895 * ice_build_skb - Build skb around an existing buffer
896 * @rx_ring: Rx descriptor ring to transact packets on
897 * @rx_buf: Rx buffer to pull data from
898 * @xdp: xdp_buff pointing to the data
899 *
900 * This function builds an skb around an existing Rx buffer, taking care
901 * to set up the skb correctly and avoid any memcpy overhead.
902 */
903static struct sk_buff *
904ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
905 struct xdp_buff *xdp)
906{
907 u8 metasize = xdp->data - xdp->data_meta;
908#if (PAGE_SIZE < 8192)
909 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
910#else
911 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
912 SKB_DATA_ALIGN(xdp->data_end -
913 xdp->data_hard_start);
914#endif
915 struct sk_buff *skb;
916
917 /* Prefetch first cache line of first page. If xdp->data_meta
918 * is unused, this points exactly as xdp->data, otherwise we
919 * likely have a consumer accessing first few bytes of meta
920 * data, and then actual data.
921 */
922 prefetch(xdp->data_meta);
923#if L1_CACHE_BYTES < 128
924 prefetch((void *)(xdp->data + L1_CACHE_BYTES));
925#endif
926 /* build an skb around the page buffer */
927 skb = build_skb(xdp->data_hard_start, truesize);
928 if (unlikely(!skb))
929 return NULL;
930
931 /* must record the Rx queue, otherwise OS features such as
932 * symmetric queues won't work
933 */
934 skb_record_rx_queue(skb, rx_ring->q_index);
935
936 /* update pointers within the skb to store the data */
937 skb_reserve(skb, xdp->data - xdp->data_hard_start);
938 __skb_put(skb, xdp->data_end - xdp->data);
939 if (metasize)
940 skb_metadata_set(skb, metasize);
941
942 /* buffer is used by skb, update page_offset */
943 ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
944
945 return skb;
946}
947
948/**
949 * ice_construct_skb - Allocate skb and populate it
950 * @rx_ring: Rx descriptor ring to transact packets on
951 * @rx_buf: Rx buffer to pull data from
952 * @xdp: xdp_buff pointing to the data
953 *
954 * This function allocates an skb. It then populates it with the page
955 * data from the current receive descriptor, taking care to set up the
956 * skb correctly.
957 */
958static struct sk_buff *
959ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
960 struct xdp_buff *xdp)
961{
962 unsigned int size = xdp->data_end - xdp->data;
963 unsigned int headlen;
964 struct sk_buff *skb;
965
966 /* prefetch first cache line of first page */
967 prefetch(xdp->data);
968#if L1_CACHE_BYTES < 128
969 prefetch((void *)(xdp->data + L1_CACHE_BYTES));
970#endif /* L1_CACHE_BYTES */
971
972 /* allocate a skb to store the frags */
973 skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
974 GFP_ATOMIC | __GFP_NOWARN);
975 if (unlikely(!skb))
976 return NULL;
977
978 skb_record_rx_queue(skb, rx_ring->q_index);
979 /* Determine available headroom for copy */
980 headlen = size;
981 if (headlen > ICE_RX_HDR_SIZE)
982 headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);
983
984 /* align pull length to size of long to optimize memcpy performance */
985 memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
986 sizeof(long)));
987
988 /* if we exhaust the linear part then add what is left as a frag */
989 size -= headlen;
990 if (size) {
991#if (PAGE_SIZE >= 8192)
992 unsigned int truesize = SKB_DATA_ALIGN(size);
993#else
994 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
995#endif
996 skb_add_rx_frag(skb, 0, rx_buf->page,
997 rx_buf->page_offset + headlen, size, truesize);
998 /* buffer is used by skb, update page_offset */
999 ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
1000 } else {
1001 /* buffer is unused, reset bias back to rx_buf; data was copied
1002 * onto skb's linear part so there's no need for adjusting
1003 * page offset and we can reuse this buffer as-is
1004 */
1005 rx_buf->pagecnt_bias++;
1006 }
1007
1008 return skb;
1009}
1010
1011/**
1012 * ice_put_rx_buf - Clean up used buffer and either recycle or free
1013 * @rx_ring: Rx descriptor ring to transact packets on
1014 * @rx_buf: Rx buffer to pull data from
1015 *
1016 * This function will update next_to_clean and then clean up the contents
1017 * of the rx_buf. It will either recycle the buffer or unmap it and free
1018 * the associated resources.
1019 */
1020static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
1021{
1022 u16 ntc = rx_ring->next_to_clean + 1;
1023
1024 /* fetch, update, and store next to clean */
1025 ntc = (ntc < rx_ring->count) ? ntc : 0;
1026 rx_ring->next_to_clean = ntc;
1027
1028 if (!rx_buf)
1029 return;
1030
1031 if (ice_can_reuse_rx_page(rx_buf)) {
1032 /* hand second half of page back to the ring */
1033 ice_reuse_rx_page(rx_ring, rx_buf);
1034 } else {
1035 /* we are not reusing the buffer so unmap it */
1036 dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
1037 ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
1038 ICE_RX_DMA_ATTR);
1039 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
1040 }
1041
1042 /* clear contents of buffer_info */
1043 rx_buf->page = NULL;
1044 rx_buf->skb = NULL;
1045}
1046
1047/**
1048 * ice_is_non_eop - process handling of non-EOP buffers
1049 * @rx_ring: Rx ring being processed
1050 * @rx_desc: Rx descriptor for current buffer
1051 * @skb: Current socket buffer containing buffer in progress
1052 *
1053 * If the buffer is an EOP buffer, this function exits returning false,
1054 * otherwise return true indicating that this is in fact a non-EOP buffer.
1055 */
1056static bool
1057ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
1058 struct sk_buff *skb)
1059{
1060 /* if we are the last buffer then there is nothing else to do */
1061#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
1062 if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
1063 return false;
1064
1065 /* place skb in next buffer to be received */
1066 rx_ring->rx_buf[rx_ring->next_to_clean].skb = skb;
1067 rx_ring->rx_stats.non_eop_descs++;
1068
1069 return true;
1070}
1071
1072/**
1073 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
1074 * @rx_ring: Rx descriptor ring to transact packets on
1075 * @budget: Total limit on number of packets to process
1076 *
1077 * This function provides a "bounce buffer" approach to Rx interrupt
1078 * processing. The advantage to this is that on systems that have
1079 * expensive overhead for IOMMU access this provides a means of avoiding
1080 * it by maintaining the mapping of the page to the system.
1081 *
1082 * Returns amount of work completed
1083 */
1084int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
1085{
1086 unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
1087 u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
1088 unsigned int xdp_res, xdp_xmit = 0;
1089 struct bpf_prog *xdp_prog = NULL;
1090 struct xdp_buff xdp;
1091 bool failure;
1092
1093 xdp.rxq = &rx_ring->xdp_rxq;
1094 /* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
1095#if (PAGE_SIZE < 8192)
1096 xdp.frame_sz = ice_rx_frame_truesize(rx_ring, 0);
1097#endif
1098
1099 /* start the loop to process Rx packets bounded by 'budget' */
1100 while (likely(total_rx_pkts < (unsigned int)budget)) {
1101 union ice_32b_rx_flex_desc *rx_desc;
1102 struct ice_rx_buf *rx_buf;
1103 struct sk_buff *skb;
1104 unsigned int size;
1105 u16 stat_err_bits;
1106 u16 vlan_tag = 0;
1107 u8 rx_ptype;
1108
1109 /* get the Rx desc from Rx ring based on 'next_to_clean' */
1110 rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
1111
1112 /* status_error_len will always be zero for unused descriptors
1113 * because it's cleared in cleanup, and overlaps with hdr_addr
1114 * which is always zero because packet split isn't used. If the
1115 * hardware wrote DD then it will be non-zero.
1116 */
1117 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
1118 if (!ice_test_staterr(rx_desc, stat_err_bits))
1119 break;
1120
1121 /* This memory barrier is needed to keep us from reading
1122 * any other fields out of the rx_desc until we know the
1123 * DD bit is set.
1124 */
1125 dma_rmb();
1126
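		/* descriptors written back with the Flow Director RXDID carry
		 * programming status rather than packet data, and the control
		 * VSI has no netdev; just recycle the buffer slot
		 */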
1127 if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
1128 ice_put_rx_buf(rx_ring, NULL);
1129 cleaned_count++;
1130 continue;
1131 }
1132
1133 size = le16_to_cpu(rx_desc->wb.pkt_len) &
1134 ICE_RX_FLX_DESC_PKT_LEN_M;
1135
1136 /* retrieve a buffer from the ring */
1137 rx_buf = ice_get_rx_buf(rx_ring, &skb, size);
1138
1139 if (!size) {
1140 xdp.data = NULL;
1141 xdp.data_end = NULL;
1142 xdp.data_hard_start = NULL;
1143 xdp.data_meta = NULL;
1144 goto construct_skb;
1145 }
1146
1147 xdp.data = page_address(rx_buf->page) + rx_buf->page_offset;
1148 xdp.data_hard_start = xdp.data - ice_rx_offset(rx_ring);
1149 xdp.data_meta = xdp.data;
1150 xdp.data_end = xdp.data + size;
1151#if (PAGE_SIZE > 4096)
1152 /* At larger PAGE_SIZE, frame_sz depends on the frame length */
1153 xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
1154#endif
1155
1156 rcu_read_lock();
1157 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
1158 if (!xdp_prog) {
1159 rcu_read_unlock();
1160 goto construct_skb;
1161 }
1162
1163 xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog);
1164 rcu_read_unlock();
1165 if (!xdp_res)
1166 goto construct_skb;
1167 if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
1168 xdp_xmit |= xdp_res;
1169 ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz);
1170 } else {
1171 rx_buf->pagecnt_bias++;
1172 }
1173 total_rx_bytes += size;
1174 total_rx_pkts++;
1175
1176 cleaned_count++;
1177 ice_put_rx_buf(rx_ring, rx_buf);
1178 continue;
1179construct_skb:
1180 if (skb) {
1181 ice_add_rx_frag(rx_ring, rx_buf, skb, size);
1182 } else if (likely(xdp.data)) {
1183 if (ice_ring_uses_build_skb(rx_ring))
1184 skb = ice_build_skb(rx_ring, rx_buf, &xdp);
1185 else
1186 skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
1187 }
1188 /* exit if we failed to retrieve a buffer */
1189 if (!skb) {
1190 rx_ring->rx_stats.alloc_buf_failed++;
1191 if (rx_buf)
1192 rx_buf->pagecnt_bias++;
1193 break;
1194 }
1195
1196 ice_put_rx_buf(rx_ring, rx_buf);
1197 cleaned_count++;
1198
1199 /* skip further processing if this is not the EOP descriptor */
1200 if (ice_is_non_eop(rx_ring, rx_desc, skb))
1201 continue;
1202
1203 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
1204 if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) {
1205 dev_kfree_skb_any(skb);
1206 continue;
1207 }
1208
1209 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
1210 if (ice_test_staterr(rx_desc, stat_err_bits))
1211 vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);
1212
1213 /* pad the skb if needed, to make a valid ethernet frame */
1214 if (eth_skb_pad(skb)) {
1215 skb = NULL;
1216 continue;
1217 }
1218
1219 /* probably a little skewed due to removing CRC */
1220 total_rx_bytes += skb->len;
1221
1222 /* populate checksum, VLAN, and protocol */
1223 rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
1224 ICE_RX_FLEX_DESC_PTYPE_M;
1225
1226 ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
1227
1228 /* send completed skb up the stack */
1229 ice_receive_skb(rx_ring, skb, vlan_tag);
1230
1231 /* update budget accounting */
1232 total_rx_pkts++;
1233 }
1234
1235 /* return up to cleaned_count buffers to hardware */
1236 failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
1237
1238 if (xdp_prog)
1239 ice_finalize_xdp_rx(rx_ring, xdp_xmit);
1240
1241 ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes);
1242
1243 /* guarantee a trip back through this routine if there was a failure */
1244 return failure ? budget : (int)total_rx_pkts;
1245}
1246
1247/**
1248 * ice_adjust_itr_by_size_and_speed - Adjust ITR based on current traffic
1249 * @port_info: port_info structure containing the current link speed
1250 * @avg_pkt_size: average size of Tx or Rx packets based on clean routine
1251 * @itr: ITR value to update
1252 *
1253 * Calculate how big of an increment should be applied to the ITR value passed
1254 * in based on wmem_default, SKB overhead, ethernet overhead, and the current
1255 * link speed.
1256 *
1257 * The following is a calculation derived from:
1258 * wmem_default / (size + overhead) = desired_pkts_per_int
1259 * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
1260 * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
1261 *
1262 * Assuming wmem_default is 212992 and overhead is 640 bytes per
1263 * packet (256 skb, 64 headroom, 320 shared info), we can reduce the
1264 * formula down to:
1265 *
1266 * wmem_default * bits_per_byte * usecs_per_sec pkt_size + 24
1267 * ITR = -------------------------------------------- * --------------
1268 * rate pkt_size + 640
1269 */
1270static unsigned int
1271ice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info,
1272 unsigned int avg_pkt_size,
1273 unsigned int itr)
1274{
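	/* per the formula in the comment above, the per-speed constants below
	 * are wmem_default * 8 bits divided by the link rate, in usecs
	 * (212992 * 8 / 100G ~= 17, ... / 10G ~= 170); e.g. at 10Gb/s with
	 * 1500 byte packets this adds
	 * DIV_ROUND_UP(170 * (1500 + 24), 1500 + 640) = 122 usecs
	 */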
1275 switch (port_info->phy.link_info.link_speed) {
1276 case ICE_AQ_LINK_SPEED_100GB:
1277 itr += DIV_ROUND_UP(17 * (avg_pkt_size + 24),
1278 avg_pkt_size + 640);
1279 break;
1280 case ICE_AQ_LINK_SPEED_50GB:
1281 itr += DIV_ROUND_UP(34 * (avg_pkt_size + 24),
1282 avg_pkt_size + 640);
1283 break;
1284 case ICE_AQ_LINK_SPEED_40GB:
1285 itr += DIV_ROUND_UP(43 * (avg_pkt_size + 24),
1286 avg_pkt_size + 640);
1287 break;
1288 case ICE_AQ_LINK_SPEED_25GB:
1289 itr += DIV_ROUND_UP(68 * (avg_pkt_size + 24),
1290 avg_pkt_size + 640);
1291 break;
1292 case ICE_AQ_LINK_SPEED_20GB:
1293 itr += DIV_ROUND_UP(85 * (avg_pkt_size + 24),
1294 avg_pkt_size + 640);
1295 break;
1296 case ICE_AQ_LINK_SPEED_10GB:
1297 default:
1298 itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24),
1299 avg_pkt_size + 640);
1300 break;
1301 }
1302
1303 if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
1304 itr &= ICE_ITR_ADAPTIVE_LATENCY;
1305 itr += ICE_ITR_ADAPTIVE_MAX_USECS;
1306 }
1307
1308 return itr;
1309}
1310
1311/**
1312 * ice_update_itr - update the adaptive ITR value based on statistics
1313 * @q_vector: structure containing interrupt and ring information
1314 * @rc: structure containing ring performance data
1315 *
1316 * Stores a new ITR value based on packets and byte
1317 * counts during the last interrupt. The advantage of per interrupt
1318 * computation is faster updates and more accurate ITR for the current
1319 * traffic pattern. Constants in this function were computed
1320 * based on theoretical maximum wire speed and thresholds were set based
1321 * on testing data as well as attempting to minimize response time
1322 * while increasing bulk throughput.
1323 */
1324static void
1325ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc)
1326{
1327 unsigned long next_update = jiffies;
1328 unsigned int packets, bytes, itr;
1329 bool container_is_rx;
1330
1331 if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting))
1332 return;
1333
1334 /* If itr_countdown is set it means we programmed an ITR within
1335 * the last 4 interrupt cycles. This has a side effect of us
1336 * potentially firing an early interrupt. In order to work around
1337 * this we need to throw out any data received for a few
1338 * interrupts following the update.
1339 */
1340 if (q_vector->itr_countdown) {
1341 itr = rc->target_itr;
1342 goto clear_counts;
1343 }
1344
1345 container_is_rx = (&q_vector->rx == rc);
1346 /* For Rx we want to push the delay up and default to low latency.
1347 * for Tx we want to pull the delay down and default to high latency.
1348 */
1349 itr = container_is_rx ?
1350 ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY :
1351 ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY;
1352
1353 /* If we didn't update within up to 1 - 2 jiffies we can assume
1354 * that either packets are coming in so slow there hasn't been
1355 * any work, or that there is so much work that NAPI is dealing
1356 * with interrupt moderation and we don't need to do anything.
1357 */
1358 if (time_after(next_update, rc->next_update))
1359 goto clear_counts;
1360
1361 prefetch(q_vector->vsi->port_info);
1362
1363 packets = rc->total_pkts;
1364 bytes = rc->total_bytes;
1365
1366 if (container_is_rx) {
1367 /* If Rx there are 1 to 4 packets and bytes are less than
1368 * 9000 assume insufficient data to use bulk rate limiting
1369 * approach unless Tx is already in bulk rate limiting. We
1370 * are likely latency driven.
1371 */
1372 if (packets && packets < 4 && bytes < 9000 &&
1373 (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) {
1374 itr = ICE_ITR_ADAPTIVE_LATENCY;
1375 goto adjust_by_size_and_speed;
1376 }
1377 } else if (packets < 4) {
1378 /* If we have Tx and Rx ITR maxed and Tx ITR is running in
1379 * bulk mode and we are receiving 4 or fewer packets just
1380 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
1381 * that the Rx can relax.
1382 */
1383 if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS &&
1384 (q_vector->rx.target_itr & ICE_ITR_MASK) ==
1385 ICE_ITR_ADAPTIVE_MAX_USECS)
1386 goto clear_counts;
1387 } else if (packets > 32) {
1388 /* If we have processed over 32 packets in a single interrupt
1389 * for Tx assume we need to switch over to "bulk" mode.
1390 */
1391 rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY;
1392 }
1393
1394 /* We have no packets to actually measure against. This means
1395 * either one of the other queues on this vector is active or
1396 * we are a Tx queue doing TSO with too high of an interrupt rate.
1397 *
1398 * Between 4 and 56 we can assume that our current interrupt delay
1399 * is only slightly too low. As such we should increase it by a small
1400 * fixed amount.
1401 */
1402 if (packets < 56) {
1403 itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC;
1404 if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
1405 itr &= ICE_ITR_ADAPTIVE_LATENCY;
1406 itr += ICE_ITR_ADAPTIVE_MAX_USECS;
1407 }
1408 goto clear_counts;
1409 }
1410
1411 if (packets <= 256) {
1412 itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
1413 itr &= ICE_ITR_MASK;
1414
1415 /* Between 56 and 112 is our "goldilocks" zone where we are
1416 * working out "just right". Just report that our current
1417 * ITR is good for us.
1418 */
1419 if (packets <= 112)
1420 goto clear_counts;
1421
1422 /* If packet count is 128 or greater we are likely looking
1423 * at a slight overrun of the delay we want. Try halving
1424 * our delay to see if that will cut the number of packets
1425 * in half per interrupt.
1426 */
1427 itr >>= 1;
1428 itr &= ICE_ITR_MASK;
1429 if (itr < ICE_ITR_ADAPTIVE_MIN_USECS)
1430 itr = ICE_ITR_ADAPTIVE_MIN_USECS;
1431
1432 goto clear_counts;
1433 }
1434
1435 /* The paths below assume we are dealing with a bulk ITR since
1436 * number of packets is greater than 256. We are just going to have
1437 * to compute a value and try to bring the count under control,
1438 * though for smaller packet sizes there isn't much we can do as
1439 * NAPI polling will likely be kicking in sooner rather than later.
1440 */
1441 itr = ICE_ITR_ADAPTIVE_BULK;
1442
1443adjust_by_size_and_speed:
1444
1445 /* based on checks above packets cannot be 0 so division is safe */
1446 itr = ice_adjust_itr_by_size_and_speed(q_vector->vsi->port_info,
1447 bytes / packets, itr);
1448
1449clear_counts:
1450 /* write back value */
1451 rc->target_itr = itr;
1452
1453 /* next update should occur within next jiffy */
1454 rc->next_update = next_update + 1;
1455
1456 rc->total_bytes = 0;
1457 rc->total_pkts = 0;
1458}
1459
1460/**
1461 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
1462 * @itr_idx: interrupt throttling index
1463 * @itr: interrupt throttling value in usecs
1464 */
1465static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
1466{
1467 /* The ITR value is reported in microseconds, and the register value is
1468 * recorded in 2 microsecond units. For this reason we only need to
1469 * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
1470 * granularity as a shift instead of division. The mask makes sure the
1471 * ITR value is never odd so we don't accidentally write into the field
1472 * prior to the ITR field.
1473 */
1474 itr &= ICE_ITR_MASK;
1475
1476 return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
1477 (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
1478 (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
1479}
1480
1481/* The act of updating the ITR will cause it to immediately trigger. In order
1482 * to prevent this from throwing off adaptive update statistics we defer the
1483 * update so that it can only happen so often. So after either Tx or Rx are
1484 * updated we make the adaptive scheme wait until either the ITR completely
1485 * expires via the next_update expiration or we have been through at least
1486 * 3 interrupts.
1487 */
1488#define ITR_COUNTDOWN_START 3
1489
1490/**
1491 * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt
1492 * @q_vector: q_vector for which ITR is being updated and interrupt enabled
1493 */
1494static void ice_update_ena_itr(struct ice_q_vector *q_vector)
1495{
1496 struct ice_ring_container *tx = &q_vector->tx;
1497 struct ice_ring_container *rx = &q_vector->rx;
1498 struct ice_vsi *vsi = q_vector->vsi;
1499 u32 itr_val;
1500
1501 /* when exiting WB_ON_ITR let's set a low ITR value and trigger
1502 * interrupts to expire right away in case we have more work ready to go
1503 * already
1504 */
1505 if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) {
1506 itr_val = ice_buildreg_itr(rx->itr_idx, ICE_WB_ON_ITR_USECS);
1507 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
1508 /* set target back to last user set value */
1509 rx->target_itr = rx->itr_setting;
1510 /* set current to what we just wrote and dynamic if needed */
1511 rx->current_itr = ICE_WB_ON_ITR_USECS |
1512 (rx->itr_setting & ICE_ITR_DYNAMIC);
1513 /* allow normal interrupt flow to start */
1514 q_vector->itr_countdown = 0;
1515 return;
1516 }
1517
1518 /* This will do nothing if dynamic updates are not enabled */
1519 ice_update_itr(q_vector, tx);
1520 ice_update_itr(q_vector, rx);
1521
1522 /* This block of logic allows us to get away with only updating
1523 * one ITR value with each interrupt. The idea is to perform a
1524 * pseudo-lazy update with the following criteria.
1525 *
1526 * 1. Rx is given higher priority than Tx if both are in same state
1527 * 2. If we must reduce an ITR, that is given highest priority.
1528 * 3. We then give priority to increasing ITR based on amount.
1529 */
1530 if (rx->target_itr < rx->current_itr) {
1531 /* Rx ITR needs to be reduced, this is highest priority */
1532 itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
1533 rx->current_itr = rx->target_itr;
1534 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1535 } else if ((tx->target_itr < tx->current_itr) ||
1536 ((rx->target_itr - rx->current_itr) <
1537 (tx->target_itr - tx->current_itr))) {
1538 /* Tx ITR needs to be reduced, this is second priority
1539 * Tx ITR needs to be increased more than Rx, fourth priority
1540 */
1541 itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr);
1542 tx->current_itr = tx->target_itr;
1543 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1544 } else if (rx->current_itr != rx->target_itr) {
1545 /* Rx ITR needs to be increased, third priority */
1546 itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
1547 rx->current_itr = rx->target_itr;
1548 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1549 } else {
1550 /* Still have to re-enable the interrupts */
1551 itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
1552 if (q_vector->itr_countdown)
1553 q_vector->itr_countdown--;
1554 }
1555
1556 if (!test_bit(__ICE_DOWN, q_vector->vsi->state))
1557 wr32(&q_vector->vsi->back->hw,
1558 GLINT_DYN_CTL(q_vector->reg_idx),
1559 itr_val);
1560}
1561
1562/**
1563 * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
1564 * @q_vector: q_vector to set WB_ON_ITR on
1565 *
1566 * We need to tell hardware to write-back completed descriptors even when
1567 * interrupts are disabled. Descriptors will be written back on cache line
1568 * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR
1569 * descriptors may not be written back if they don't fill a cache line until the
1570 * next interrupt.
1571 *
1572 * This sets the write-back frequency to 2 microseconds as that is the minimum
1573 * value that's not 0 due to ITR granularity. Also, set the INTENA_MSK bit to
1574 * make sure hardware knows we aren't meddling with the INTENA_M bit.
1575 */
1576static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
1577{
1578 struct ice_vsi *vsi = q_vector->vsi;
1579
1580 /* already in WB_ON_ITR mode no need to change it */
1581 if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE)
1582 return;
1583
1584 if (q_vector->num_ring_rx)
1585 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
1586 ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
1587 ICE_RX_ITR));
1588
1589 if (q_vector->num_ring_tx)
1590 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
1591 ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
1592 ICE_TX_ITR));
1593
1594 q_vector->itr_countdown = ICE_IN_WB_ON_ITR_MODE;
1595}
1596
1597/**
1598 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
1599 * @napi: napi struct with our device's info in it
1600 * @budget: amount of work driver is allowed to do this pass, in packets
1601 *
1602 * This function will clean all queues associated with a q_vector.
1603 *
1604 * Returns the amount of work done
1605 */
1606int ice_napi_poll(struct napi_struct *napi, int budget)
1607{
1608 struct ice_q_vector *q_vector =
1609 container_of(napi, struct ice_q_vector, napi);
1610 bool clean_complete = true;
1611 struct ice_ring *ring;
1612 int budget_per_ring;
1613 int work_done = 0;
1614
1615 /* Since the actual Tx work is minimal, we can give the Tx a larger
1616 * budget and be more aggressive about cleaning up the Tx descriptors.
1617 */
1618 ice_for_each_ring(ring, q_vector->tx) {
1619 bool wd = ring->xsk_umem ?
1620 ice_clean_tx_irq_zc(ring, budget) :
1621 ice_clean_tx_irq(ring, budget);
1622
1623 if (!wd)
1624 clean_complete = false;
1625 }
1626
1627 /* Handle case where we are called by netpoll with a budget of 0 */
1628 if (unlikely(budget <= 0))
1629 return budget;
1630
1631 /* normally we have 1 Rx ring per q_vector */
1632 if (unlikely(q_vector->num_ring_rx > 1))
1633 /* We attempt to distribute budget to each Rx queue fairly, but
1634 * don't allow the budget to go below 1 because that would exit
1635 * polling early.
1636 */
1637 budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1);
1638 else
1639 /* Max of 1 Rx ring in this q_vector so give it the budget */
1640 budget_per_ring = budget;
1641
1642 ice_for_each_ring(ring, q_vector->rx) {
1643 int cleaned;
1644
1645 /* A dedicated path for zero-copy allows making a single
1646 * comparison in the irq context instead of many inside the
1647 * ice_clean_rx_irq function and makes the codebase cleaner.
1648 */
1649 cleaned = ring->xsk_umem ?
1650 ice_clean_rx_irq_zc(ring, budget_per_ring) :
1651 ice_clean_rx_irq(ring, budget_per_ring);
1652 work_done += cleaned;
1653 /* if we clean as many as budgeted, we must not be done */
1654 if (cleaned >= budget_per_ring)
1655 clean_complete = false;
1656 }
1657
1658 /* If work not completed, return budget and polling will return */
1659 if (!clean_complete)
1660 return budget;
1661
1662 /* Exit the polling mode, but don't re-enable interrupts if stack might
1663 * poll us due to busy-polling
1664 */
1665 if (likely(napi_complete_done(napi, work_done)))
1666 ice_update_ena_itr(q_vector);
1667 else
1668 ice_set_wb_on_itr(q_vector);
1669
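	/* return strictly less than the full budget so the NAPI core is never
	 * told to keep polling after napi_complete_done() has been called
	 */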
1670 return min_t(int, work_done, budget - 1);
1671}
1672
1673/**
1674 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
1675 * @tx_ring: the ring to be checked
1676 * @size: the size buffer we want to assure is available
1677 *
1678 * Returns -EBUSY if a stop is needed, else 0
1679 */
1680static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
1681{
1682 netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
1683 /* Memory barrier before checking head and tail */
1684 smp_mb();
1685
1686 /* Check again in a case another CPU has just made room available. */
1687 if (likely(ICE_DESC_UNUSED(tx_ring) < size))
1688 return -EBUSY;
1689
1690 /* A reprieve! - use start_subqueue because it doesn't call schedule */
1691 netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
1692 ++tx_ring->tx_stats.restart_q;
1693 return 0;
1694}
1695
1696/**
1697 * ice_maybe_stop_tx - 1st level check for Tx stop conditions
1698 * @tx_ring: the ring to be checked
1699 * @size: the size buffer we want to assure is available
1700 *
1701 * Returns 0 if stop is not needed
1702 */
1703static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
1704{
1705 if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
1706 return 0;
1707
1708 return __ice_maybe_stop_tx(tx_ring, size);
1709}
1710
1711/**
1712 * ice_tx_map - Build the Tx descriptor
1713 * @tx_ring: ring to send buffer on
1714 * @first: first buffer info buffer to use
1715 * @off: pointer to struct that holds offload parameters
1716 *
1717 * This function loops over the skb data pointed to by *first
1718 * and gets a physical address for each memory location and programs
1719 * it and the length into the transmit descriptor.
1720 */
1721static void
1722ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
1723 struct ice_tx_offload_params *off)
1724{
1725 u64 td_offset, td_tag, td_cmd;
1726 u16 i = tx_ring->next_to_use;
1727 unsigned int data_len, size;
1728 struct ice_tx_desc *tx_desc;
1729 struct ice_tx_buf *tx_buf;
1730 struct sk_buff *skb;
1731 skb_frag_t *frag;
1732 dma_addr_t dma;
1733
1734 td_tag = off->td_l2tag1;
1735 td_cmd = off->td_cmd;
1736 td_offset = off->td_offset;
1737 skb = first->skb;
1738
1739 data_len = skb->data_len;
1740 size = skb_headlen(skb);
1741
1742 tx_desc = ICE_TX_DESC(tx_ring, i);
1743
1744 if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
1745 td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
1746 td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
1747 ICE_TX_FLAGS_VLAN_S;
1748 }
1749
1750 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
1751
1752 tx_buf = first;
1753
1754 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
1755 unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1756
1757 if (dma_mapping_error(tx_ring->dev, dma))
1758 goto dma_error;
1759
1760 /* record length, and DMA address */
1761 dma_unmap_len_set(tx_buf, len, size);
1762 dma_unmap_addr_set(tx_buf, dma, dma);
1763
1764 /* align the first chunk so later chunks start on ICE_MAX_READ_REQ_SIZE boundaries */
1765 max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
1766 tx_desc->buf_addr = cpu_to_le64(dma);
1767
1768 /* account for data chunks larger than the hardware
1769 * can handle
1770 */
1771 while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
1772 tx_desc->cmd_type_offset_bsz =
1773 ice_build_ctob(td_cmd, td_offset, max_data,
1774 td_tag);
1775
1776 tx_desc++;
1777 i++;
1778
1779 if (i == tx_ring->count) {
1780 tx_desc = ICE_TX_DESC(tx_ring, 0);
1781 i = 0;
1782 }
1783
1784 dma += max_data;
1785 size -= max_data;
1786
1787 max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1788 tx_desc->buf_addr = cpu_to_le64(dma);
1789 }
1790
1791 if (likely(!data_len))
1792 break;
1793
1794 tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset,
1795 size, td_tag);
1796
1797 tx_desc++;
1798 i++;
1799
1800 if (i == tx_ring->count) {
1801 tx_desc = ICE_TX_DESC(tx_ring, 0);
1802 i = 0;
1803 }
1804
1805 size = skb_frag_size(frag);
1806 data_len -= size;
1807
1808 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
1809 DMA_TO_DEVICE);
1810
1811 tx_buf = &tx_ring->tx_buf[i];
1812 }
1813
1814 /* record bytecount for BQL */
1815 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
1816
1817 /* record SW timestamp if HW timestamp is not available */
1818 skb_tx_timestamp(first->skb);
1819
1820 i++;
1821 if (i == tx_ring->count)
1822 i = 0;
1823
1824 /* write last descriptor with RS and EOP bits */
1825 td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD;
1826 tx_desc->cmd_type_offset_bsz =
1827 ice_build_ctob(td_cmd, td_offset, size, td_tag);
1828
1829 /* Force memory writes to complete before letting h/w know there
1830 * are new descriptors to fetch.
1831 *
1832 * We also use this memory barrier to make certain all of the
1833 * status bits have been updated before next_to_watch is written.
1834 */
1835 wmb();
1836
1837 /* set next_to_watch value indicating a packet is present */
1838 first->next_to_watch = tx_desc;
1839
1840 tx_ring->next_to_use = i;
1841
1842 ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
1843
1844 /* notify HW of packet */
1845 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
1846 writel(i, tx_ring->tail);
1847
1848 return;
1849
1850dma_error:
1851 /* clear DMA mappings for failed tx_buf map */
1852 for (;;) {
1853 tx_buf = &tx_ring->tx_buf[i];
1854 ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
1855 if (tx_buf == first)
1856 break;
1857 if (i == 0)
1858 i = tx_ring->count;
1859 i--;
1860 }
1861
1862 tx_ring->next_to_use = i;
1863}
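
/* Illustrative splitting example (macro values assumed from the
 * ice_txd_use_count() comment: 16K - 1 max per descriptor, 12K aligned
 * limit, 4K read-request boundary): a 30000-byte chunk whose DMA address
 * sits 1K below a 4K boundary is carved up by the loop above as
 *
 *   desc 0: 12288 + 1024 alignment slack = 13312 bytes (ends 4K aligned)
 *   desc 1: 12288 bytes
 *   desc 2: 4400 bytes (written by the code following the while loop)
 *
 * This is only a sketch of the arithmetic, not output from the driver.
 */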
1864
1865/**
1866 * ice_tx_csum - Enable Tx checksum offloads
1867 * @first: pointer to the first descriptor
1868 * @off: pointer to struct that holds offload parameters
1869 *
1870 * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise.
1871 */
1872static
1873int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1874{
1875 u32 l4_len = 0, l3_len = 0, l2_len = 0;
1876 struct sk_buff *skb = first->skb;
1877 union {
1878 struct iphdr *v4;
1879 struct ipv6hdr *v6;
1880 unsigned char *hdr;
1881 } ip;
1882 union {
1883 struct tcphdr *tcp;
1884 unsigned char *hdr;
1885 } l4;
1886 __be16 frag_off, protocol;
1887 unsigned char *exthdr;
1888 u32 offset, cmd = 0;
1889 u8 l4_proto = 0;
1890
1891 if (skb->ip_summed != CHECKSUM_PARTIAL)
1892 return 0;
1893
1894 ip.hdr = skb_network_header(skb);
1895 l4.hdr = skb_transport_header(skb);
1896
1897 /* compute outer L2 header size */
1898 l2_len = ip.hdr - skb->data;
1899 offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
1900
1901 protocol = vlan_get_protocol(skb);
1902
1903 if (protocol == htons(ETH_P_IP))
1904 first->tx_flags |= ICE_TX_FLAGS_IPV4;
1905 else if (protocol == htons(ETH_P_IPV6))
1906 first->tx_flags |= ICE_TX_FLAGS_IPV6;
1907
1908 if (skb->encapsulation) {
1909 bool gso_ena = false;
1910 u32 tunnel = 0;
1911
1912 /* define outer network header type */
1913 if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
1914 tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ?
1915 ICE_TX_CTX_EIPT_IPV4 :
1916 ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
1917 l4_proto = ip.v4->protocol;
1918 } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
1919 tunnel |= ICE_TX_CTX_EIPT_IPV6;
1920 exthdr = ip.hdr + sizeof(*ip.v6);
1921 l4_proto = ip.v6->nexthdr;
1922 if (l4.hdr != exthdr)
1923 ipv6_skip_exthdr(skb, exthdr - skb->data,
1924 &l4_proto, &frag_off);
1925 }
1926
1927 /* define outer transport */
1928 switch (l4_proto) {
1929 case IPPROTO_UDP:
1930 tunnel |= ICE_TXD_CTX_UDP_TUNNELING;
1931 first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1932 break;
1933 case IPPROTO_GRE:
1934 tunnel |= ICE_TXD_CTX_GRE_TUNNELING;
1935 first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1936 break;
1937 case IPPROTO_IPIP:
1938 case IPPROTO_IPV6:
1939 first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1940 l4.hdr = skb_inner_network_header(skb);
1941 break;
1942 default:
1943 if (first->tx_flags & ICE_TX_FLAGS_TSO)
1944 return -1;
1945
1946 skb_checksum_help(skb);
1947 return 0;
1948 }
1949
1950 /* compute outer L3 header size */
1951 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
1952 ICE_TXD_CTX_QW0_EIPLEN_S;
1953
1954 /* switch IP header pointer from outer to inner header */
1955 ip.hdr = skb_inner_network_header(skb);
1956
1957 /* compute tunnel header size */
1958 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
1959 ICE_TXD_CTX_QW0_NATLEN_S;
1960
1961 gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
1962 /* indicate if we need to offload outer UDP header */
1963 if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena &&
1964 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
1965 tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M;
1966
1967 /* record tunnel offload values */
1968 off->cd_tunnel_params |= tunnel;
1969
1970		/* set DTYP=1 to indicate that it's a Tx context descriptor
1971 * in IPsec tunnel mode with Tx offloads in Quad word 1
1972 */
1973 off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX;
1974
1975 /* switch L4 header pointer from outer to inner */
1976 l4.hdr = skb_inner_transport_header(skb);
1977 l4_proto = 0;
1978
1979 /* reset type as we transition from outer to inner headers */
1980 first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6);
1981 if (ip.v4->version == 4)
1982 first->tx_flags |= ICE_TX_FLAGS_IPV4;
1983 if (ip.v6->version == 6)
1984 first->tx_flags |= ICE_TX_FLAGS_IPV6;
1985 }
1986
1987 /* Enable IP checksum offloads */
1988 if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
1989 l4_proto = ip.v4->protocol;
1990 /* the stack computes the IP header already, the only time we
1991 * need the hardware to recompute it is in the case of TSO.
1992 */
1993 if (first->tx_flags & ICE_TX_FLAGS_TSO)
1994 cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
1995 else
1996 cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
1997
1998 } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
1999 cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
2000 exthdr = ip.hdr + sizeof(*ip.v6);
2001 l4_proto = ip.v6->nexthdr;
2002 if (l4.hdr != exthdr)
2003 ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
2004 &frag_off);
2005 } else {
2006 return -1;
2007 }
2008
2009 /* compute inner L3 header size */
2010 l3_len = l4.hdr - ip.hdr;
2011 offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;
2012
2013 /* Enable L4 checksum offloads */
2014 switch (l4_proto) {
2015 case IPPROTO_TCP:
2016 /* enable checksum offloads */
2017 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2018 l4_len = l4.tcp->doff;
2019 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
2020 break;
2021 case IPPROTO_UDP:
2022 /* enable UDP checksum offload */
2023 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
2024 l4_len = (sizeof(struct udphdr) >> 2);
2025 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
2026 break;
2027 case IPPROTO_SCTP:
2028 /* enable SCTP checksum offload */
2029 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
2030 l4_len = sizeof(struct sctphdr) >> 2;
2031 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
2032 break;
2033
2034 default:
2035 if (first->tx_flags & ICE_TX_FLAGS_TSO)
2036 return -1;
2037 skb_checksum_help(skb);
2038 return 0;
2039 }
2040
2041 off->td_cmd |= cmd;
2042 off->td_offset |= offset;
2043 return 1;
2044}
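
/* Worked example of the offset encoding above (plain IPv4/TCP, values
 * assumed): a 14-byte Ethernet header, 20-byte IP header and 20-byte TCP
 * header give
 *
 *   MACLEN = 14 / 2 = 7  half-words  (<< ICE_TX_DESC_LEN_MACLEN_S)
 *   IPLEN  = 20 / 4 = 5  dwords      (<< ICE_TX_DESC_LEN_IPLEN_S)
 *   L4LEN  = doff   = 5  dwords      (<< ICE_TX_DESC_LEN_L4_LEN_S)
 *
 * with td_cmd carrying ICE_TX_DESC_CMD_IIPT_IPV4 and
 * ICE_TX_DESC_CMD_L4T_EOFT_TCP. This only restates the arithmetic done by
 * ice_tx_csum(); the bit positions themselves are hardware-defined.
 */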
2045
2046/**
2047 * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
2048 * @tx_ring: ring to send buffer on
2049 * @first: pointer to struct ice_tx_buf
2050 *
2051 * Checks the skb and sets up the corresponding generic transmit flags
2052 * related to VLAN tagging for the HW, such as VLAN and DCB.
2053 */
2054static void
2055ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
2056{
2057 struct sk_buff *skb = first->skb;
2058
2059 /* nothing left to do, software offloaded VLAN */
2060 if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol))
2061 return;
2062
2063 /* currently, we always assume 802.1Q for VLAN insertion as VLAN
2064 * insertion for 802.1AD is not supported
2065 */
2066 if (skb_vlan_tag_present(skb)) {
2067 first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
2068 first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
2069 }
2070
2071 ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
2072}
2073
2074/**
2075 * ice_tso - computes mss and TSO length to prepare for TSO
2076 * @first: pointer to struct ice_tx_buf
2077 * @off: pointer to struct that holds offload parameters
2078 *
2079 * Returns 0 or error (negative) if TSO can't happen, 1 otherwise.
2080 */
2081static
2082int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
2083{
2084 struct sk_buff *skb = first->skb;
2085 union {
2086 struct iphdr *v4;
2087 struct ipv6hdr *v6;
2088 unsigned char *hdr;
2089 } ip;
2090 union {
2091 struct tcphdr *tcp;
2092 struct udphdr *udp;
2093 unsigned char *hdr;
2094 } l4;
2095 u64 cd_mss, cd_tso_len;
2096 u32 paylen;
2097 u8 l4_start;
2098 int err;
2099
2100 if (skb->ip_summed != CHECKSUM_PARTIAL)
2101 return 0;
2102
2103 if (!skb_is_gso(skb))
2104 return 0;
2105
2106 err = skb_cow_head(skb, 0);
2107 if (err < 0)
2108 return err;
2109
2110 /* cppcheck-suppress unreadVariable */
2111 ip.hdr = skb_network_header(skb);
2112 l4.hdr = skb_transport_header(skb);
2113
2114 /* initialize outer IP header fields */
2115 if (ip.v4->version == 4) {
2116 ip.v4->tot_len = 0;
2117 ip.v4->check = 0;
2118 } else {
2119 ip.v6->payload_len = 0;
2120 }
2121
2122 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
2123 SKB_GSO_GRE_CSUM |
2124 SKB_GSO_IPXIP4 |
2125 SKB_GSO_IPXIP6 |
2126 SKB_GSO_UDP_TUNNEL |
2127 SKB_GSO_UDP_TUNNEL_CSUM)) {
2128 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2129 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
2130 l4.udp->len = 0;
2131
2132 /* determine offset of outer transport header */
2133 l4_start = (u8)(l4.hdr - skb->data);
2134
2135 /* remove payload length from outer checksum */
2136 paylen = skb->len - l4_start;
2137 csum_replace_by_diff(&l4.udp->check,
2138 (__force __wsum)htonl(paylen));
2139 }
2140
2141 /* reset pointers to inner headers */
2142
2143 /* cppcheck-suppress unreadVariable */
2144 ip.hdr = skb_inner_network_header(skb);
2145 l4.hdr = skb_inner_transport_header(skb);
2146
2147 /* initialize inner IP header fields */
2148 if (ip.v4->version == 4) {
2149 ip.v4->tot_len = 0;
2150 ip.v4->check = 0;
2151 } else {
2152 ip.v6->payload_len = 0;
2153 }
2154 }
2155
2156 /* determine offset of transport header */
2157 l4_start = (u8)(l4.hdr - skb->data);
2158
2159 /* remove payload length from checksum */
2160 paylen = skb->len - l4_start;
2161
2162 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
2163 csum_replace_by_diff(&l4.udp->check,
2164 (__force __wsum)htonl(paylen));
2165 /* compute length of UDP segmentation header */
2166		off->header_len = (u8)sizeof(struct udphdr) + l4_start;
2167 } else {
2168 csum_replace_by_diff(&l4.tcp->check,
2169 (__force __wsum)htonl(paylen));
2170 /* compute length of TCP segmentation header */
2171 off->header_len = (u8)((l4.tcp->doff * 4) + l4_start);
2172 }
2173
2174 /* update gso_segs and bytecount */
2175 first->gso_segs = skb_shinfo(skb)->gso_segs;
2176 first->bytecount += (first->gso_segs - 1) * off->header_len;
2177
2178 cd_tso_len = skb->len - off->header_len;
2179 cd_mss = skb_shinfo(skb)->gso_size;
2180
2181 /* record cdesc_qw1 with TSO parameters */
2182 off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2183 (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
2184 (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
2185 (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
2186 first->tx_flags |= ICE_TX_FLAGS_TSO;
2187 return 1;
2188}
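
/* Illustrative TSO numbers (assumed, not from a capture): for a 64000-byte
 * TCP skb with 14 + 20 + 20 bytes of headers and gso_size 1448,
 * ice_tso() computes
 *
 *   header_len = l4.tcp->doff * 4 + l4_start = 20 + 34 = 54
 *   cd_tso_len = 64000 - 54 = 63946
 *   cd_mss     = 1448
 *
 * and packs them into cd_qw1 along with ICE_TX_DESC_DTYPE_CTX and the TSO
 * command. first->bytecount also grows by (gso_segs - 1) * 54 so that BQL
 * accounts for the headers the hardware will replicate.
 */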
2189
2190/**
2191 * ice_txd_use_count - estimate the number of descriptors needed for Tx
2192 * @size: transmit request size in bytes
2193 *
2194 * Due to hardware alignment restrictions (4K alignment), we need to
2195 * assume that we can have no more than 12K of data per descriptor, even
2196 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
2197 * Thus, we need to divide by 12K. But division is slow! Instead,
2198 * we decompose the operation into shifts and one relatively cheap
2199 * multiply operation.
2200 *
2201 * To divide by 12K, we first divide by 4K, then divide by 3:
2202 * To divide by 4K, shift right by 12 bits
2203 * To divide by 3, multiply by 85, then divide by 256
2204 * (Divide by 256 is done by shifting right by 8 bits)
2205 * Finally, we add one to round up. Because 256 isn't an exact multiple of
2206 * 3, we'll underestimate near each multiple of 12K. This is actually more
2207 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
2208 * segment. For our purposes this is accurate out to 1M which is orders of
2209 * magnitude greater than our largest possible GSO size.
2210 *
2211 * This would then be implemented as:
2212 * return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
2213 *
2214 * Since multiplication and division are commutative, we can reorder
2215 * operations into:
2216 * return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
2217 */
2218static unsigned int ice_txd_use_count(unsigned int size)
2219{
2220 return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
2221}
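
/* Quick sanity check of the shortcut above (illustrative arithmetic,
 * assuming ICE_DESCS_FOR_SKB_DATA_PTR provides the "+1" mentioned in the
 * comment):
 *
 *   size = 12288: (12288 * 85) >> 20 = 0 -> 1 descriptor
 *   size = 30000: (30000 * 85) >> 20 = 2 -> 3 descriptors (30000 / 12K ~ 2.4)
 *   size = 65536: (65536 * 85) >> 20 = 5 -> 6 descriptors (65536 / 12K ~ 5.3)
 *
 * which matches ceil(size / 12K) for each case, as the derivation predicts.
 */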
2222
2223/**
2224 * ice_xmit_desc_count - calculate number of Tx descriptors needed
2225 * @skb: send buffer
2226 *
2227 * Returns number of data descriptors needed for this skb.
2228 */
2229static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
2230{
2231 const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
2232 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2233 unsigned int count = 0, size = skb_headlen(skb);
2234
2235 for (;;) {
2236 count += ice_txd_use_count(size);
2237
2238 if (!nr_frags--)
2239 break;
2240
2241 size = skb_frag_size(frag++);
2242 }
2243
2244 return count;
2245}
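
/* Example (illustrative sizes): an skb with a 1500-byte linear part and
 * three 4096-byte frags needs 1 + 1 + 1 + 1 = 4 descriptors, since every
 * piece is below the 12K-per-descriptor limit used by ice_txd_use_count().
 */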
2246
2247/**
2248 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
2249 * @skb: send buffer
2250 *
2251 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
2252 * and so we need to figure out the cases where we need to linearize the skb.
2253 *
2254 * For TSO we need to count the TSO header and segment payload separately.
2255 * As such we need to check cases where we have 7 fragments or more as we
2256 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
2257 * the segment payload in the first descriptor, and another 7 for the
2258 * fragments.
2259 */
2260static bool __ice_chk_linearize(struct sk_buff *skb)
2261{
2262 const skb_frag_t *frag, *stale;
2263 int nr_frags, sum;
2264
2265 /* no need to check if number of frags is less than 7 */
2266 nr_frags = skb_shinfo(skb)->nr_frags;
2267 if (nr_frags < (ICE_MAX_BUF_TXD - 1))
2268 return false;
2269
2270 /* We need to walk through the list and validate that each group
2271 * of 6 fragments totals at least gso_size.
2272 */
2273 nr_frags -= ICE_MAX_BUF_TXD - 2;
2274 frag = &skb_shinfo(skb)->frags[0];
2275
2276 /* Initialize size to the negative value of gso_size minus 1. We
2277 * use this as the worst case scenario in which the frag ahead
2278 * of us only provides one byte which is why we are limited to 6
2279 * descriptors for a single transmit as the header and previous
2280 * fragment are already consuming 2 descriptors.
2281 */
2282 sum = 1 - skb_shinfo(skb)->gso_size;
2283
2284 /* Add size of frags 0 through 4 to create our initial sum */
2285 sum += skb_frag_size(frag++);
2286 sum += skb_frag_size(frag++);
2287 sum += skb_frag_size(frag++);
2288 sum += skb_frag_size(frag++);
2289 sum += skb_frag_size(frag++);
2290
2291 /* Walk through fragments adding latest fragment, testing it, and
2292 * then removing stale fragments from the sum.
2293 */
2294 for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
2295 int stale_size = skb_frag_size(stale);
2296
2297 sum += skb_frag_size(frag++);
2298
2299 /* The stale fragment may present us with a smaller
2300 * descriptor than the actual fragment size. To account
2301 * for that we need to remove all the data on the front and
2302 * figure out what the remainder would be in the last
2303 * descriptor associated with the fragment.
2304 */
2305 if (stale_size > ICE_MAX_DATA_PER_TXD) {
2306 int align_pad = -(skb_frag_off(stale)) &
2307 (ICE_MAX_READ_REQ_SIZE - 1);
2308
2309 sum -= align_pad;
2310 stale_size -= align_pad;
2311
2312 do {
2313 sum -= ICE_MAX_DATA_PER_TXD_ALIGNED;
2314 stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED;
2315 } while (stale_size > ICE_MAX_DATA_PER_TXD);
2316 }
2317
2318 /* if sum is negative we failed to make sufficient progress */
2319 if (sum < 0)
2320 return true;
2321
2322 if (!nr_frags--)
2323 break;
2324
2325 sum -= stale_size;
2326 }
2327
2328 return false;
2329}
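
/* Worked example of the sliding check above (frag sizes assumed): with
 * gso_size = 6000 and nine 1000-byte frags, the first test sees
 * sum = (1 - 6000) + 6 * 1000 = 1, which is non-negative, so six
 * consecutive frags always cover at least one segment and no
 * linearization is needed. With 900-byte frags the same sum is
 * (1 - 6000) + 6 * 900 = -599, meaning a single segment could spill over
 * more than 8 DMA buffers, so the function returns true and the caller
 * linearizes the skb.
 */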
2330
2331/**
2332 * ice_chk_linearize - Check if there are more than 8 fragments per packet
2333 * @skb: send buffer
2334 * @count: number of buffers used
2335 *
2336 * Note: Our HW can't scatter-gather more than 8 fragments to build
2337 * a packet on the wire and so we need to figure out the cases where we
2338 * need to linearize the skb.
2339 */
2340static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
2341{
2342 /* Both TSO and single send will work if count is less than 8 */
2343 if (likely(count < ICE_MAX_BUF_TXD))
2344 return false;
2345
2346 if (skb_is_gso(skb))
2347 return __ice_chk_linearize(skb);
2348
2349 /* we can support up to 8 data buffers for a single send */
2350 return count != ICE_MAX_BUF_TXD;
2351}
2352
2353/**
2354 * ice_xmit_frame_ring - Sends buffer on Tx ring
2355 * @skb: send buffer
2356 * @tx_ring: ring to send buffer on
2357 *
2358 * Returns NETDEV_TX_OK if sent, else an error code
2359 */
2360static netdev_tx_t
2361ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
2362{
2363 struct ice_tx_offload_params offload = { 0 };
2364 struct ice_vsi *vsi = tx_ring->vsi;
2365 struct ice_tx_buf *first;
2366 unsigned int count;
2367 int tso, csum;
2368
2369 count = ice_xmit_desc_count(skb);
2370 if (ice_chk_linearize(skb, count)) {
2371 if (__skb_linearize(skb))
2372 goto out_drop;
2373 count = ice_txd_use_count(skb->len);
2374 tx_ring->tx_stats.tx_linearize++;
2375 }
2376
2377 /* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
2378 * + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
2379 * + 4 desc gap to avoid the cache line where head is,
2380 * + 1 desc for context descriptor,
2381 * otherwise try next time
2382 */
2383 if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
2384 ICE_DESCS_FOR_CTX_DESC)) {
2385 tx_ring->tx_stats.tx_busy++;
2386 return NETDEV_TX_BUSY;
2387 }
2388
2389 offload.tx_ring = tx_ring;
2390
2391 /* record the location of the first descriptor for this packet */
2392 first = &tx_ring->tx_buf[tx_ring->next_to_use];
2393 first->skb = skb;
2394 first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
2395 first->gso_segs = 1;
2396 first->tx_flags = 0;
2397
2398 /* prepare the VLAN tagging flags for Tx */
2399 ice_tx_prepare_vlan_flags(tx_ring, first);
2400
2401 /* set up TSO offload */
2402 tso = ice_tso(first, &offload);
2403 if (tso < 0)
2404 goto out_drop;
2405
2406 /* always set up Tx checksum offload */
2407 csum = ice_tx_csum(first, &offload);
2408 if (csum < 0)
2409 goto out_drop;
2410
2411 /* allow CONTROL frames egress from main VSI if FW LLDP disabled */
2412 if (unlikely(skb->priority == TC_PRIO_CONTROL &&
2413 vsi->type == ICE_VSI_PF &&
2414 vsi->port_info->is_sw_lldp))
2415 offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2416 ICE_TX_CTX_DESC_SWTCH_UPLINK <<
2417 ICE_TXD_CTX_QW1_CMD_S);
2418
2419 if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
2420 struct ice_tx_ctx_desc *cdesc;
2421 u16 i = tx_ring->next_to_use;
2422
2423 /* grab the next descriptor */
2424 cdesc = ICE_TX_CTX_DESC(tx_ring, i);
2425 i++;
2426 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2427
2428 /* setup context descriptor */
2429 cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
2430 cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
2431 cdesc->rsvd = cpu_to_le16(0);
2432 cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
2433 }
2434
2435 ice_tx_map(tx_ring, first, &offload);
2436 return NETDEV_TX_OK;
2437
2438out_drop:
2439 dev_kfree_skb_any(skb);
2440 return NETDEV_TX_OK;
2441}
2442
2443/**
2444 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
2445 * @skb: send buffer
2446 * @netdev: network interface device structure
2447 *
2448 * Returns NETDEV_TX_OK if sent, else an error code
2449 */
2450netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2451{
2452 struct ice_netdev_priv *np = netdev_priv(netdev);
2453 struct ice_vsi *vsi = np->vsi;
2454 struct ice_ring *tx_ring;
2455
2456 tx_ring = vsi->tx_rings[skb->queue_mapping];
2457
2458 /* hardware can't handle really short frames, hardware padding works
2459 * beyond this point
2460 */
2461 if (skb_put_padto(skb, ICE_MIN_TX_LEN))
2462 return NETDEV_TX_OK;
2463
2464 return ice_xmit_frame_ring(skb, tx_ring);
2465}
2466
2467/**
2468 * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue
2469 * @tx_ring: tx_ring to clean
2470 */
2471void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring)
2472{
2473 struct ice_vsi *vsi = tx_ring->vsi;
2474 s16 i = tx_ring->next_to_clean;
2475 int budget = ICE_DFLT_IRQ_WORK;
2476 struct ice_tx_desc *tx_desc;
2477 struct ice_tx_buf *tx_buf;
2478
2479 tx_buf = &tx_ring->tx_buf[i];
2480 tx_desc = ICE_TX_DESC(tx_ring, i);
2481 i -= tx_ring->count;
2482
2483 do {
2484 struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
2485
2486 /* if next_to_watch is not set then there is no pending work */
2487 if (!eop_desc)
2488 break;
2489
2490 /* prevent any other reads prior to eop_desc */
2491 smp_rmb();
2492
2493 /* if the descriptor isn't done, no work to do */
2494 if (!(eop_desc->cmd_type_offset_bsz &
2495 cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
2496 break;
2497
2498 /* clear next_to_watch to prevent false hangs */
2499 tx_buf->next_to_watch = NULL;
2500 tx_desc->buf_addr = 0;
2501 tx_desc->cmd_type_offset_bsz = 0;
2502
2503 /* move past filter desc */
2504 tx_buf++;
2505 tx_desc++;
2506 i++;
2507 if (unlikely(!i)) {
2508 i -= tx_ring->count;
2509 tx_buf = tx_ring->tx_buf;
2510 tx_desc = ICE_TX_DESC(tx_ring, 0);
2511 }
2512
2513 /* unmap the data header */
2514 if (dma_unmap_len(tx_buf, len))
2515 dma_unmap_single(tx_ring->dev,
2516 dma_unmap_addr(tx_buf, dma),
2517 dma_unmap_len(tx_buf, len),
2518 DMA_TO_DEVICE);
2519 if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
2520 devm_kfree(tx_ring->dev, tx_buf->raw_buf);
2521
2522 /* clear next_to_watch to prevent false hangs */
2523 tx_buf->raw_buf = NULL;
2524 tx_buf->tx_flags = 0;
2525 tx_buf->next_to_watch = NULL;
2526 dma_unmap_len_set(tx_buf, len, 0);
2527 tx_desc->buf_addr = 0;
2528 tx_desc->cmd_type_offset_bsz = 0;
2529
2530 /* move past eop_desc for start of next FD desc */
2531 tx_buf++;
2532 tx_desc++;
2533 i++;
2534 if (unlikely(!i)) {
2535 i -= tx_ring->count;
2536 tx_buf = tx_ring->tx_buf;
2537 tx_desc = ICE_TX_DESC(tx_ring, 0);
2538 }
2539
2540 budget--;
2541 } while (likely(budget));
2542
2543 i += tx_ring->count;
2544 tx_ring->next_to_clean = i;
2545
2546 /* re-enable interrupt if needed */
2547 ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]);
2548}
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2018, Intel Corporation. */
3
4/* The driver transmit and receive code */
5
6#include <linux/mm.h>
7#include <linux/netdevice.h>
8#include <linux/prefetch.h>
9#include <linux/bpf_trace.h>
10#include <net/dsfield.h>
11#include <net/mpls.h>
12#include <net/xdp.h>
13#include "ice_txrx_lib.h"
14#include "ice_lib.h"
15#include "ice.h"
16#include "ice_trace.h"
17#include "ice_dcb_lib.h"
18#include "ice_xsk.h"
19#include "ice_eswitch.h"
20
21#define ICE_RX_HDR_SIZE 256
22
23#define FDIR_DESC_RXDID 0x40
24#define ICE_FDIR_CLEAN_DELAY 10
25
26/**
27 * ice_prgm_fdir_fltr - Program a Flow Director filter
28 * @vsi: VSI to send dummy packet
29 * @fdir_desc: flow director descriptor
30 * @raw_packet: allocated buffer for flow director
31 */
32int
33ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
34 u8 *raw_packet)
35{
36 struct ice_tx_buf *tx_buf, *first;
37 struct ice_fltr_desc *f_desc;
38 struct ice_tx_desc *tx_desc;
39 struct ice_tx_ring *tx_ring;
40 struct device *dev;
41 dma_addr_t dma;
42 u32 td_cmd;
43 u16 i;
44
45 /* VSI and Tx ring */
46 if (!vsi)
47 return -ENOENT;
48 tx_ring = vsi->tx_rings[0];
49 if (!tx_ring || !tx_ring->desc)
50 return -ENOENT;
51 dev = tx_ring->dev;
52
53 /* we are using two descriptors to add/del a filter and we can wait */
54 for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {
55 if (!i)
56 return -EAGAIN;
57 msleep_interruptible(1);
58 }
59
60 dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE,
61 DMA_TO_DEVICE);
62
63 if (dma_mapping_error(dev, dma))
64 return -EINVAL;
65
66 /* grab the next descriptor */
67 i = tx_ring->next_to_use;
68 first = &tx_ring->tx_buf[i];
69 f_desc = ICE_TX_FDIRDESC(tx_ring, i);
70 memcpy(f_desc, fdir_desc, sizeof(*f_desc));
71
72 i++;
73 i = (i < tx_ring->count) ? i : 0;
74 tx_desc = ICE_TX_DESC(tx_ring, i);
75 tx_buf = &tx_ring->tx_buf[i];
76
77 i++;
78 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
79
80 memset(tx_buf, 0, sizeof(*tx_buf));
81 dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE);
82 dma_unmap_addr_set(tx_buf, dma, dma);
83
84 tx_desc->buf_addr = cpu_to_le64(dma);
85 td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
86 ICE_TX_DESC_CMD_RE;
87
88 tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT;
89 tx_buf->raw_buf = raw_packet;
90
91 tx_desc->cmd_type_offset_bsz =
92 ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0);
93
94 /* Force memory write to complete before letting h/w know
95 * there are new descriptors to fetch.
96 */
97 wmb();
98
99 /* mark the data descriptor to be watched */
100 first->next_to_watch = tx_desc;
101
102 writel(tx_ring->next_to_use, tx_ring->tail);
103
104 return 0;
105}
106
107/**
108 * ice_unmap_and_free_tx_buf - Release a Tx buffer
109 * @ring: the ring that owns the buffer
110 * @tx_buf: the buffer to free
111 */
112static void
113ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf)
114{
115 if (tx_buf->skb) {
116 if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
117 devm_kfree(ring->dev, tx_buf->raw_buf);
118 else if (ice_ring_is_xdp(ring))
119 page_frag_free(tx_buf->raw_buf);
120 else
121 dev_kfree_skb_any(tx_buf->skb);
122 if (dma_unmap_len(tx_buf, len))
123 dma_unmap_single(ring->dev,
124 dma_unmap_addr(tx_buf, dma),
125 dma_unmap_len(tx_buf, len),
126 DMA_TO_DEVICE);
127 } else if (dma_unmap_len(tx_buf, len)) {
128 dma_unmap_page(ring->dev,
129 dma_unmap_addr(tx_buf, dma),
130 dma_unmap_len(tx_buf, len),
131 DMA_TO_DEVICE);
132 }
133
134 tx_buf->next_to_watch = NULL;
135 tx_buf->skb = NULL;
136 dma_unmap_len_set(tx_buf, len, 0);
137 /* tx_buf must be completely set up in the transmit path */
138}
139
140static struct netdev_queue *txring_txq(const struct ice_tx_ring *ring)
141{
142 return netdev_get_tx_queue(ring->netdev, ring->q_index);
143}
144
145/**
146 * ice_clean_tx_ring - Free any empty Tx buffers
147 * @tx_ring: ring to be cleaned
148 */
149void ice_clean_tx_ring(struct ice_tx_ring *tx_ring)
150{
151 u32 size;
152 u16 i;
153
154 if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
155 ice_xsk_clean_xdp_ring(tx_ring);
156 goto tx_skip_free;
157 }
158
159 /* ring already cleared, nothing to do */
160 if (!tx_ring->tx_buf)
161 return;
162
163 /* Free all the Tx ring sk_buffs */
164 for (i = 0; i < tx_ring->count; i++)
165 ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);
166
167tx_skip_free:
168 memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);
169
170 size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
171 PAGE_SIZE);
172 /* Zero out the descriptor ring */
173 memset(tx_ring->desc, 0, size);
174
175 tx_ring->next_to_use = 0;
176 tx_ring->next_to_clean = 0;
177 tx_ring->next_dd = ICE_RING_QUARTER(tx_ring) - 1;
178 tx_ring->next_rs = ICE_RING_QUARTER(tx_ring) - 1;
179
180 if (!tx_ring->netdev)
181 return;
182
183 /* cleanup Tx queue statistics */
184 netdev_tx_reset_queue(txring_txq(tx_ring));
185}
186
187/**
188 * ice_free_tx_ring - Free Tx resources per queue
189 * @tx_ring: Tx descriptor ring for a specific queue
190 *
191 * Free all transmit software resources
192 */
193void ice_free_tx_ring(struct ice_tx_ring *tx_ring)
194{
195 u32 size;
196
197 ice_clean_tx_ring(tx_ring);
198 devm_kfree(tx_ring->dev, tx_ring->tx_buf);
199 tx_ring->tx_buf = NULL;
200
201 if (tx_ring->desc) {
202 size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
203 PAGE_SIZE);
204 dmam_free_coherent(tx_ring->dev, size,
205 tx_ring->desc, tx_ring->dma);
206 tx_ring->desc = NULL;
207 }
208}
209
210/**
211 * ice_clean_tx_irq - Reclaim resources after transmit completes
212 * @tx_ring: Tx ring to clean
213 * @napi_budget: Used to determine if we are in netpoll
214 *
215 * Returns true if there's any budget left (i.e. the clean is finished)
216 */
217static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
218{
219 unsigned int total_bytes = 0, total_pkts = 0;
220 unsigned int budget = ICE_DFLT_IRQ_WORK;
221 struct ice_vsi *vsi = tx_ring->vsi;
222 s16 i = tx_ring->next_to_clean;
223 struct ice_tx_desc *tx_desc;
224 struct ice_tx_buf *tx_buf;
225
226 /* get the bql data ready */
227 netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring));
228
229 tx_buf = &tx_ring->tx_buf[i];
230 tx_desc = ICE_TX_DESC(tx_ring, i);
231 i -= tx_ring->count;
232
233 prefetch(&vsi->state);
234
235 do {
236 struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
237
238 /* if next_to_watch is not set then there is no work pending */
239 if (!eop_desc)
240 break;
241
242 /* follow the guidelines of other drivers */
243 prefetchw(&tx_buf->skb->users);
244
245 smp_rmb(); /* prevent any other reads prior to eop_desc */
246
247 ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
248 /* if the descriptor isn't done, no work yet to do */
249 if (!(eop_desc->cmd_type_offset_bsz &
250 cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
251 break;
252
253 /* clear next_to_watch to prevent false hangs */
254 tx_buf->next_to_watch = NULL;
255
256 /* update the statistics for this packet */
257 total_bytes += tx_buf->bytecount;
258 total_pkts += tx_buf->gso_segs;
259
260 /* free the skb */
261 napi_consume_skb(tx_buf->skb, napi_budget);
262
263 /* unmap skb header data */
264 dma_unmap_single(tx_ring->dev,
265 dma_unmap_addr(tx_buf, dma),
266 dma_unmap_len(tx_buf, len),
267 DMA_TO_DEVICE);
268
269 /* clear tx_buf data */
270 tx_buf->skb = NULL;
271 dma_unmap_len_set(tx_buf, len, 0);
272
273 /* unmap remaining buffers */
274 while (tx_desc != eop_desc) {
275 ice_trace(clean_tx_irq_unmap, tx_ring, tx_desc, tx_buf);
276 tx_buf++;
277 tx_desc++;
278 i++;
279 if (unlikely(!i)) {
280 i -= tx_ring->count;
281 tx_buf = tx_ring->tx_buf;
282 tx_desc = ICE_TX_DESC(tx_ring, 0);
283 }
284
285 /* unmap any remaining paged data */
286 if (dma_unmap_len(tx_buf, len)) {
287 dma_unmap_page(tx_ring->dev,
288 dma_unmap_addr(tx_buf, dma),
289 dma_unmap_len(tx_buf, len),
290 DMA_TO_DEVICE);
291 dma_unmap_len_set(tx_buf, len, 0);
292 }
293 }
294 ice_trace(clean_tx_irq_unmap_eop, tx_ring, tx_desc, tx_buf);
295
296 /* move us one more past the eop_desc for start of next pkt */
297 tx_buf++;
298 tx_desc++;
299 i++;
300 if (unlikely(!i)) {
301 i -= tx_ring->count;
302 tx_buf = tx_ring->tx_buf;
303 tx_desc = ICE_TX_DESC(tx_ring, 0);
304 }
305
306 prefetch(tx_desc);
307
308 /* update budget accounting */
309 budget--;
310 } while (likely(budget));
311
312 i += tx_ring->count;
313 tx_ring->next_to_clean = i;
314
315 ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
316 netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, total_bytes);
317
318#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
319 if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
320 (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
321 /* Make sure that anybody stopping the queue after this
322 * sees the new next_to_clean.
323 */
324 smp_mb();
325 if (netif_tx_queue_stopped(txring_txq(tx_ring)) &&
326 !test_bit(ICE_VSI_DOWN, vsi->state)) {
327 netif_tx_wake_queue(txring_txq(tx_ring));
328 ++tx_ring->ring_stats->tx_stats.restart_q;
329 }
330 }
331
332 return !!budget;
333}
334
335/**
336 * ice_setup_tx_ring - Allocate the Tx descriptors
337 * @tx_ring: the Tx ring to set up
338 *
339 * Return 0 on success, negative on error
340 */
341int ice_setup_tx_ring(struct ice_tx_ring *tx_ring)
342{
343 struct device *dev = tx_ring->dev;
344 u32 size;
345
346 if (!dev)
347 return -ENOMEM;
348
349 /* warn if we are about to overwrite the pointer */
350 WARN_ON(tx_ring->tx_buf);
351 tx_ring->tx_buf =
352		devm_kcalloc(dev, tx_ring->count, sizeof(*tx_ring->tx_buf),
353			     GFP_KERNEL);
354 if (!tx_ring->tx_buf)
355 return -ENOMEM;
356
357 /* round up to nearest page */
358 size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
359 PAGE_SIZE);
360 tx_ring->desc = dmam_alloc_coherent(dev, size, &tx_ring->dma,
361 GFP_KERNEL);
362 if (!tx_ring->desc) {
363 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
364 size);
365 goto err;
366 }
367
368 tx_ring->next_to_use = 0;
369 tx_ring->next_to_clean = 0;
370 tx_ring->ring_stats->tx_stats.prev_pkt = -1;
371 return 0;
372
373err:
374 devm_kfree(dev, tx_ring->tx_buf);
375 tx_ring->tx_buf = NULL;
376 return -ENOMEM;
377}
378
379/**
380 * ice_clean_rx_ring - Free Rx buffers
381 * @rx_ring: ring to be cleaned
382 */
383void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
384{
385 struct device *dev = rx_ring->dev;
386 u32 size;
387 u16 i;
388
389 /* ring already cleared, nothing to do */
390 if (!rx_ring->rx_buf)
391 return;
392
393 if (rx_ring->skb) {
394 dev_kfree_skb(rx_ring->skb);
395 rx_ring->skb = NULL;
396 }
397
398 if (rx_ring->xsk_pool) {
399 ice_xsk_clean_rx_ring(rx_ring);
400 goto rx_skip_free;
401 }
402
403 /* Free all the Rx ring sk_buffs */
404 for (i = 0; i < rx_ring->count; i++) {
405 struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
406
407 if (!rx_buf->page)
408 continue;
409
410 /* Invalidate cache lines that may have been written to by
411 * device so that we avoid corrupting memory.
412 */
413 dma_sync_single_range_for_cpu(dev, rx_buf->dma,
414 rx_buf->page_offset,
415 rx_ring->rx_buf_len,
416 DMA_FROM_DEVICE);
417
418 /* free resources associated with mapping */
419 dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
420 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
421 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
422
423 rx_buf->page = NULL;
424 rx_buf->page_offset = 0;
425 }
426
427rx_skip_free:
428 if (rx_ring->xsk_pool)
429 memset(rx_ring->xdp_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->xdp_buf)));
430 else
431 memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf)));
432
433 /* Zero out the descriptor ring */
434 size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
435 PAGE_SIZE);
436 memset(rx_ring->desc, 0, size);
437
438 rx_ring->next_to_alloc = 0;
439 rx_ring->next_to_clean = 0;
440 rx_ring->next_to_use = 0;
441}
442
443/**
444 * ice_free_rx_ring - Free Rx resources
445 * @rx_ring: ring to clean the resources from
446 *
447 * Free all receive software resources
448 */
449void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
450{
451 u32 size;
452
453 ice_clean_rx_ring(rx_ring);
454 if (rx_ring->vsi->type == ICE_VSI_PF)
455 if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
456 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
457 rx_ring->xdp_prog = NULL;
458 if (rx_ring->xsk_pool) {
459 kfree(rx_ring->xdp_buf);
460 rx_ring->xdp_buf = NULL;
461 } else {
462 kfree(rx_ring->rx_buf);
463 rx_ring->rx_buf = NULL;
464 }
465
466 if (rx_ring->desc) {
467 size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
468 PAGE_SIZE);
469 dmam_free_coherent(rx_ring->dev, size,
470 rx_ring->desc, rx_ring->dma);
471 rx_ring->desc = NULL;
472 }
473}
474
475/**
476 * ice_setup_rx_ring - Allocate the Rx descriptors
477 * @rx_ring: the Rx ring to set up
478 *
479 * Return 0 on success, negative on error
480 */
481int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
482{
483 struct device *dev = rx_ring->dev;
484 u32 size;
485
486 if (!dev)
487 return -ENOMEM;
488
489 /* warn if we are about to overwrite the pointer */
490 WARN_ON(rx_ring->rx_buf);
491 rx_ring->rx_buf =
492 kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
493 if (!rx_ring->rx_buf)
494 return -ENOMEM;
495
496 /* round up to nearest page */
497 size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
498 PAGE_SIZE);
499 rx_ring->desc = dmam_alloc_coherent(dev, size, &rx_ring->dma,
500 GFP_KERNEL);
501 if (!rx_ring->desc) {
502 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
503 size);
504 goto err;
505 }
506
507 rx_ring->next_to_use = 0;
508 rx_ring->next_to_clean = 0;
509
510 if (ice_is_xdp_ena_vsi(rx_ring->vsi))
511 WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
512
513 if (rx_ring->vsi->type == ICE_VSI_PF &&
514 !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
515 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
516 rx_ring->q_index, rx_ring->q_vector->napi.napi_id))
517 goto err;
518 return 0;
519
520err:
521 kfree(rx_ring->rx_buf);
522 rx_ring->rx_buf = NULL;
523 return -ENOMEM;
524}
525
526static unsigned int
527ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, unsigned int __maybe_unused size)
528{
529 unsigned int truesize;
530
531#if (PAGE_SIZE < 8192)
532 truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
533#else
534 truesize = rx_ring->rx_offset ?
535 SKB_DATA_ALIGN(rx_ring->rx_offset + size) +
536 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
537 SKB_DATA_ALIGN(size);
538#endif
539 return truesize;
540}
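
/* Illustrative truesize values (build configuration assumed): with 4K
 * pages the helper always reports half of the Rx page (2048 bytes for an
 * order-0 page) because buffers are page-flipped. On a 64K-page system
 * with a non-zero rx_offset it instead reports
 * SKB_DATA_ALIGN(rx_offset + size) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
 * i.e. the footprint of that one frame plus the shared_info tail.
 */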
541
542/**
543 * ice_run_xdp - Executes an XDP program on initialized xdp_buff
544 * @rx_ring: Rx ring
545 * @xdp: xdp_buff used as input to the XDP program
546 * @xdp_prog: XDP program to run
547 * @xdp_ring: ring to be used for XDP_TX action
548 *
549 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
550 */
551static int
552ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
553 struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
554{
555 int err;
556 u32 act;
557
558 act = bpf_prog_run_xdp(xdp_prog, xdp);
559 switch (act) {
560 case XDP_PASS:
561 return ICE_XDP_PASS;
562 case XDP_TX:
563 if (static_branch_unlikely(&ice_xdp_locking_key))
564 spin_lock(&xdp_ring->tx_lock);
565 err = ice_xmit_xdp_ring(xdp->data, xdp->data_end - xdp->data, xdp_ring);
566 if (static_branch_unlikely(&ice_xdp_locking_key))
567 spin_unlock(&xdp_ring->tx_lock);
568 if (err == ICE_XDP_CONSUMED)
569 goto out_failure;
570 return err;
571 case XDP_REDIRECT:
572 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
573 if (err)
574 goto out_failure;
575 return ICE_XDP_REDIR;
576 default:
577 bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
578 fallthrough;
579 case XDP_ABORTED:
580out_failure:
581 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
582 fallthrough;
583 case XDP_DROP:
584 return ICE_XDP_CONSUMED;
585 }
586}
587
588/**
589 * ice_xdp_xmit - submit packets to XDP ring for transmission
590 * @dev: netdev
591 * @n: number of XDP frames to be transmitted
592 * @frames: XDP frames to be transmitted
593 * @flags: transmit flags
594 *
595 * Returns the number of frames successfully sent. Failed frames
596 * will be freed by the XDP core.
597 * For error cases, a negative errno code is returned and no frames
598 * are transmitted (the caller must handle freeing frames).
599 */
600int
601ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
602 u32 flags)
603{
604 struct ice_netdev_priv *np = netdev_priv(dev);
605 unsigned int queue_index = smp_processor_id();
606 struct ice_vsi *vsi = np->vsi;
607 struct ice_tx_ring *xdp_ring;
608 int nxmit = 0, i;
609
610 if (test_bit(ICE_VSI_DOWN, vsi->state))
611 return -ENETDOWN;
612
613 if (!ice_is_xdp_ena_vsi(vsi))
614 return -ENXIO;
615
616 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
617 return -EINVAL;
618
619 if (static_branch_unlikely(&ice_xdp_locking_key)) {
620 queue_index %= vsi->num_xdp_txq;
621 xdp_ring = vsi->xdp_rings[queue_index];
622 spin_lock(&xdp_ring->tx_lock);
623 } else {
624 /* Generally, should not happen */
625 if (unlikely(queue_index >= vsi->num_xdp_txq))
626 return -ENXIO;
627 xdp_ring = vsi->xdp_rings[queue_index];
628 }
629
630 for (i = 0; i < n; i++) {
631 struct xdp_frame *xdpf = frames[i];
632 int err;
633
634 err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
635 if (err != ICE_XDP_TX)
636 break;
637 nxmit++;
638 }
639
640 if (unlikely(flags & XDP_XMIT_FLUSH))
641 ice_xdp_ring_update_tail(xdp_ring);
642
643 if (static_branch_unlikely(&ice_xdp_locking_key))
644 spin_unlock(&xdp_ring->tx_lock);
645
646 return nxmit;
647}
648
649/**
650 * ice_alloc_mapped_page - recycle or make a new page
651 * @rx_ring: ring to use
652 * @bi: rx_buf struct to modify
653 *
654 * Returns true if the page was successfully allocated or
655 * reused.
656 */
657static bool
658ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
659{
660 struct page *page = bi->page;
661 dma_addr_t dma;
662
663 /* since we are recycling buffers we should seldom need to alloc */
664 if (likely(page))
665 return true;
666
667 /* alloc new page for storage */
668 page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
669 if (unlikely(!page)) {
670 rx_ring->ring_stats->rx_stats.alloc_page_failed++;
671 return false;
672 }
673
674 /* map page for use */
675 dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
676 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
677
678 /* if mapping failed free memory back to system since
679 * there isn't much point in holding memory we can't use
680 */
681 if (dma_mapping_error(rx_ring->dev, dma)) {
682 __free_pages(page, ice_rx_pg_order(rx_ring));
683 rx_ring->ring_stats->rx_stats.alloc_page_failed++;
684 return false;
685 }
686
687 bi->dma = dma;
688 bi->page = page;
689 bi->page_offset = rx_ring->rx_offset;
690 page_ref_add(page, USHRT_MAX - 1);
691 bi->pagecnt_bias = USHRT_MAX;
692
693 return true;
694}
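
/* Note on the refcount trick above (numbers only illustrative):
 * page_ref_add(page, USHRT_MAX - 1) together with pagecnt_bias = USHRT_MAX
 * pre-pays roughly 64K page references. Handing a half-page to the stack
 * then costs only a non-atomic pagecnt_bias-- per frame instead of an
 * atomic page_ref_inc(); whatever bias is still unspent is returned in one
 * go via __page_frag_cache_drain() when the page is finally freed.
 */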
695
696/**
697 * ice_alloc_rx_bufs - Replace used receive buffers
698 * @rx_ring: ring to place buffers on
699 * @cleaned_count: number of buffers to replace
700 *
701 * Returns false if all allocations were successful, true if any fail. Returning
702 * true signals to the caller that we didn't replace cleaned_count buffers and
703 * there is more work to do.
704 *
705 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
706 * buffers. Then bump tail at most one time. Grouping like this lets us avoid
707 * multiple tail writes per call.
708 */
709bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, u16 cleaned_count)
710{
711 union ice_32b_rx_flex_desc *rx_desc;
712 u16 ntu = rx_ring->next_to_use;
713 struct ice_rx_buf *bi;
714
715 /* do nothing if no valid netdev defined */
716 if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
717 !cleaned_count)
718 return false;
719
720 /* get the Rx descriptor and buffer based on next_to_use */
721 rx_desc = ICE_RX_DESC(rx_ring, ntu);
722 bi = &rx_ring->rx_buf[ntu];
723
724 do {
725 /* if we fail here, we have work remaining */
726 if (!ice_alloc_mapped_page(rx_ring, bi))
727 break;
728
729 /* sync the buffer for use by the device */
730 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
731 bi->page_offset,
732 rx_ring->rx_buf_len,
733 DMA_FROM_DEVICE);
734
735 /* Refresh the desc even if buffer_addrs didn't change
736 * because each write-back erases this info.
737 */
738 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
739
740 rx_desc++;
741 bi++;
742 ntu++;
743 if (unlikely(ntu == rx_ring->count)) {
744 rx_desc = ICE_RX_DESC(rx_ring, 0);
745 bi = rx_ring->rx_buf;
746 ntu = 0;
747 }
748
749 /* clear the status bits for the next_to_use descriptor */
750 rx_desc->wb.status_error0 = 0;
751
752 cleaned_count--;
753 } while (cleaned_count);
754
755 if (rx_ring->next_to_use != ntu)
756 ice_release_rx_desc(rx_ring, ntu);
757
758 return !!cleaned_count;
759}
760
761/**
762 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
763 * @rx_buf: Rx buffer to adjust
764 * @size: Size of adjustment
765 *
766 * Update the offset within page so that Rx buf will be ready to be reused.
767 * For systems with PAGE_SIZE < 8192 this function will flip the page offset
768 * so the second half of page assigned to Rx buffer will be used, otherwise
769 * the offset is moved by "size" bytes
770 */
771static void
772ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
773{
774#if (PAGE_SIZE < 8192)
775 /* flip page offset to other buffer */
776 rx_buf->page_offset ^= size;
777#else
778 /* move offset up to the next cache line */
779 rx_buf->page_offset += size;
780#endif
781}
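
/* Example of the flip on 4K pages (a 2K truesize is assumed):
 * page_offset 0 ^ 2048 = 2048 and 2048 ^ 2048 = 0, so the buffer simply
 * ping-pongs between the two halves of the page. On >= 8K pages the
 * offset instead keeps advancing by "size" until the reuse check below
 * declares the page exhausted.
 */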
782
783/**
784 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
785 * @rx_buf: buffer containing the page
786 * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call
787 *
788 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
789 * which will assign the current buffer to the buffer that next_to_alloc is
790 * pointing to; otherwise, the DMA mapping needs to be destroyed and
791 * page freed
792 */
793static bool
794ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
795{
796 unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
797 struct page *page = rx_buf->page;
798
799 /* avoid re-using remote and pfmemalloc pages */
800 if (!dev_page_is_reusable(page))
801 return false;
802
803#if (PAGE_SIZE < 8192)
804 /* if we are only owner of page we can reuse it */
805 if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
806 return false;
807#else
808#define ICE_LAST_OFFSET \
809 (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
810 if (rx_buf->page_offset > ICE_LAST_OFFSET)
811 return false;
812#endif /* PAGE_SIZE < 8192) */
813
814 /* If we have drained the page fragment pool we need to update
815 * the pagecnt_bias and page count so that we fully restock the
816 * number of references the driver holds.
817 */
818 if (unlikely(pagecnt_bias == 1)) {
819 page_ref_add(page, USHRT_MAX - 1);
820 rx_buf->pagecnt_bias = USHRT_MAX;
821 }
822
823 return true;
824}
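
/* Reuse criteria in numbers (illustrative): on 4K pages the buffer is
 * recycled only while the driver is effectively the sole owner, i.e.
 * page_count() minus pagecnt_bias is at most 1; a frame still held by the
 * stack makes that difference larger and forces a fresh allocation. On
 * larger pages reuse stops once the offset has advanced past
 * ICE_LAST_OFFSET, leaving less than ICE_RXBUF_2048 bytes (plus skb
 * overhead) before the end of the page.
 */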
825
826/**
827 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
828 * @rx_ring: Rx descriptor ring to transact packets on
829 * @rx_buf: buffer containing page to add
830 * @skb: sk_buff to place the data into
831 * @size: packet length from rx_desc
832 *
833 * This function will add the data contained in rx_buf->page to the skb.
834 * It will just attach the page as a frag to the skb.
835 * The function will then update the page offset.
836 */
837static void
838ice_add_rx_frag(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
839 struct sk_buff *skb, unsigned int size)
840{
841#if (PAGE_SIZE >= 8192)
842 unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset);
843#else
844 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
845#endif
846
847 if (!size)
848 return;
849 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
850 rx_buf->page_offset, size, truesize);
851
852 /* page is being used so we must update the page offset */
853 ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
854}
855
856/**
857 * ice_reuse_rx_page - page flip buffer and store it back on the ring
858 * @rx_ring: Rx descriptor ring to store buffers on
859 * @old_buf: donor buffer to have page reused
860 *
861 * Synchronizes page for reuse by the adapter
862 */
863static void
864ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf)
865{
866 u16 nta = rx_ring->next_to_alloc;
867 struct ice_rx_buf *new_buf;
868
869 new_buf = &rx_ring->rx_buf[nta];
870
871 /* update, and store next to alloc */
872 nta++;
873 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
874
875 /* Transfer page from old buffer to new buffer.
876 * Move each member individually to avoid possible store
877 * forwarding stalls and unnecessary copy of skb.
878 */
879 new_buf->dma = old_buf->dma;
880 new_buf->page = old_buf->page;
881 new_buf->page_offset = old_buf->page_offset;
882 new_buf->pagecnt_bias = old_buf->pagecnt_bias;
883}
884
885/**
886 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
887 * @rx_ring: Rx descriptor ring to transact packets on
888 * @size: size of buffer to add to skb
889 * @rx_buf_pgcnt: rx_buf page refcount
890 *
891 * This function will pull an Rx buffer from the ring and synchronize it
892 * for use by the CPU.
893 */
894static struct ice_rx_buf *
895ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
896 int *rx_buf_pgcnt)
897{
898 struct ice_rx_buf *rx_buf;
899
900 rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
901 *rx_buf_pgcnt =
902#if (PAGE_SIZE < 8192)
903 page_count(rx_buf->page);
904#else
905 0;
906#endif
907 prefetchw(rx_buf->page);
908
909 if (!size)
910 return rx_buf;
911 /* we are reusing so sync this buffer for CPU use */
912 dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
913 rx_buf->page_offset, size,
914 DMA_FROM_DEVICE);
915
916 /* We have pulled a buffer for use, so decrement pagecnt_bias */
917 rx_buf->pagecnt_bias--;
918
919 return rx_buf;
920}
921
922/**
923 * ice_build_skb - Build skb around an existing buffer
924 * @rx_ring: Rx descriptor ring to transact packets on
925 * @rx_buf: Rx buffer to pull data from
926 * @xdp: xdp_buff pointing to the data
927 *
928 * This function builds an skb around an existing Rx buffer, taking care
929 * to set up the skb correctly and avoid any memcpy overhead.
930 */
931static struct sk_buff *
932ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
933 struct xdp_buff *xdp)
934{
935 u8 metasize = xdp->data - xdp->data_meta;
936#if (PAGE_SIZE < 8192)
937 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
938#else
939 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
940 SKB_DATA_ALIGN(xdp->data_end -
941 xdp->data_hard_start);
942#endif
943 struct sk_buff *skb;
944
945 /* Prefetch first cache line of first page. If xdp->data_meta
946 * is unused, this points exactly as xdp->data, otherwise we
947 * likely have a consumer accessing first few bytes of meta
948 * data, and then actual data.
949 */
950 net_prefetch(xdp->data_meta);
951 /* build an skb around the page buffer */
952 skb = napi_build_skb(xdp->data_hard_start, truesize);
953 if (unlikely(!skb))
954 return NULL;
955
956	/* must record the Rx queue, otherwise OS features such as
957	 * symmetric queues won't work
958 */
959 skb_record_rx_queue(skb, rx_ring->q_index);
960
961 /* update pointers within the skb to store the data */
962 skb_reserve(skb, xdp->data - xdp->data_hard_start);
963 __skb_put(skb, xdp->data_end - xdp->data);
964 if (metasize)
965 skb_metadata_set(skb, metasize);
966
967 /* buffer is used by skb, update page_offset */
968 ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
969
970 return skb;
971}
972
973/**
974 * ice_construct_skb - Allocate skb and populate it
975 * @rx_ring: Rx descriptor ring to transact packets on
976 * @rx_buf: Rx buffer to pull data from
977 * @xdp: xdp_buff pointing to the data
978 *
979 * This function allocates an skb. It then populates it with the page
980 * data from the current receive descriptor, taking care to set up the
981 * skb correctly.
982 */
983static struct sk_buff *
984ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
985 struct xdp_buff *xdp)
986{
987 unsigned int metasize = xdp->data - xdp->data_meta;
988 unsigned int size = xdp->data_end - xdp->data;
989 unsigned int headlen;
990 struct sk_buff *skb;
991
992 /* prefetch first cache line of first page */
993 net_prefetch(xdp->data_meta);
994
995 /* allocate a skb to store the frags */
996 skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
997 ICE_RX_HDR_SIZE + metasize,
998 GFP_ATOMIC | __GFP_NOWARN);
999 if (unlikely(!skb))
1000 return NULL;
1001
1002 skb_record_rx_queue(skb, rx_ring->q_index);
1003 /* Determine available headroom for copy */
1004 headlen = size;
1005 if (headlen > ICE_RX_HDR_SIZE)
1006 headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);
1007
1008 /* align pull length to size of long to optimize memcpy performance */
1009 memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta,
1010 ALIGN(headlen + metasize, sizeof(long)));
1011
1012 if (metasize) {
1013 skb_metadata_set(skb, metasize);
1014 __skb_pull(skb, metasize);
1015 }
1016
1017 /* if we exhaust the linear part then add what is left as a frag */
1018 size -= headlen;
1019 if (size) {
1020#if (PAGE_SIZE >= 8192)
1021 unsigned int truesize = SKB_DATA_ALIGN(size);
1022#else
1023 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
1024#endif
1025 skb_add_rx_frag(skb, 0, rx_buf->page,
1026 rx_buf->page_offset + headlen, size, truesize);
1027 /* buffer is used by skb, update page_offset */
1028 ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
1029 } else {
1030 /* buffer is unused, reset bias back to rx_buf; data was copied
1031 * onto skb's linear part so there's no need for adjusting
1032 * page offset and we can reuse this buffer as-is
1033 */
1034 rx_buf->pagecnt_bias++;
1035 }
1036
1037 return skb;
1038}
1039
1040/**
1041 * ice_put_rx_buf - Clean up used buffer and either recycle or free
1042 * @rx_ring: Rx descriptor ring to transact packets on
1043 * @rx_buf: Rx buffer to pull data from
1044 * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect()
1045 *
1046 * This function will update next_to_clean and then clean up the contents
1047 * of the rx_buf. It will either recycle the buffer or unmap it and free
1048 * the associated resources.
1049 */
1050static void
1051ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
1052 int rx_buf_pgcnt)
1053{
1054 u16 ntc = rx_ring->next_to_clean + 1;
1055
1056 /* fetch, update, and store next to clean */
1057 ntc = (ntc < rx_ring->count) ? ntc : 0;
1058 rx_ring->next_to_clean = ntc;
1059
1060 if (!rx_buf)
1061 return;
1062
1063 if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) {
1064 /* hand second half of page back to the ring */
1065 ice_reuse_rx_page(rx_ring, rx_buf);
1066 } else {
1067 /* we are not reusing the buffer so unmap it */
1068 dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
1069 ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
1070 ICE_RX_DMA_ATTR);
1071 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
1072 }
1073
1074 /* clear contents of buffer_info */
1075 rx_buf->page = NULL;
1076}
1077
1078/**
1079 * ice_is_non_eop - process handling of non-EOP buffers
1080 * @rx_ring: Rx ring being processed
1081 * @rx_desc: Rx descriptor for current buffer
1082 *
1083 * If the buffer is an EOP buffer, this function exits returning false,
1084 * otherwise return true indicating that this is in fact a non-EOP buffer.
1085 */
1086static bool
1087ice_is_non_eop(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
1088{
1089 /* if we are the last buffer then there is nothing else to do */
1090#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
1091 if (likely(ice_test_staterr(rx_desc->wb.status_error0, ICE_RXD_EOF)))
1092 return false;
1093
1094 rx_ring->ring_stats->rx_stats.non_eop_descs++;
1095
1096 return true;
1097}
1098
1099/**
1100 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
1101 * @rx_ring: Rx descriptor ring to transact packets on
1102 * @budget: Total limit on number of packets to process
1103 *
1104 * This function provides a "bounce buffer" approach to Rx interrupt
1105 * processing. The advantage to this is that on systems that have
1106 * expensive overhead for IOMMU access this provides a means of avoiding
1107 * it by maintaining the mapping of the page to the system.
1108 *
1109 * Returns amount of work completed
1110 */
1111int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
1112{
1113 unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0;
1114 u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
1115 unsigned int offset = rx_ring->rx_offset;
1116 struct ice_tx_ring *xdp_ring = NULL;
1117 unsigned int xdp_res, xdp_xmit = 0;
1118 struct sk_buff *skb = rx_ring->skb;
1119 struct bpf_prog *xdp_prog = NULL;
1120 struct xdp_buff xdp;
1121 bool failure;
1122
1123	/* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
1124#if (PAGE_SIZE < 8192)
1125 frame_sz = ice_rx_frame_truesize(rx_ring, 0);
1126#endif
1127 xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
1128
1129 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
1130 if (xdp_prog)
1131 xdp_ring = rx_ring->xdp_ring;
1132
1133 /* start the loop to process Rx packets bounded by 'budget' */
1134 while (likely(total_rx_pkts < (unsigned int)budget)) {
1135 union ice_32b_rx_flex_desc *rx_desc;
1136 struct ice_rx_buf *rx_buf;
1137 unsigned char *hard_start;
1138 unsigned int size;
1139 u16 stat_err_bits;
1140 int rx_buf_pgcnt;
1141 u16 vlan_tag = 0;
1142 u16 rx_ptype;
1143
1144 /* get the Rx desc from Rx ring based on 'next_to_clean' */
1145 rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
1146
1147 /* status_error_len will always be zero for unused descriptors
1148 * because it's cleared in cleanup, and overlaps with hdr_addr
1149		 * which is always zero because packet split isn't used; if the
1150		 * hardware wrote DD then it will be non-zero
1151 */
1152 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
1153 if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
1154 break;
1155
1156 /* This memory barrier is needed to keep us from reading
1157 * any other fields out of the rx_desc until we know the
1158 * DD bit is set.
1159 */
1160 dma_rmb();
1161
1162 ice_trace(clean_rx_irq, rx_ring, rx_desc);
1163 if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
1164 struct ice_vsi *ctrl_vsi = rx_ring->vsi;
1165
1166 if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&
1167 ctrl_vsi->vf)
1168 ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
1169 ice_put_rx_buf(rx_ring, NULL, 0);
1170 cleaned_count++;
1171 continue;
1172 }
1173
1174 size = le16_to_cpu(rx_desc->wb.pkt_len) &
1175 ICE_RX_FLX_DESC_PKT_LEN_M;
1176
1177 /* retrieve a buffer from the ring */
1178 rx_buf = ice_get_rx_buf(rx_ring, size, &rx_buf_pgcnt);
1179
1180 if (!size) {
1181 xdp.data = NULL;
1182 xdp.data_end = NULL;
1183 xdp.data_hard_start = NULL;
1184 xdp.data_meta = NULL;
1185 goto construct_skb;
1186 }
1187
1188 hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
1189 offset;
1190 xdp_prepare_buff(&xdp, hard_start, offset, size, true);
1191#if (PAGE_SIZE > 4096)
		/* At larger PAGE_SIZE, frame_sz depends on the frame length */
1193 xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
1194#endif
1195
1196 if (!xdp_prog)
1197 goto construct_skb;
1198
1199 xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog, xdp_ring);
1200 if (!xdp_res)
1201 goto construct_skb;
1202 if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
1203 xdp_xmit |= xdp_res;
1204 ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz);
1205 } else {
1206 rx_buf->pagecnt_bias++;
1207 }
1208 total_rx_bytes += size;
1209 total_rx_pkts++;
1210
1211 cleaned_count++;
1212 ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
1213 continue;
1214construct_skb:
1215 if (skb) {
1216 ice_add_rx_frag(rx_ring, rx_buf, skb, size);
1217 } else if (likely(xdp.data)) {
1218 if (ice_ring_uses_build_skb(rx_ring))
1219 skb = ice_build_skb(rx_ring, rx_buf, &xdp);
1220 else
1221 skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
1222 }
1223 /* exit if we failed to retrieve a buffer */
1224 if (!skb) {
1225 rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
1226 if (rx_buf)
1227 rx_buf->pagecnt_bias++;
1228 break;
1229 }
1230
1231 ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
1232 cleaned_count++;
1233
		/* skip if it is a non-EOP descriptor */
1235 if (ice_is_non_eop(rx_ring, rx_desc))
1236 continue;
1237
1238 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
1239 if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,
1240 stat_err_bits))) {
1241 dev_kfree_skb_any(skb);
1242 continue;
1243 }
1244
1245 vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc);
1246
1247 /* pad the skb if needed, to make a valid ethernet frame */
1248 if (eth_skb_pad(skb)) {
1249 skb = NULL;
1250 continue;
1251 }
1252
1253 /* probably a little skewed due to removing CRC */
1254 total_rx_bytes += skb->len;
1255
1256 /* populate checksum, VLAN, and protocol */
1257 rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
1258 ICE_RX_FLEX_DESC_PTYPE_M;
1259
1260 ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
1261
1262 ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb);
1263 /* send completed skb up the stack */
1264 ice_receive_skb(rx_ring, skb, vlan_tag);
1265 skb = NULL;
1266
1267 /* update budget accounting */
1268 total_rx_pkts++;
1269 }
1270
1271 /* return up to cleaned_count buffers to hardware */
1272 failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
1273
1274 if (xdp_prog)
1275 ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
1276 rx_ring->skb = skb;
1277
1278 if (rx_ring->ring_stats)
1279 ice_update_rx_ring_stats(rx_ring, total_rx_pkts,
1280 total_rx_bytes);
1281
1282 /* guarantee a trip back through this routine if there was a failure */
1283 return failure ? budget : (int)total_rx_pkts;
1284}
1285
/**
 * __ice_update_sample - update the DIM sample from ring container stats
 * @q_vector: interrupt vector the sample is collected for
 * @rc: ring container to pull the packet and byte counts from
 * @sample: dim sample to populate
 * @is_tx: true for a Tx sample, false for an Rx sample
 */
static void __ice_update_sample(struct ice_q_vector *q_vector,
1287 struct ice_ring_container *rc,
1288 struct dim_sample *sample,
1289 bool is_tx)
1290{
1291 u64 packets = 0, bytes = 0;
1292
1293 if (is_tx) {
1294 struct ice_tx_ring *tx_ring;
1295
1296 ice_for_each_tx_ring(tx_ring, *rc) {
1297 struct ice_ring_stats *ring_stats;
1298
1299 ring_stats = tx_ring->ring_stats;
1300 if (!ring_stats)
1301 continue;
1302 packets += ring_stats->stats.pkts;
1303 bytes += ring_stats->stats.bytes;
1304 }
1305 } else {
1306 struct ice_rx_ring *rx_ring;
1307
1308 ice_for_each_rx_ring(rx_ring, *rc) {
1309 struct ice_ring_stats *ring_stats;
1310
1311 ring_stats = rx_ring->ring_stats;
1312 if (!ring_stats)
1313 continue;
1314 packets += ring_stats->stats.pkts;
1315 bytes += ring_stats->stats.bytes;
1316 }
1317 }
1318
1319 dim_update_sample(q_vector->total_events, packets, bytes, sample);
1320 sample->comp_ctr = 0;
1321
1322 /* if dim settings get stale, like when not updated for 1
1323 * second or longer, force it to start again. This addresses the
1324 * frequent case of an idle queue being switched to by the
1325 * scheduler. The 1,000 here means 1,000 milliseconds.
1326 */
1327 if (ktime_ms_delta(sample->time, rc->dim.start_sample.time) >= 1000)
1328 rc->dim.state = DIM_START_MEASURE;
1329}
1330
1331/**
1332 * ice_net_dim - Update net DIM algorithm
1333 * @q_vector: the vector associated with the interrupt
1334 *
1335 * Create a DIM sample and notify net_dim() so that it can possibly decide
1336 * a new ITR value based on incoming packets, bytes, and interrupts.
1337 *
1338 * This function is a no-op if the ring is not configured to dynamic ITR.
1339 */
1340static void ice_net_dim(struct ice_q_vector *q_vector)
1341{
1342 struct ice_ring_container *tx = &q_vector->tx;
1343 struct ice_ring_container *rx = &q_vector->rx;
1344
1345 if (ITR_IS_DYNAMIC(tx)) {
1346 struct dim_sample dim_sample;
1347
1348 __ice_update_sample(q_vector, tx, &dim_sample, true);
1349 net_dim(&tx->dim, dim_sample);
1350 }
1351
1352 if (ITR_IS_DYNAMIC(rx)) {
1353 struct dim_sample dim_sample;
1354
1355 __ice_update_sample(q_vector, rx, &dim_sample, false);
1356 net_dim(&rx->dim, dim_sample);
1357 }
1358}
1359
1360/**
1361 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
1362 * @itr_idx: interrupt throttling index
1363 * @itr: interrupt throttling value in usecs
1364 */
1365static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
1366{
1367 /* The ITR value is reported in microseconds, and the register value is
1368 * recorded in 2 microsecond units. For this reason we only need to
1369 * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
1370 * granularity as a shift instead of division. The mask makes sure the
1371 * ITR value is never odd so we don't accidentally write into the field
1372 * prior to the ITR field.
1373 */
1374 itr &= ICE_ITR_MASK;
1375
1376 return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
1377 (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
1378 (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
1379}
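
/* Worked example (a sketch, assuming the usual register layout where the
 * INTERVAL field starts at bit 5 and ICE_ITR_GRAN_S is 1, i.e. 2 usec
 * granularity): ice_buildreg_itr(0, 50) keeps the even ITR value 50 and
 * shifts it left by 5 - 1 = 4, so the INTERVAL field ends up holding
 * 50 / 2 = 25 two-microsecond units, with INTENA and CLEARPBA set and ITR
 * index 0 selected.
 */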
1380
1381/**
1382 * ice_enable_interrupt - re-enable MSI-X interrupt
1383 * @q_vector: the vector associated with the interrupt to enable
1384 *
1385 * If the VSI is down, the interrupt will not be re-enabled. Also,
1386 * when enabling the interrupt always reset the wb_on_itr to false
1387 * and trigger a software interrupt to clean out internal state.
1388 */
1389static void ice_enable_interrupt(struct ice_q_vector *q_vector)
1390{
1391 struct ice_vsi *vsi = q_vector->vsi;
1392 bool wb_en = q_vector->wb_on_itr;
1393 u32 itr_val;
1394
1395 if (test_bit(ICE_DOWN, vsi->state))
1396 return;
1397
1398 /* trigger an ITR delayed software interrupt when exiting busy poll, to
1399 * make sure to catch any pending cleanups that might have been missed
1400 * due to interrupt state transition. If busy poll or poll isn't
1401 * enabled, then don't update ITR, and just enable the interrupt.
1402 */
1403 if (!wb_en) {
1404 itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
1405 } else {
1406 q_vector->wb_on_itr = false;
1407
		/* do two things here with a single write. Set up the third ITR
		 * index to be used for software interrupt moderation, and then
		 * trigger a software interrupt with a rate limit of 20K on
		 * software interrupts; this helps avoid high interrupt loads
		 * caused by frequently entering and exiting polling.
		 */
1414 itr_val = ice_buildreg_itr(ICE_IDX_ITR2, ICE_ITR_20K);
1415 itr_val |= GLINT_DYN_CTL_SWINT_TRIG_M |
1416 ICE_IDX_ITR2 << GLINT_DYN_CTL_SW_ITR_INDX_S |
1417 GLINT_DYN_CTL_SW_ITR_INDX_ENA_M;
1418 }
1419 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
1420}
1421
1422/**
1423 * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
1424 * @q_vector: q_vector to set WB_ON_ITR on
1425 *
1426 * We need to tell hardware to write-back completed descriptors even when
1427 * interrupts are disabled. Descriptors will be written back on cache line
1428 * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR
1429 * descriptors may not be written back if they don't fill a cache line until
1430 * the next interrupt.
1431 *
1432 * This sets the write-back frequency to whatever was set previously for the
1433 * ITR indices. Also, set the INTENA_MSK bit to make sure hardware knows we
1434 * aren't meddling with the INTENA_M bit.
1435 */
1436static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
1437{
1438 struct ice_vsi *vsi = q_vector->vsi;
1439
1440 /* already in wb_on_itr mode no need to change it */
1441 if (q_vector->wb_on_itr)
1442 return;
1443
1444 /* use previously set ITR values for all of the ITR indices by
1445 * specifying ICE_ITR_NONE, which will vary in adaptive (AIM) mode and
1446 * be static in non-adaptive mode (user configured)
1447 */
1448 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
1449 ((ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) &
1450 GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M |
1451 GLINT_DYN_CTL_WB_ON_ITR_M);
1452
1453 q_vector->wb_on_itr = true;
1454}
1455
1456/**
1457 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
1458 * @napi: napi struct with our devices info in it
1459 * @budget: amount of work driver is allowed to do this pass, in packets
1460 *
1461 * This function will clean all queues associated with a q_vector.
1462 *
1463 * Returns the amount of work done
1464 */
1465int ice_napi_poll(struct napi_struct *napi, int budget)
1466{
1467 struct ice_q_vector *q_vector =
1468 container_of(napi, struct ice_q_vector, napi);
1469 struct ice_tx_ring *tx_ring;
1470 struct ice_rx_ring *rx_ring;
1471 bool clean_complete = true;
1472 int budget_per_ring;
1473 int work_done = 0;
1474
1475 /* Since the actual Tx work is minimal, we can give the Tx a larger
1476 * budget and be more aggressive about cleaning up the Tx descriptors.
1477 */
1478 ice_for_each_tx_ring(tx_ring, q_vector->tx) {
1479 bool wd;
1480
1481 if (tx_ring->xsk_pool)
1482 wd = ice_xmit_zc(tx_ring);
1483 else if (ice_ring_is_xdp(tx_ring))
1484 wd = true;
1485 else
1486 wd = ice_clean_tx_irq(tx_ring, budget);
1487
1488 if (!wd)
1489 clean_complete = false;
1490 }
1491
1492 /* Handle case where we are called by netpoll with a budget of 0 */
1493 if (unlikely(budget <= 0))
1494 return budget;
1495
1496 /* normally we have 1 Rx ring per q_vector */
1497 if (unlikely(q_vector->num_ring_rx > 1))
1498 /* We attempt to distribute budget to each Rx queue fairly, but
1499 * don't allow the budget to go below 1 because that would exit
1500 * polling early.
1501 */
1502 budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1);
1503 else
1504 /* Max of 1 Rx ring in this q_vector so give it the budget */
1505 budget_per_ring = budget;
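	/* e.g. with the typical NAPI budget of 64 and three Rx rings on this
	 * vector, each ring is polled with max(64 / 3, 1) = 21 packets of
	 * budget
	 */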
1506
1507 ice_for_each_rx_ring(rx_ring, q_vector->rx) {
1508 int cleaned;
1509
1510 /* A dedicated path for zero-copy allows making a single
1511 * comparison in the irq context instead of many inside the
1512 * ice_clean_rx_irq function and makes the codebase cleaner.
1513 */
1514 cleaned = rx_ring->xsk_pool ?
1515 ice_clean_rx_irq_zc(rx_ring, budget_per_ring) :
1516 ice_clean_rx_irq(rx_ring, budget_per_ring);
1517 work_done += cleaned;
1518 /* if we clean as many as budgeted, we must not be done */
1519 if (cleaned >= budget_per_ring)
1520 clean_complete = false;
1521 }
1522
1523 /* If work not completed, return budget and polling will return */
1524 if (!clean_complete) {
1525 /* Set the writeback on ITR so partial completions of
1526 * cache-lines will still continue even if we're polling.
1527 */
1528 ice_set_wb_on_itr(q_vector);
1529 return budget;
1530 }
1531
1532 /* Exit the polling mode, but don't re-enable interrupts if stack might
1533 * poll us due to busy-polling
1534 */
1535 if (napi_complete_done(napi, work_done)) {
1536 ice_net_dim(q_vector);
1537 ice_enable_interrupt(q_vector);
1538 } else {
1539 ice_set_wb_on_itr(q_vector);
1540 }
1541
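	/* a poll that called napi_complete_done() must report strictly less
	 * than the full budget back to the NAPI core, hence the clamp to
	 * budget - 1
	 */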
1542 return min_t(int, work_done, budget - 1);
1543}
1544
1545/**
1546 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
1547 * @tx_ring: the ring to be checked
 * @size: the number of descriptors we want to assure are available
1549 *
1550 * Returns -EBUSY if a stop is needed, else 0
1551 */
1552static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
1553{
1554 netif_tx_stop_queue(txring_txq(tx_ring));
1555 /* Memory barrier before checking head and tail */
1556 smp_mb();
1557
1558 /* Check again in a case another CPU has just made room available. */
1559 if (likely(ICE_DESC_UNUSED(tx_ring) < size))
1560 return -EBUSY;
1561
1562 /* A reprieve! - use start_queue because it doesn't call schedule */
1563 netif_tx_start_queue(txring_txq(tx_ring));
1564 ++tx_ring->ring_stats->tx_stats.restart_q;
1565 return 0;
1566}
1567
1568/**
1569 * ice_maybe_stop_tx - 1st level check for Tx stop conditions
1570 * @tx_ring: the ring to be checked
 * @size: the number of descriptors we want to assure are available
1572 *
1573 * Returns 0 if stop is not needed
1574 */
1575static int ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
1576{
1577 if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
1578 return 0;
1579
1580 return __ice_maybe_stop_tx(tx_ring, size);
1581}
1582
1583/**
1584 * ice_tx_map - Build the Tx descriptor
1585 * @tx_ring: ring to send buffer on
1586 * @first: first buffer info buffer to use
1587 * @off: pointer to struct that holds offload parameters
1588 *
1589 * This function loops over the skb data pointed to by *first
1590 * and gets a physical address for each memory location and programs
1591 * it and the length into the transmit descriptor.
1592 */
1593static void
1594ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
1595 struct ice_tx_offload_params *off)
1596{
1597 u64 td_offset, td_tag, td_cmd;
1598 u16 i = tx_ring->next_to_use;
1599 unsigned int data_len, size;
1600 struct ice_tx_desc *tx_desc;
1601 struct ice_tx_buf *tx_buf;
1602 struct sk_buff *skb;
1603 skb_frag_t *frag;
1604 dma_addr_t dma;
1605 bool kick;
1606
1607 td_tag = off->td_l2tag1;
1608 td_cmd = off->td_cmd;
1609 td_offset = off->td_offset;
1610 skb = first->skb;
1611
1612 data_len = skb->data_len;
1613 size = skb_headlen(skb);
1614
1615 tx_desc = ICE_TX_DESC(tx_ring, i);
1616
1617 if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
1618 td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
1619 td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
1620 ICE_TX_FLAGS_VLAN_S;
1621 }
1622
1623 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
1624
1625 tx_buf = first;
1626
1627 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
1628 unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1629
1630 if (dma_mapping_error(tx_ring->dev, dma))
1631 goto dma_error;
1632
1633 /* record length, and DMA address */
1634 dma_unmap_len_set(tx_buf, len, size);
1635 dma_unmap_addr_set(tx_buf, dma, dma);
1636
1637 /* align size to end of page */
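		/* e.g. assuming ICE_MAX_READ_REQ_SIZE is 4096: a buffer whose
		 * DMA address sits 0x300 bytes below a 4 KiB boundary gets
		 * 0x300 added to max_data, so the first chunk ends exactly on
		 * a 4 KiB boundary and the following full-size chunks stay
		 * 4 KiB aligned.
		 */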
1638 max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
1639 tx_desc->buf_addr = cpu_to_le64(dma);
1640
1641 /* account for data chunks larger than the hardware
1642 * can handle
1643 */
1644 while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
1645 tx_desc->cmd_type_offset_bsz =
1646 ice_build_ctob(td_cmd, td_offset, max_data,
1647 td_tag);
1648
1649 tx_desc++;
1650 i++;
1651
1652 if (i == tx_ring->count) {
1653 tx_desc = ICE_TX_DESC(tx_ring, 0);
1654 i = 0;
1655 }
1656
1657 dma += max_data;
1658 size -= max_data;
1659
1660 max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1661 tx_desc->buf_addr = cpu_to_le64(dma);
1662 }
1663
1664 if (likely(!data_len))
1665 break;
1666
1667 tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset,
1668 size, td_tag);
1669
1670 tx_desc++;
1671 i++;
1672
1673 if (i == tx_ring->count) {
1674 tx_desc = ICE_TX_DESC(tx_ring, 0);
1675 i = 0;
1676 }
1677
1678 size = skb_frag_size(frag);
1679 data_len -= size;
1680
1681 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
1682 DMA_TO_DEVICE);
1683
1684 tx_buf = &tx_ring->tx_buf[i];
1685 }
1686
1687 /* record SW timestamp if HW timestamp is not available */
1688 skb_tx_timestamp(first->skb);
1689
1690 i++;
1691 if (i == tx_ring->count)
1692 i = 0;
1693
1694 /* write last descriptor with RS and EOP bits */
1695 td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD;
1696 tx_desc->cmd_type_offset_bsz =
1697 ice_build_ctob(td_cmd, td_offset, size, td_tag);
1698
1699 /* Force memory writes to complete before letting h/w know there
1700 * are new descriptors to fetch.
1701 *
1702 * We also use this memory barrier to make certain all of the
1703 * status bits have been updated before next_to_watch is written.
1704 */
1705 wmb();
1706
1707 /* set next_to_watch value indicating a packet is present */
1708 first->next_to_watch = tx_desc;
1709
1710 tx_ring->next_to_use = i;
1711
1712 ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
1713
1714 /* notify HW of packet */
1715 kick = __netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount,
1716 netdev_xmit_more());
1717 if (kick)
1718 /* notify HW of packet */
1719 writel(i, tx_ring->tail);
1720
1721 return;
1722
1723dma_error:
1724 /* clear DMA mappings for failed tx_buf map */
1725 for (;;) {
1726 tx_buf = &tx_ring->tx_buf[i];
1727 ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
1728 if (tx_buf == first)
1729 break;
1730 if (i == 0)
1731 i = tx_ring->count;
1732 i--;
1733 }
1734
1735 tx_ring->next_to_use = i;
1736}
1737
1738/**
1739 * ice_tx_csum - Enable Tx checksum offloads
1740 * @first: pointer to the first descriptor
1741 * @off: pointer to struct that holds offload parameters
1742 *
 * Returns 1 if the checksum offload was set up, 0 if no offload is needed
 * (or it was done in software), or a negative error code on failure.
1744 */
1745static
1746int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1747{
1748 u32 l4_len = 0, l3_len = 0, l2_len = 0;
1749 struct sk_buff *skb = first->skb;
1750 union {
1751 struct iphdr *v4;
1752 struct ipv6hdr *v6;
1753 unsigned char *hdr;
1754 } ip;
1755 union {
1756 struct tcphdr *tcp;
1757 unsigned char *hdr;
1758 } l4;
1759 __be16 frag_off, protocol;
1760 unsigned char *exthdr;
1761 u32 offset, cmd = 0;
1762 u8 l4_proto = 0;
1763
1764 if (skb->ip_summed != CHECKSUM_PARTIAL)
1765 return 0;
1766
1767 protocol = vlan_get_protocol(skb);
1768
1769 if (eth_p_mpls(protocol)) {
1770 ip.hdr = skb_inner_network_header(skb);
1771 l4.hdr = skb_checksum_start(skb);
1772 } else {
1773 ip.hdr = skb_network_header(skb);
1774 l4.hdr = skb_transport_header(skb);
1775 }
1776
1777 /* compute outer L2 header size */
1778 l2_len = ip.hdr - skb->data;
1779 offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
1780
1781 /* set the tx_flags to indicate the IP protocol type. this is
1782 * required so that checksum header computation below is accurate.
1783 */
1784 if (ip.v4->version == 4)
1785 first->tx_flags |= ICE_TX_FLAGS_IPV4;
1786 else if (ip.v6->version == 6)
1787 first->tx_flags |= ICE_TX_FLAGS_IPV6;
1788
1789 if (skb->encapsulation) {
1790 bool gso_ena = false;
1791 u32 tunnel = 0;
1792
1793 /* define outer network header type */
1794 if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
1795 tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ?
1796 ICE_TX_CTX_EIPT_IPV4 :
1797 ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
1798 l4_proto = ip.v4->protocol;
1799 } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
1800 int ret;
1801
1802 tunnel |= ICE_TX_CTX_EIPT_IPV6;
1803 exthdr = ip.hdr + sizeof(*ip.v6);
1804 l4_proto = ip.v6->nexthdr;
1805 ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
1806 &l4_proto, &frag_off);
1807 if (ret < 0)
1808 return -1;
1809 }
1810
1811 /* define outer transport */
1812 switch (l4_proto) {
1813 case IPPROTO_UDP:
1814 tunnel |= ICE_TXD_CTX_UDP_TUNNELING;
1815 first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1816 break;
1817 case IPPROTO_GRE:
1818 tunnel |= ICE_TXD_CTX_GRE_TUNNELING;
1819 first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1820 break;
1821 case IPPROTO_IPIP:
1822 case IPPROTO_IPV6:
1823 first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1824 l4.hdr = skb_inner_network_header(skb);
1825 break;
1826 default:
1827 if (first->tx_flags & ICE_TX_FLAGS_TSO)
1828 return -1;
1829
1830 skb_checksum_help(skb);
1831 return 0;
1832 }
1833
1834 /* compute outer L3 header size */
1835 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
1836 ICE_TXD_CTX_QW0_EIPLEN_S;
1837
1838 /* switch IP header pointer from outer to inner header */
1839 ip.hdr = skb_inner_network_header(skb);
1840
1841 /* compute tunnel header size */
1842 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
1843 ICE_TXD_CTX_QW0_NATLEN_S;
1844
1845 gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
1846 /* indicate if we need to offload outer UDP header */
1847 if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena &&
1848 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
1849 tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M;
1850
1851 /* record tunnel offload values */
1852 off->cd_tunnel_params |= tunnel;
1853
		/* set DTYP=1 to indicate that it's a Tx context descriptor
1855 * in IPsec tunnel mode with Tx offloads in Quad word 1
1856 */
1857 off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX;
1858
1859 /* switch L4 header pointer from outer to inner */
1860 l4.hdr = skb_inner_transport_header(skb);
1861 l4_proto = 0;
1862
1863 /* reset type as we transition from outer to inner headers */
1864 first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6);
1865 if (ip.v4->version == 4)
1866 first->tx_flags |= ICE_TX_FLAGS_IPV4;
1867 if (ip.v6->version == 6)
1868 first->tx_flags |= ICE_TX_FLAGS_IPV6;
1869 }
1870
1871 /* Enable IP checksum offloads */
1872 if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
1873 l4_proto = ip.v4->protocol;
1874 /* the stack computes the IP header already, the only time we
1875 * need the hardware to recompute it is in the case of TSO.
1876 */
1877 if (first->tx_flags & ICE_TX_FLAGS_TSO)
1878 cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
1879 else
1880 cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
1881
1882 } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
1883 cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
1884 exthdr = ip.hdr + sizeof(*ip.v6);
1885 l4_proto = ip.v6->nexthdr;
1886 if (l4.hdr != exthdr)
1887 ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
1888 &frag_off);
1889 } else {
1890 return -1;
1891 }
1892
1893 /* compute inner L3 header size */
1894 l3_len = l4.hdr - ip.hdr;
1895 offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;
1896
1897 /* Enable L4 checksum offloads */
1898 switch (l4_proto) {
1899 case IPPROTO_TCP:
1900 /* enable checksum offloads */
1901 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
1902 l4_len = l4.tcp->doff;
1903 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1904 break;
1905 case IPPROTO_UDP:
1906 /* enable UDP checksum offload */
1907 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
1908 l4_len = (sizeof(struct udphdr) >> 2);
1909 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1910 break;
1911 case IPPROTO_SCTP:
1912 /* enable SCTP checksum offload */
1913 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
1914 l4_len = sizeof(struct sctphdr) >> 2;
1915 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1916 break;
1917
1918 default:
1919 if (first->tx_flags & ICE_TX_FLAGS_TSO)
1920 return -1;
1921 skb_checksum_help(skb);
1922 return 0;
1923 }
1924
1925 off->td_cmd |= cmd;
1926 off->td_offset |= offset;
1927 return 1;
1928}
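
/* Putting the offsets together for a plain TCP/IPv4 frame with no options:
 * the 14 byte Ethernet header yields a MACLEN of 7 (2 byte words), the
 * 20 byte IP header yields an IPLEN of 5 (4 byte dwords), and a TCP doff of
 * 5 yields an L4LEN of 5 (4 byte dwords), all packed into td_offset above.
 */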
1929
1930/**
1931 * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
1932 * @tx_ring: ring to send buffer on
1933 * @first: pointer to struct ice_tx_buf
1934 *
 * Checks the skb and correspondingly sets up several generic transmit flags
 * related to VLAN tagging for the HW, such as VLAN and DCB.
1937 */
1938static void
1939ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first)
1940{
1941 struct sk_buff *skb = first->skb;
1942
1943 /* nothing left to do, software offloaded VLAN */
1944 if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol))
1945 return;
1946
	/* the VLAN ethertype/TPID is determined by the VSI configuration and
	 * netdev feature flags; the driver only allows either 802.1Q or
	 * 802.1ad VLAN offloads at a time, so we only care about the VLAN ID
	 * here
	 */
1951 if (skb_vlan_tag_present(skb)) {
1952 first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
1953 if (tx_ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2)
1954 first->tx_flags |= ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
1955 else
1956 first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
1957 }
1958
1959 ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
1960}
1961
1962/**
1963 * ice_tso - computes mss and TSO length to prepare for TSO
1964 * @first: pointer to struct ice_tx_buf
1965 * @off: pointer to struct that holds offload parameters
1966 *
 * Returns 1 if TSO was set up, 0 if TSO is not needed, or a negative error
 * code on failure.
1968 */
1969static
1970int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1971{
1972 struct sk_buff *skb = first->skb;
1973 union {
1974 struct iphdr *v4;
1975 struct ipv6hdr *v6;
1976 unsigned char *hdr;
1977 } ip;
1978 union {
1979 struct tcphdr *tcp;
1980 struct udphdr *udp;
1981 unsigned char *hdr;
1982 } l4;
1983 u64 cd_mss, cd_tso_len;
1984 __be16 protocol;
1985 u32 paylen;
1986 u8 l4_start;
1987 int err;
1988
1989 if (skb->ip_summed != CHECKSUM_PARTIAL)
1990 return 0;
1991
1992 if (!skb_is_gso(skb))
1993 return 0;
1994
1995 err = skb_cow_head(skb, 0);
1996 if (err < 0)
1997 return err;
1998
1999 /* cppcheck-suppress unreadVariable */
2000 protocol = vlan_get_protocol(skb);
2001
2002 if (eth_p_mpls(protocol))
2003 ip.hdr = skb_inner_network_header(skb);
2004 else
2005 ip.hdr = skb_network_header(skb);
2006 l4.hdr = skb_checksum_start(skb);
2007
2008 /* initialize outer IP header fields */
2009 if (ip.v4->version == 4) {
2010 ip.v4->tot_len = 0;
2011 ip.v4->check = 0;
2012 } else {
2013 ip.v6->payload_len = 0;
2014 }
2015
2016 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
2017 SKB_GSO_GRE_CSUM |
2018 SKB_GSO_IPXIP4 |
2019 SKB_GSO_IPXIP6 |
2020 SKB_GSO_UDP_TUNNEL |
2021 SKB_GSO_UDP_TUNNEL_CSUM)) {
2022 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2023 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
2024 l4.udp->len = 0;
2025
2026 /* determine offset of outer transport header */
2027 l4_start = (u8)(l4.hdr - skb->data);
2028
2029 /* remove payload length from outer checksum */
2030 paylen = skb->len - l4_start;
2031 csum_replace_by_diff(&l4.udp->check,
2032 (__force __wsum)htonl(paylen));
2033 }
2034
2035 /* reset pointers to inner headers */
2036
2037 /* cppcheck-suppress unreadVariable */
2038 ip.hdr = skb_inner_network_header(skb);
2039 l4.hdr = skb_inner_transport_header(skb);
2040
2041 /* initialize inner IP header fields */
2042 if (ip.v4->version == 4) {
2043 ip.v4->tot_len = 0;
2044 ip.v4->check = 0;
2045 } else {
2046 ip.v6->payload_len = 0;
2047 }
2048 }
2049
2050 /* determine offset of transport header */
2051 l4_start = (u8)(l4.hdr - skb->data);
2052
2053 /* remove payload length from checksum */
2054 paylen = skb->len - l4_start;
2055
2056 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
2057 csum_replace_by_diff(&l4.udp->check,
2058 (__force __wsum)htonl(paylen));
2059 /* compute length of UDP segmentation header */
2060 off->header_len = (u8)sizeof(l4.udp) + l4_start;
2061 } else {
2062 csum_replace_by_diff(&l4.tcp->check,
2063 (__force __wsum)htonl(paylen));
2064 /* compute length of TCP segmentation header */
2065 off->header_len = (u8)((l4.tcp->doff * 4) + l4_start);
2066 }
2067
2068 /* update gso_segs and bytecount */
2069 first->gso_segs = skb_shinfo(skb)->gso_segs;
2070 first->bytecount += (first->gso_segs - 1) * off->header_len;
2071
2072 cd_tso_len = skb->len - off->header_len;
2073 cd_mss = skb_shinfo(skb)->gso_size;
2074
2075 /* record cdesc_qw1 with TSO parameters */
2076 off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2077 (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
2078 (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
2079 (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
2080 first->tx_flags |= ICE_TX_FLAGS_TSO;
2081 return 1;
2082}
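
/* Worked example with illustrative numbers: for a TCP over IPv4 skb with a
 * 14 byte Ethernet header and 20 byte IP and TCP headers, l4_start is 34 and
 * header_len is 54. With skb->len of 2950 and gso_size of 1448 there are two
 * segments; paylen of 2950 - 34 = 2916 is removed from the pseudo-header
 * checksum so hardware can add each segment's own length back, cd_tso_len is
 * 2950 - 54 = 2896 payload bytes, and bytecount grows by (2 - 1) * 54 = 54 to
 * account for the replicated headers of the extra segment on the wire.
 */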
2083
2084/**
2085 * ice_txd_use_count - estimate the number of descriptors needed for Tx
2086 * @size: transmit request size in bytes
2087 *
2088 * Due to hardware alignment restrictions (4K alignment), we need to
2089 * assume that we can have no more than 12K of data per descriptor, even
2090 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
2091 * Thus, we need to divide by 12K. But division is slow! Instead,
2092 * we decompose the operation into shifts and one relatively cheap
2093 * multiply operation.
2094 *
2095 * To divide by 12K, we first divide by 4K, then divide by 3:
2096 * To divide by 4K, shift right by 12 bits
2097 * To divide by 3, multiply by 85, then divide by 256
2098 * (Divide by 256 is done by shifting right by 8 bits)
2099 * Finally, we add one to round up. Because 256 isn't an exact multiple of
2100 * 3, we'll underestimate near each multiple of 12K. This is actually more
2101 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
2102 * segment. For our purposes this is accurate out to 1M which is orders of
2103 * magnitude greater than our largest possible GSO size.
2104 *
2105 * This would then be implemented as:
2106 * return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
2107 *
2108 * Since multiplication and division are commutative, we can reorder
2109 * operations into:
2110 * return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
2111 */
2112static unsigned int ice_txd_use_count(unsigned int size)
2113{
2114 return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
2115}
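
/* Worked example (assuming ICE_DESCS_FOR_SKB_DATA_PTR is 1): a 60000 byte
 * request gives (60000 * 85) >> 20 = 4, i.e. an estimate of 5 descriptors,
 * matching the exact need of ceil(60000 / 12288) = 5. Exactly at a 12K
 * multiple, e.g. 12288 bytes, (12288 * 85) >> 20 = 0 and the estimate is a
 * single descriptor, which still suffices because one descriptor can carry
 * up to 16K - 1 bytes, the wiggle room mentioned above.
 */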
2116
2117/**
2118 * ice_xmit_desc_count - calculate number of Tx descriptors needed
2119 * @skb: send buffer
2120 *
2121 * Returns number of data descriptors needed for this skb.
2122 */
2123static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
2124{
2125 const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
2126 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2127 unsigned int count = 0, size = skb_headlen(skb);
2128
2129 for (;;) {
2130 count += ice_txd_use_count(size);
2131
2132 if (!nr_frags--)
2133 break;
2134
2135 size = skb_frag_size(frag++);
2136 }
2137
2138 return count;
2139}
2140
2141/**
2142 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
2143 * @skb: send buffer
2144 *
2145 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
2146 * and so we need to figure out the cases where we need to linearize the skb.
2147 *
2148 * For TSO we need to count the TSO header and segment payload separately.
2149 * As such we need to check cases where we have 7 fragments or more as we
2150 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
2151 * the segment payload in the first descriptor, and another 7 for the
2152 * fragments.
2153 */
2154static bool __ice_chk_linearize(struct sk_buff *skb)
2155{
2156 const skb_frag_t *frag, *stale;
2157 int nr_frags, sum;
2158
2159 /* no need to check if number of frags is less than 7 */
2160 nr_frags = skb_shinfo(skb)->nr_frags;
2161 if (nr_frags < (ICE_MAX_BUF_TXD - 1))
2162 return false;
2163
2164 /* We need to walk through the list and validate that each group
2165 * of 6 fragments totals at least gso_size.
2166 */
2167 nr_frags -= ICE_MAX_BUF_TXD - 2;
2168 frag = &skb_shinfo(skb)->frags[0];
2169
	/* Initialize sum to the negative value of gso_size minus 1. We
	 * use this as the worst case scenario in which the frag ahead
	 * of us only provides one byte, which is why we are limited to 6
	 * descriptors for a single transmit as the header and previous
	 * fragment are already consuming 2 descriptors.
	 */
2176 sum = 1 - skb_shinfo(skb)->gso_size;
2177
2178 /* Add size of frags 0 through 4 to create our initial sum */
2179 sum += skb_frag_size(frag++);
2180 sum += skb_frag_size(frag++);
2181 sum += skb_frag_size(frag++);
2182 sum += skb_frag_size(frag++);
2183 sum += skb_frag_size(frag++);
2184
2185 /* Walk through fragments adding latest fragment, testing it, and
2186 * then removing stale fragments from the sum.
2187 */
2188 for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
2189 int stale_size = skb_frag_size(stale);
2190
2191 sum += skb_frag_size(frag++);
2192
2193 /* The stale fragment may present us with a smaller
2194 * descriptor than the actual fragment size. To account
2195 * for that we need to remove all the data on the front and
2196 * figure out what the remainder would be in the last
2197 * descriptor associated with the fragment.
2198 */
2199 if (stale_size > ICE_MAX_DATA_PER_TXD) {
2200 int align_pad = -(skb_frag_off(stale)) &
2201 (ICE_MAX_READ_REQ_SIZE - 1);
2202
2203 sum -= align_pad;
2204 stale_size -= align_pad;
2205
2206 do {
2207 sum -= ICE_MAX_DATA_PER_TXD_ALIGNED;
2208 stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED;
2209 } while (stale_size > ICE_MAX_DATA_PER_TXD);
2210 }
2211
2212 /* if sum is negative we failed to make sufficient progress */
2213 if (sum < 0)
2214 return true;
2215
2216 if (!nr_frags--)
2217 break;
2218
2219 sum -= stale_size;
2220 }
2221
2222 return false;
2223}
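
/* Illustrative example: with gso_size = 9000 and a chain of 1500 byte
 * fragments, any 6 consecutive fragments sum to 9000, so every TSO segment
 * fits within the 8 buffer limit and no linearization is needed. If the
 * fragments were only 100 bytes each, 6 of them would cover just 600 bytes
 * of a 9000 byte segment, the running sum above would go negative, and the
 * skb would have to be linearized.
 */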
2224
2225/**
2226 * ice_chk_linearize - Check if there are more than 8 fragments per packet
2227 * @skb: send buffer
2228 * @count: number of buffers used
2229 *
2230 * Note: Our HW can't scatter-gather more than 8 fragments to build
2231 * a packet on the wire and so we need to figure out the cases where we
2232 * need to linearize the skb.
2233 */
2234static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
2235{
2236 /* Both TSO and single send will work if count is less than 8 */
2237 if (likely(count < ICE_MAX_BUF_TXD))
2238 return false;
2239
2240 if (skb_is_gso(skb))
2241 return __ice_chk_linearize(skb);
2242
2243 /* we can support up to 8 data buffers for a single send */
2244 return count != ICE_MAX_BUF_TXD;
2245}
2246
2247/**
2248 * ice_tstamp - set up context descriptor for hardware timestamp
2249 * @tx_ring: pointer to the Tx ring to send buffer on
2250 * @skb: pointer to the SKB we're sending
2251 * @first: Tx buffer
2252 * @off: Tx offload parameters
2253 */
2254static void
2255ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb,
2256 struct ice_tx_buf *first, struct ice_tx_offload_params *off)
2257{
2258 s8 idx;
2259
2260 /* only timestamp the outbound packet if the user has requested it */
2261 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
2262 return;
2263
2264 if (!tx_ring->ptp_tx)
2265 return;
2266
2267 /* Tx timestamps cannot be sampled when doing TSO */
2268 if (first->tx_flags & ICE_TX_FLAGS_TSO)
2269 return;
2270
2271 /* Grab an open timestamp slot */
2272 idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb);
2273 if (idx < 0) {
2274 tx_ring->vsi->back->ptp.tx_hwtstamp_skipped++;
2275 return;
2276 }
2277
2278 off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2279 (ICE_TX_CTX_DESC_TSYN << ICE_TXD_CTX_QW1_CMD_S) |
2280 ((u64)idx << ICE_TXD_CTX_QW1_TSO_LEN_S));
2281 first->tx_flags |= ICE_TX_FLAGS_TSYN;
2282}
2283
2284/**
2285 * ice_xmit_frame_ring - Sends buffer on Tx ring
2286 * @skb: send buffer
2287 * @tx_ring: ring to send buffer on
2288 *
2289 * Returns NETDEV_TX_OK if sent, else an error code
2290 */
2291static netdev_tx_t
2292ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
2293{
2294 struct ice_tx_offload_params offload = { 0 };
2295 struct ice_vsi *vsi = tx_ring->vsi;
2296 struct ice_tx_buf *first;
2297 struct ethhdr *eth;
2298 unsigned int count;
2299 int tso, csum;
2300
2301 ice_trace(xmit_frame_ring, tx_ring, skb);
2302
2303 count = ice_xmit_desc_count(skb);
2304 if (ice_chk_linearize(skb, count)) {
2305 if (__skb_linearize(skb))
2306 goto out_drop;
2307 count = ice_txd_use_count(skb->len);
2308 tx_ring->ring_stats->tx_stats.tx_linearize++;
2309 }
2310
2311 /* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
2312 * + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
2313 * + 4 desc gap to avoid the cache line where head is,
2314 * + 1 desc for context descriptor,
2315 * otherwise try next time
2316 */
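	/* For example, assuming ICE_DESCS_PER_CACHE_LINE is 4 and
	 * ICE_DESCS_FOR_CTX_DESC is 1, an skb that maps to 5 data descriptors
	 * is only sent if at least 5 + 4 + 1 = 10 descriptors are free;
	 * otherwise we stop the queue and return NETDEV_TX_BUSY below.
	 */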
2317 if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
2318 ICE_DESCS_FOR_CTX_DESC)) {
2319 tx_ring->ring_stats->tx_stats.tx_busy++;
2320 return NETDEV_TX_BUSY;
2321 }
2322
2323 /* prefetch for bql data which is infrequently used */
2324 netdev_txq_bql_enqueue_prefetchw(txring_txq(tx_ring));
2325
2326 offload.tx_ring = tx_ring;
2327
2328 /* record the location of the first descriptor for this packet */
2329 first = &tx_ring->tx_buf[tx_ring->next_to_use];
2330 first->skb = skb;
2331 first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
2332 first->gso_segs = 1;
2333 first->tx_flags = 0;
2334
2335 /* prepare the VLAN tagging flags for Tx */
2336 ice_tx_prepare_vlan_flags(tx_ring, first);
2337 if (first->tx_flags & ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN) {
2338 offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2339 (ICE_TX_CTX_DESC_IL2TAG2 <<
2340 ICE_TXD_CTX_QW1_CMD_S));
2341 offload.cd_l2tag2 = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
2342 ICE_TX_FLAGS_VLAN_S;
2343 }
2344
2345 /* set up TSO offload */
2346 tso = ice_tso(first, &offload);
2347 if (tso < 0)
2348 goto out_drop;
2349
2350 /* always set up Tx checksum offload */
2351 csum = ice_tx_csum(first, &offload);
2352 if (csum < 0)
2353 goto out_drop;
2354
2355 /* allow CONTROL frames egress from main VSI if FW LLDP disabled */
2356 eth = (struct ethhdr *)skb_mac_header(skb);
2357 if (unlikely((skb->priority == TC_PRIO_CONTROL ||
2358 eth->h_proto == htons(ETH_P_LLDP)) &&
2359 vsi->type == ICE_VSI_PF &&
2360 vsi->port_info->qos_cfg.is_sw_lldp))
2361 offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2362 ICE_TX_CTX_DESC_SWTCH_UPLINK <<
2363 ICE_TXD_CTX_QW1_CMD_S);
2364
2365 ice_tstamp(tx_ring, skb, first, &offload);
2366 if (ice_is_switchdev_running(vsi->back))
2367 ice_eswitch_set_target_vsi(skb, &offload);
2368
2369 if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
2370 struct ice_tx_ctx_desc *cdesc;
2371 u16 i = tx_ring->next_to_use;
2372
2373 /* grab the next descriptor */
2374 cdesc = ICE_TX_CTX_DESC(tx_ring, i);
2375 i++;
2376 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2377
2378 /* setup context descriptor */
2379 cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
2380 cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
2381 cdesc->rsvd = cpu_to_le16(0);
2382 cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
2383 }
2384
2385 ice_tx_map(tx_ring, first, &offload);
2386 return NETDEV_TX_OK;
2387
2388out_drop:
2389 ice_trace(xmit_frame_ring_drop, tx_ring, skb);
2390 dev_kfree_skb_any(skb);
2391 return NETDEV_TX_OK;
2392}
2393
2394/**
2395 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
2396 * @skb: send buffer
2397 * @netdev: network interface device structure
2398 *
2399 * Returns NETDEV_TX_OK if sent, else an error code
2400 */
2401netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2402{
2403 struct ice_netdev_priv *np = netdev_priv(netdev);
2404 struct ice_vsi *vsi = np->vsi;
2405 struct ice_tx_ring *tx_ring;
2406
2407 tx_ring = vsi->tx_rings[skb->queue_mapping];
2408
	/* hardware can't handle really short frames; hardware padding works
	 * beyond this point
	 */
2412 if (skb_put_padto(skb, ICE_MIN_TX_LEN))
2413 return NETDEV_TX_OK;
2414
2415 return ice_xmit_frame_ring(skb, tx_ring);
2416}
2417
2418/**
2419 * ice_get_dscp_up - return the UP/TC value for a SKB
2420 * @dcbcfg: DCB config that contains DSCP to UP/TC mapping
2421 * @skb: SKB to query for info to determine UP/TC
2422 *
2423 * This function is to only be called when the PF is in L3 DSCP PFC mode
2424 */
2425static u8 ice_get_dscp_up(struct ice_dcbx_cfg *dcbcfg, struct sk_buff *skb)
2426{
2427 u8 dscp = 0;
2428
2429 if (skb->protocol == htons(ETH_P_IP))
2430 dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
2431 else if (skb->protocol == htons(ETH_P_IPV6))
2432 dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
2433
2434 return dcbcfg->dscp_map[dscp];
2435}
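
/* For example, an IPv4 packet with a TOS byte of 0xb8 carries DSCP 46
 * (0xb8 >> 2 drops the two ECN bits), so the frame gets whatever UP/TC the
 * DCB configuration maps DSCP 46 to in dcbcfg->dscp_map[46].
 */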
2436
/**
 * ice_select_queue - select the Tx queue for an skb
 * @netdev: network interface device structure
 * @skb: send buffer
 * @sb_dev: subordinate device, if any
 *
 * If the PF is in L3 DSCP PFC mode, derive the skb priority from the DSCP
 * field before deferring to the stack's default queue selection.
 */
u16
2438ice_select_queue(struct net_device *netdev, struct sk_buff *skb,
2439 struct net_device *sb_dev)
2440{
2441 struct ice_pf *pf = ice_netdev_to_pf(netdev);
2442 struct ice_dcbx_cfg *dcbcfg;
2443
2444 dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
2445 if (dcbcfg->pfc_mode == ICE_QOS_MODE_DSCP)
2446 skb->priority = ice_get_dscp_up(dcbcfg, skb);
2447
2448 return netdev_pick_tx(netdev, skb, sb_dev);
2449}
2450
2451/**
2452 * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue
2453 * @tx_ring: tx_ring to clean
2454 */
2455void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring)
2456{
2457 struct ice_vsi *vsi = tx_ring->vsi;
2458 s16 i = tx_ring->next_to_clean;
2459 int budget = ICE_DFLT_IRQ_WORK;
2460 struct ice_tx_desc *tx_desc;
2461 struct ice_tx_buf *tx_buf;
2462
2463 tx_buf = &tx_ring->tx_buf[i];
2464 tx_desc = ICE_TX_DESC(tx_ring, i);
2465 i -= tx_ring->count;
2466
2467 do {
2468 struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
2469
2470 /* if next_to_watch is not set then there is no pending work */
2471 if (!eop_desc)
2472 break;
2473
2474 /* prevent any other reads prior to eop_desc */
2475 smp_rmb();
2476
2477 /* if the descriptor isn't done, no work to do */
2478 if (!(eop_desc->cmd_type_offset_bsz &
2479 cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
2480 break;
2481
2482 /* clear next_to_watch to prevent false hangs */
2483 tx_buf->next_to_watch = NULL;
2484 tx_desc->buf_addr = 0;
2485 tx_desc->cmd_type_offset_bsz = 0;
2486
2487 /* move past filter desc */
2488 tx_buf++;
2489 tx_desc++;
2490 i++;
2491 if (unlikely(!i)) {
2492 i -= tx_ring->count;
2493 tx_buf = tx_ring->tx_buf;
2494 tx_desc = ICE_TX_DESC(tx_ring, 0);
2495 }
2496
2497 /* unmap the data header */
2498 if (dma_unmap_len(tx_buf, len))
2499 dma_unmap_single(tx_ring->dev,
2500 dma_unmap_addr(tx_buf, dma),
2501 dma_unmap_len(tx_buf, len),
2502 DMA_TO_DEVICE);
2503 if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
2504 devm_kfree(tx_ring->dev, tx_buf->raw_buf);
2505
		/* reset the buffer state to prevent false hangs */
2507 tx_buf->raw_buf = NULL;
2508 tx_buf->tx_flags = 0;
2509 tx_buf->next_to_watch = NULL;
2510 dma_unmap_len_set(tx_buf, len, 0);
2511 tx_desc->buf_addr = 0;
2512 tx_desc->cmd_type_offset_bsz = 0;
2513
2514 /* move past eop_desc for start of next FD desc */
2515 tx_buf++;
2516 tx_desc++;
2517 i++;
2518 if (unlikely(!i)) {
2519 i -= tx_ring->count;
2520 tx_buf = tx_ring->tx_buf;
2521 tx_desc = ICE_TX_DESC(tx_ring, 0);
2522 }
2523
2524 budget--;
2525 } while (likely(budget));
2526
2527 i += tx_ring->count;
2528 tx_ring->next_to_clean = i;
2529
2530 /* re-enable interrupt if needed */
2531 ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]);
2532}