ixgbe_xsk.c as of kernel v6.8: AF_XDP zero-copy (XSK) support for the ixgbe driver
  1// SPDX-License-Identifier: GPL-2.0
  2/* Copyright(c) 2018 Intel Corporation. */
  3
  4#include <linux/bpf_trace.h>
  5#include <net/xdp_sock_drv.h>
  6#include <net/xdp.h>
  7
  8#include "ixgbe.h"
  9#include "ixgbe_txrx_common.h"
 10
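/* Return the AF_XDP buffer pool bound to @ring's queue, or NULL if no
 * XDP program is attached or the queue is not enabled for zero-copy.
 */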
 11struct xsk_buff_pool *ixgbe_xsk_pool(struct ixgbe_adapter *adapter,
 12				     struct ixgbe_ring *ring)
 13{
 14	bool xdp_on = READ_ONCE(adapter->xdp_prog);
 15	int qid = ring->ring_idx;
 16
 17	if (!xdp_on || !test_bit(qid, adapter->af_xdp_zc_qps))
 18		return NULL;
 19
 20	return xsk_get_pool_from_qid(adapter->netdev, qid);
 21}
 22
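/* Attach an AF_XDP buffer pool to the RX/TX queue pair @qid: validate
 * the queue id, DMA-map the pool, quiesce the rings while the queue is
 * marked for zero-copy, then kick NAPI so reception can start.
 */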
 23static int ixgbe_xsk_pool_enable(struct ixgbe_adapter *adapter,
 24				 struct xsk_buff_pool *pool,
 25				 u16 qid)
 26{
 27	struct net_device *netdev = adapter->netdev;
 28	bool if_running;
 29	int err;
 30
 31	if (qid >= adapter->num_rx_queues)
 32		return -EINVAL;
 33
 34	if (qid >= netdev->real_num_rx_queues ||
 35	    qid >= netdev->real_num_tx_queues)
 36		return -EINVAL;
 37
 38	err = xsk_pool_dma_map(pool, &adapter->pdev->dev, IXGBE_RX_DMA_ATTR);
 39	if (err)
 40		return err;
 41
 42	if_running = netif_running(adapter->netdev) &&
 43		     ixgbe_enabled_xdp_adapter(adapter);
 44
 45	if (if_running)
 46		ixgbe_txrx_ring_disable(adapter, qid);
 47
 48	set_bit(qid, adapter->af_xdp_zc_qps);
 49
 50	if (if_running) {
 51		ixgbe_txrx_ring_enable(adapter, qid);
 52
 53		/* Kick start the NAPI context so that receiving will start */
 54		err = ixgbe_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX);
 55		if (err) {
 56			clear_bit(qid, adapter->af_xdp_zc_qps);
 57			xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR);
 58			return err;
 59		}
 60	}
 61
 62	return 0;
 63}
 64
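/* Detach the AF_XDP buffer pool from queue @qid and release its DMA
 * mapping, quiescing the rings while the change is made.
 */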
 65static int ixgbe_xsk_pool_disable(struct ixgbe_adapter *adapter, u16 qid)
 66{
 67	struct xsk_buff_pool *pool;
 68	bool if_running;
 69
 70	pool = xsk_get_pool_from_qid(adapter->netdev, qid);
 71	if (!pool)
 72		return -EINVAL;
 73
 74	if_running = netif_running(adapter->netdev) &&
 75		     ixgbe_enabled_xdp_adapter(adapter);
 76
 77	if (if_running)
 78		ixgbe_txrx_ring_disable(adapter, qid);
 79
 80	clear_bit(qid, adapter->af_xdp_zc_qps);
 81	xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR);
 82
 83	if (if_running)
 84		ixgbe_txrx_ring_enable(adapter, qid);
 85
 86	return 0;
 87}
 88
 89int ixgbe_xsk_pool_setup(struct ixgbe_adapter *adapter,
 90			 struct xsk_buff_pool *pool,
 91			 u16 qid)
 92{
 93	return pool ? ixgbe_xsk_pool_enable(adapter, pool, qid) :
 94		ixgbe_xsk_pool_disable(adapter, qid);
 95}
 96
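/* Run the attached XDP program on a zero-copy buffer.  XDP_REDIRECT is
 * handled first as the expected fast path; the IXGBE_XDP_* return code
 * is consumed by ixgbe_clean_rx_irq_zc() below.
 */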
 97static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
 98			    struct ixgbe_ring *rx_ring,
 99			    struct xdp_buff *xdp)
100{
101	int err, result = IXGBE_XDP_PASS;
102	struct bpf_prog *xdp_prog;
103	struct ixgbe_ring *ring;
104	struct xdp_frame *xdpf;
105	u32 act;
106
107	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
108	act = bpf_prog_run_xdp(xdp_prog, xdp);
109
110	if (likely(act == XDP_REDIRECT)) {
111		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
112		if (!err)
113			return IXGBE_XDP_REDIR;
114		if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
115			result = IXGBE_XDP_EXIT;
116		else
117			result = IXGBE_XDP_CONSUMED;
118		goto out_failure;
119	}
120
121	switch (act) {
122	case XDP_PASS:
123		break;
124	case XDP_TX:
125		xdpf = xdp_convert_buff_to_frame(xdp);
126		if (unlikely(!xdpf))
127			goto out_failure;
128		ring = ixgbe_determine_xdp_ring(adapter);
129		if (static_branch_unlikely(&ixgbe_xdp_locking_key))
130			spin_lock(&ring->tx_lock);
131		result = ixgbe_xmit_xdp_ring(ring, xdpf);
132		if (static_branch_unlikely(&ixgbe_xdp_locking_key))
133			spin_unlock(&ring->tx_lock);
134		if (result == IXGBE_XDP_CONSUMED)
135			goto out_failure;
136		break;
137	case XDP_DROP:
138		result = IXGBE_XDP_CONSUMED;
139		break;
140	default:
141		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
142		fallthrough;
143	case XDP_ABORTED:
144		result = IXGBE_XDP_CONSUMED;
145out_failure:
146		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
147	}
148	return result;
149}
150
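/* Refill up to @count RX descriptors with buffers taken from the XSK
 * pool.  Returns false if the pool ran dry before @count descriptors
 * could be filled.
 */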
151bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
152{
153	union ixgbe_adv_rx_desc *rx_desc;
154	struct ixgbe_rx_buffer *bi;
155	u16 i = rx_ring->next_to_use;
156	dma_addr_t dma;
157	bool ok = true;
158
159	/* nothing to do */
160	if (!count)
161		return true;
162
163	rx_desc = IXGBE_RX_DESC(rx_ring, i);
164	bi = &rx_ring->rx_buffer_info[i];
165	i -= rx_ring->count;
166
167	do {
168		bi->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
169		if (!bi->xdp) {
170			ok = false;
171			break;
172		}
173
174		dma = xsk_buff_xdp_get_dma(bi->xdp);
175
176		/* Refresh the desc even if buffer_addrs didn't change
177		 * because each write-back erases this info.
178		 */
179		rx_desc->read.pkt_addr = cpu_to_le64(dma);
180
181		rx_desc++;
182		bi++;
183		i++;
184		if (unlikely(!i)) {
185			rx_desc = IXGBE_RX_DESC(rx_ring, 0);
186			bi = rx_ring->rx_buffer_info;
187			i -= rx_ring->count;
188		}
189
190		/* clear the length for the next_to_use descriptor */
191		rx_desc->wb.upper.length = 0;
192
193		count--;
194	} while (count);
195
196	i += rx_ring->count;
197
198	if (rx_ring->next_to_use != i) {
199		rx_ring->next_to_use = i;
200
201		/* Force memory writes to complete before letting h/w
202		 * know there are new descriptors to fetch.  (Only
203		 * applicable for weak-ordered memory model archs,
204		 * such as IA-64).
205		 */
206		wmb();
207		writel(i, rx_ring->tail);
208	}
209
210	return ok;
211}
212
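/* XDP_PASS path: copy the frame, including any XDP metadata, out of the
 * zero-copy buffer into a freshly allocated skb so the buffer can be
 * handed back to the pool.
 */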
213static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring,
214					      const struct xdp_buff *xdp)
215{
216	unsigned int totalsize = xdp->data_end - xdp->data_meta;
217	unsigned int metasize = xdp->data - xdp->data_meta;
218	struct sk_buff *skb;
219
220	net_prefetch(xdp->data_meta);
221
222	/* allocate a skb to store the frags */
223	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
224			       GFP_ATOMIC | __GFP_NOWARN);
225	if (unlikely(!skb))
226		return NULL;
227
228	memcpy(__skb_put(skb, totalsize), xdp->data_meta,
229	       ALIGN(totalsize, sizeof(long)));
230
231	if (metasize) {
232		skb_metadata_set(skb, metasize);
233		__skb_pull(skb, metasize);
234	}
235
236	return skb;
237}
238
239static void ixgbe_inc_ntc(struct ixgbe_ring *rx_ring)
240{
241	u32 ntc = rx_ring->next_to_clean + 1;
242
243	ntc = (ntc < rx_ring->count) ? ntc : 0;
244	rx_ring->next_to_clean = ntc;
245	prefetch(IXGBE_RX_DESC(rx_ring, ntc));
246}
247
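/* Zero-copy RX poll loop: refill descriptors, run XDP on each completed
 * frame, accumulate TX/REDIRECT verdicts for a single flush at the end,
 * and fall back to skb construction for XDP_PASS.
 */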
248int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
249			  struct ixgbe_ring *rx_ring,
250			  const int budget)
251{
252	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
253	struct ixgbe_adapter *adapter = q_vector->adapter;
254	u16 cleaned_count = ixgbe_desc_unused(rx_ring);
255	unsigned int xdp_res, xdp_xmit = 0;
256	bool failure = false;
257	struct sk_buff *skb;
258
259	while (likely(total_rx_packets < budget)) {
260		union ixgbe_adv_rx_desc *rx_desc;
261		struct ixgbe_rx_buffer *bi;
262		unsigned int size;
263
264		/* return some buffers to hardware, one at a time is too slow */
265		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
266			failure = failure ||
267				  !ixgbe_alloc_rx_buffers_zc(rx_ring,
268							     cleaned_count);
269			cleaned_count = 0;
270		}
271
272		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
273		size = le16_to_cpu(rx_desc->wb.upper.length);
274		if (!size)
275			break;
276
277		/* This memory barrier is needed to keep us from reading
278		 * any other fields out of the rx_desc until we know the
279		 * descriptor has been written back
280		 */
281		dma_rmb();
282
283		bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
284
285		if (unlikely(!ixgbe_test_staterr(rx_desc,
286						 IXGBE_RXD_STAT_EOP))) {
287			struct ixgbe_rx_buffer *next_bi;
288
289			xsk_buff_free(bi->xdp);
290			bi->xdp = NULL;
291			ixgbe_inc_ntc(rx_ring);
292			next_bi =
293			       &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
294			next_bi->discard = true;
295			continue;
296		}
297
298		if (unlikely(bi->discard)) {
299			xsk_buff_free(bi->xdp);
300			bi->xdp = NULL;
301			bi->discard = false;
302			ixgbe_inc_ntc(rx_ring);
303			continue;
304		}
305
306		bi->xdp->data_end = bi->xdp->data + size;
307		xsk_buff_dma_sync_for_cpu(bi->xdp, rx_ring->xsk_pool);
308		xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, bi->xdp);
309
310		if (likely(xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR))) {
311			xdp_xmit |= xdp_res;
312		} else if (xdp_res == IXGBE_XDP_EXIT) {
313			failure = true;
314			break;
315		} else if (xdp_res == IXGBE_XDP_CONSUMED) {
316			xsk_buff_free(bi->xdp);
317		} else if (xdp_res == IXGBE_XDP_PASS) {
318			goto construct_skb;
319		}
320
321		bi->xdp = NULL;
322		total_rx_packets++;
323		total_rx_bytes += size;
324
325		cleaned_count++;
326		ixgbe_inc_ntc(rx_ring);
327		continue;
328
329construct_skb:
330		/* XDP_PASS path */
331		skb = ixgbe_construct_skb_zc(rx_ring, bi->xdp);
332		if (!skb) {
333			rx_ring->rx_stats.alloc_rx_buff_failed++;
334			break;
335		}
336
337		xsk_buff_free(bi->xdp);
338		bi->xdp = NULL;
339
340		cleaned_count++;
341		ixgbe_inc_ntc(rx_ring);
342
343		if (eth_skb_pad(skb))
344			continue;
345
346		total_rx_bytes += skb->len;
347		total_rx_packets++;
348
349		ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
350		ixgbe_rx_skb(q_vector, skb);
351	}
352
353	if (xdp_xmit & IXGBE_XDP_REDIR)
354		xdp_do_flush();
355
356	if (xdp_xmit & IXGBE_XDP_TX) {
357		struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter);
358
359		ixgbe_xdp_ring_update_tail_locked(ring);
360	}
361
362	u64_stats_update_begin(&rx_ring->syncp);
363	rx_ring->stats.packets += total_rx_packets;
364	rx_ring->stats.bytes += total_rx_bytes;
365	u64_stats_update_end(&rx_ring->syncp);
366	q_vector->rx.total_packets += total_rx_packets;
367	q_vector->rx.total_bytes += total_rx_bytes;
368
369	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
370		if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
371			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
372		else
373			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);
374
375		return (int)total_rx_packets;
376	}
377	return failure ? budget : (int)total_rx_packets;
378}
379
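/* Release any XSK buffers still attached to the RX ring, e.g. when the
 * ring is torn down.
 */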
380void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring)
381{
382	struct ixgbe_rx_buffer *bi;
383	u16 i;
384
385	for (i = 0; i < rx_ring->count; i++) {
386		bi = &rx_ring->rx_buffer_info[i];
387
388		if (!bi->xdp)
389			continue;
390
391		xsk_buff_free(bi->xdp);
392		bi->xdp = NULL;
393	}
394}
395
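/* Transmit frames queued by user space: peek descriptors from the XSK
 * pool, fill advanced TX descriptors for them, then bump the tail once
 * and release the consumed pool descriptors.
 */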
396static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
397{
398	struct xsk_buff_pool *pool = xdp_ring->xsk_pool;
399	union ixgbe_adv_tx_desc *tx_desc = NULL;
400	struct ixgbe_tx_buffer *tx_bi;
401	bool work_done = true;
402	struct xdp_desc desc;
403	dma_addr_t dma;
404	u32 cmd_type;
405
406	while (budget-- > 0) {
407		if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
408			work_done = false;
409			break;
410		}
411
412		if (!netif_carrier_ok(xdp_ring->netdev))
413			break;
414
415		if (!xsk_tx_peek_desc(pool, &desc))
416			break;
417
418		dma = xsk_buff_raw_get_dma(pool, desc.addr);
419		xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);
420
421		tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];
422		tx_bi->bytecount = desc.len;
423		tx_bi->xdpf = NULL;
424		tx_bi->gso_segs = 1;
425
426		tx_desc = IXGBE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
427		tx_desc->read.buffer_addr = cpu_to_le64(dma);
428
429		/* put descriptor type bits */
430		cmd_type = IXGBE_ADVTXD_DTYP_DATA |
431			   IXGBE_ADVTXD_DCMD_DEXT |
432			   IXGBE_ADVTXD_DCMD_IFCS;
433		cmd_type |= desc.len | IXGBE_TXD_CMD;
434		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
435		tx_desc->read.olinfo_status =
436			cpu_to_le32(desc.len << IXGBE_ADVTXD_PAYLEN_SHIFT);
437
438		xdp_ring->next_to_use++;
439		if (xdp_ring->next_to_use == xdp_ring->count)
440			xdp_ring->next_to_use = 0;
441	}
442
443	if (tx_desc) {
444		ixgbe_xdp_ring_update_tail(xdp_ring);
445		xsk_tx_release(pool);
446	}
447
448	return !!budget && work_done;
449}
450
451static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring,
452				      struct ixgbe_tx_buffer *tx_bi)
453{
454	xdp_return_frame(tx_bi->xdpf);
455	dma_unmap_single(tx_ring->dev,
456			 dma_unmap_addr(tx_bi, dma),
457			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
458	dma_unmap_len_set(tx_bi, len, 0);
459}
460
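/* Complete finished descriptors on the XDP/zero-copy TX ring, report
 * the completions back to the XSK pool, then try to send more pending
 * frames via ixgbe_xmit_zc().
 */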
461bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
462			    struct ixgbe_ring *tx_ring, int napi_budget)
463{
464	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
465	unsigned int total_packets = 0, total_bytes = 0;
466	struct xsk_buff_pool *pool = tx_ring->xsk_pool;
467	union ixgbe_adv_tx_desc *tx_desc;
468	struct ixgbe_tx_buffer *tx_bi;
469	u32 xsk_frames = 0;
470
471	tx_bi = &tx_ring->tx_buffer_info[ntc];
472	tx_desc = IXGBE_TX_DESC(tx_ring, ntc);
473
474	while (ntc != ntu) {
475		if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
476			break;
477
478		total_bytes += tx_bi->bytecount;
479		total_packets += tx_bi->gso_segs;
480
481		if (tx_bi->xdpf)
482			ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
483		else
484			xsk_frames++;
485
486		tx_bi->xdpf = NULL;
487
488		tx_bi++;
489		tx_desc++;
490		ntc++;
491		if (unlikely(ntc == tx_ring->count)) {
492			ntc = 0;
493			tx_bi = tx_ring->tx_buffer_info;
494			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
495		}
496
497		/* issue prefetch for next Tx descriptor */
498		prefetch(tx_desc);
499	}
500
501	tx_ring->next_to_clean = ntc;
502
503	u64_stats_update_begin(&tx_ring->syncp);
504	tx_ring->stats.bytes += total_bytes;
505	tx_ring->stats.packets += total_packets;
506	u64_stats_update_end(&tx_ring->syncp);
507	q_vector->tx.total_bytes += total_bytes;
508	q_vector->tx.total_packets += total_packets;
509
510	if (xsk_frames)
511		xsk_tx_completed(pool, xsk_frames);
512
513	if (xsk_uses_need_wakeup(pool))
514		xsk_set_tx_need_wakeup(pool);
515
516	return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
517}
518
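/* ndo_xsk_wakeup() handler: after the sanity checks, rearm the queue's
 * interrupt so its NAPI context gets scheduled unless it is already
 * running.
 */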
519int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
520{
521	struct ixgbe_adapter *adapter = netdev_priv(dev);
522	struct ixgbe_ring *ring;
523
524	if (test_bit(__IXGBE_DOWN, &adapter->state))
525		return -ENETDOWN;
526
527	if (!READ_ONCE(adapter->xdp_prog))
528		return -EINVAL;
529
530	if (qid >= adapter->num_xdp_queues)
531		return -EINVAL;
532
533	ring = adapter->xdp_ring[qid];
534
535	if (test_bit(__IXGBE_TX_DISABLED, &ring->state))
536		return -ENETDOWN;
537
538	if (!ring->xsk_pool)
539		return -EINVAL;
540
541	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
542		u64 eics = BIT_ULL(ring->q_vector->v_idx);
543
544		ixgbe_irq_rearm_queues(adapter, eics);
545	}
546
547	return 0;
548}
549
550void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring)
551{
552	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
553	struct xsk_buff_pool *pool = tx_ring->xsk_pool;
554	struct ixgbe_tx_buffer *tx_bi;
555	u32 xsk_frames = 0;
556
557	while (ntc != ntu) {
558		tx_bi = &tx_ring->tx_buffer_info[ntc];
559
560		if (tx_bi->xdpf)
561			ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
562		else
563			xsk_frames++;
564
565		tx_bi->xdpf = NULL;
566
567		ntc++;
568		if (ntc == tx_ring->count)
569			ntc = 0;
570	}
571
572	if (xsk_frames)
573		xsk_tx_completed(pool, xsk_frames);
574}
ixgbe_xsk.c as of kernel v5.4, before the conversion from the xdp_umem API to xsk_buff_pool
  1// SPDX-License-Identifier: GPL-2.0
  2/* Copyright(c) 2018 Intel Corporation. */
  3
  4#include <linux/bpf_trace.h>
  5#include <net/xdp_sock.h>
  6#include <net/xdp.h>
  7
  8#include "ixgbe.h"
  9#include "ixgbe_txrx_common.h"
 10
 11struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
 12				struct ixgbe_ring *ring)
 13{
 14	bool xdp_on = READ_ONCE(adapter->xdp_prog);
 15	int qid = ring->ring_idx;
 16
 17	if (!xdp_on || !test_bit(qid, adapter->af_xdp_zc_qps))
 18		return NULL;
 19
 20	return xdp_get_umem_from_qid(adapter->netdev, qid);
 21}
 22
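/* In v5.4 the driver DMA-maps every page of the UMEM itself; newer
 * kernels delegate this to xsk_pool_dma_map(), as seen in the v6.8
 * version above.
 */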
 23static int ixgbe_xsk_umem_dma_map(struct ixgbe_adapter *adapter,
 24				  struct xdp_umem *umem)
 25{
 26	struct device *dev = &adapter->pdev->dev;
 27	unsigned int i, j;
 28	dma_addr_t dma;
 29
 30	for (i = 0; i < umem->npgs; i++) {
 31		dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE,
 32					 DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);
 33		if (dma_mapping_error(dev, dma))
 34			goto out_unmap;
 35
 36		umem->pages[i].dma = dma;
 37	}
 38
 39	return 0;
 40
 41out_unmap:
 42	for (j = 0; j < i; j++) {
 43		dma_unmap_page_attrs(dev, umem->pages[j].dma, PAGE_SIZE,
 44				     DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);
 45		umem->pages[j].dma = 0;
 46	}
 47
 48	return -1;
 49}
 50
 51static void ixgbe_xsk_umem_dma_unmap(struct ixgbe_adapter *adapter,
 52				     struct xdp_umem *umem)
 53{
 54	struct device *dev = &adapter->pdev->dev;
 55	unsigned int i;
 56
 57	for (i = 0; i < umem->npgs; i++) {
 58		dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
 59				     DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);
 60
 61		umem->pages[i].dma = 0;
 62	}
 63}
 64
 65static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
 66				 struct xdp_umem *umem,
 67				 u16 qid)
 68{
 69	struct net_device *netdev = adapter->netdev;
 70	struct xdp_umem_fq_reuse *reuseq;
 71	bool if_running;
 72	int err;
 73
 74	if (qid >= adapter->num_rx_queues)
 75		return -EINVAL;
 76
 77	if (qid >= netdev->real_num_rx_queues ||
 78	    qid >= netdev->real_num_tx_queues)
 79		return -EINVAL;
 80
 81	reuseq = xsk_reuseq_prepare(adapter->rx_ring[0]->count);
 82	if (!reuseq)
 83		return -ENOMEM;
 84
 85	xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));
 86
 87	err = ixgbe_xsk_umem_dma_map(adapter, umem);
 88	if (err)
 89		return err;
 90
 91	if_running = netif_running(adapter->netdev) &&
 92		     ixgbe_enabled_xdp_adapter(adapter);
 93
 94	if (if_running)
 95		ixgbe_txrx_ring_disable(adapter, qid);
 96
 97	set_bit(qid, adapter->af_xdp_zc_qps);
 98
 99	if (if_running) {
100		ixgbe_txrx_ring_enable(adapter, qid);
101
102		/* Kick start the NAPI context so that receiving will start */
103		err = ixgbe_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX);
104		if (err)
105			return err;
106	}
107
108	return 0;
109}
110
111static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
112{
113	struct xdp_umem *umem;
114	bool if_running;
115
116	umem = xdp_get_umem_from_qid(adapter->netdev, qid);
117	if (!umem)
118		return -EINVAL;
119
120	if_running = netif_running(adapter->netdev) &&
121		     ixgbe_enabled_xdp_adapter(adapter);
122
123	if (if_running)
124		ixgbe_txrx_ring_disable(adapter, qid);
125
126	clear_bit(qid, adapter->af_xdp_zc_qps);
127	ixgbe_xsk_umem_dma_unmap(adapter, umem);
128
129	if (if_running)
130		ixgbe_txrx_ring_enable(adapter, qid);
131
132	return 0;
133}
134
135int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem,
136			 u16 qid)
137{
138	return umem ? ixgbe_xsk_umem_enable(adapter, umem, qid) :
139		ixgbe_xsk_umem_disable(adapter, qid);
140}
141
142static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
143			    struct ixgbe_ring *rx_ring,
144			    struct xdp_buff *xdp)
145{
146	struct xdp_umem *umem = rx_ring->xsk_umem;
147	int err, result = IXGBE_XDP_PASS;
148	struct bpf_prog *xdp_prog;
149	struct xdp_frame *xdpf;
150	u64 offset;
151	u32 act;
152
153	rcu_read_lock();
154	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
155	act = bpf_prog_run_xdp(xdp_prog, xdp);
156	offset = xdp->data - xdp->data_hard_start;
157
158	xdp->handle = xsk_umem_adjust_offset(umem, xdp->handle, offset);
159
160	switch (act) {
161	case XDP_PASS:
162		break;
163	case XDP_TX:
164		xdpf = convert_to_xdp_frame(xdp);
165		if (unlikely(!xdpf)) {
166			result = IXGBE_XDP_CONSUMED;
167			break;
168		}
169		result = ixgbe_xmit_xdp_ring(adapter, xdpf);
170		break;
171	case XDP_REDIRECT:
172		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
173		result = !err ? IXGBE_XDP_REDIR : IXGBE_XDP_CONSUMED;
174		break;
175	default:
176		bpf_warn_invalid_xdp_action(act);
177		/* fallthrough */
178	case XDP_ABORTED:
179		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
180		/* fallthrough -- handle aborts by dropping packet */
181	case XDP_DROP:
182		result = IXGBE_XDP_CONSUMED;
183		break;
184	}
185	rcu_read_unlock();
186	return result;
187}
188
189static struct
190ixgbe_rx_buffer *ixgbe_get_rx_buffer_zc(struct ixgbe_ring *rx_ring,
191					unsigned int size)
192{
193	struct ixgbe_rx_buffer *bi;
194
195	bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
196
197	/* we are reusing so sync this buffer for CPU use */
198	dma_sync_single_range_for_cpu(rx_ring->dev,
199				      bi->dma, 0,
200				      size,
201				      DMA_BIDIRECTIONAL);
202
203	return bi;
204}
205
206static void ixgbe_reuse_rx_buffer_zc(struct ixgbe_ring *rx_ring,
207				     struct ixgbe_rx_buffer *obi)
208{
209	u16 nta = rx_ring->next_to_alloc;
210	struct ixgbe_rx_buffer *nbi;
211
212	nbi = &rx_ring->rx_buffer_info[rx_ring->next_to_alloc];
213	/* update, and store next to alloc */
214	nta++;
215	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
216
217	/* transfer page from old buffer to new buffer */
218	nbi->dma = obi->dma;
219	nbi->addr = obi->addr;
220	nbi->handle = obi->handle;
221
222	obi->addr = NULL;
223	obi->skb = NULL;
224}
225
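/* zero_copy_allocator callback: recycle the UMEM chunk identified by
 * @handle back to the RX ring so it can be reused for reception.
 */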
226void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle)
227{
228	struct ixgbe_rx_buffer *bi;
229	struct ixgbe_ring *rx_ring;
230	u64 hr, mask;
231	u16 nta;
232
233	rx_ring = container_of(alloc, struct ixgbe_ring, zca);
234	hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
235	mask = rx_ring->xsk_umem->chunk_mask;
236
237	nta = rx_ring->next_to_alloc;
238	bi = rx_ring->rx_buffer_info;
239
240	nta++;
241	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
242
243	handle &= mask;
244
245	bi->dma = xdp_umem_get_dma(rx_ring->xsk_umem, handle);
246	bi->dma += hr;
247
248	bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle);
249	bi->addr += hr;
250
251	bi->handle = xsk_umem_adjust_offset(rx_ring->xsk_umem, (u64)handle,
252					    rx_ring->xsk_umem->headroom);
253}
254
255static bool ixgbe_alloc_buffer_zc(struct ixgbe_ring *rx_ring,
256				  struct ixgbe_rx_buffer *bi)
257{
258	struct xdp_umem *umem = rx_ring->xsk_umem;
259	void *addr = bi->addr;
260	u64 handle, hr;
261
262	if (addr)
263		return true;
264
265	if (!xsk_umem_peek_addr(umem, &handle)) {
266		rx_ring->rx_stats.alloc_rx_page_failed++;
267		return false;
268	}
269
270	hr = umem->headroom + XDP_PACKET_HEADROOM;
271
272	bi->dma = xdp_umem_get_dma(umem, handle);
273	bi->dma += hr;
274
275	bi->addr = xdp_umem_get_data(umem, handle);
276	bi->addr += hr;
277
278	bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
279
280	xsk_umem_discard_addr(umem);
281	return true;
282}
283
284static bool ixgbe_alloc_buffer_slow_zc(struct ixgbe_ring *rx_ring,
285				       struct ixgbe_rx_buffer *bi)
286{
287	struct xdp_umem *umem = rx_ring->xsk_umem;
288	u64 handle, hr;
289
290	if (!xsk_umem_peek_addr_rq(umem, &handle)) {
291		rx_ring->rx_stats.alloc_rx_page_failed++;
292		return false;
293	}
294
295	handle &= rx_ring->xsk_umem->chunk_mask;
296
297	hr = umem->headroom + XDP_PACKET_HEADROOM;
298
299	bi->dma = xdp_umem_get_dma(umem, handle);
300	bi->dma += hr;
301
302	bi->addr = xdp_umem_get_data(umem, handle);
303	bi->addr += hr;
304
305	bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
306
307	xsk_umem_discard_addr_rq(umem);
308	return true;
309}
310
311static __always_inline bool
312__ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count,
313			    bool alloc(struct ixgbe_ring *rx_ring,
314				       struct ixgbe_rx_buffer *bi))
315{
316	union ixgbe_adv_rx_desc *rx_desc;
317	struct ixgbe_rx_buffer *bi;
318	u16 i = rx_ring->next_to_use;
319	bool ok = true;
320
321	/* nothing to do */
322	if (!cleaned_count)
323		return true;
324
325	rx_desc = IXGBE_RX_DESC(rx_ring, i);
326	bi = &rx_ring->rx_buffer_info[i];
327	i -= rx_ring->count;
328
329	do {
330		if (!alloc(rx_ring, bi)) {
331			ok = false;
332			break;
333		}
334
335		/* sync the buffer for use by the device */
336		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
337						 bi->page_offset,
338						 rx_ring->rx_buf_len,
339						 DMA_BIDIRECTIONAL);
340
341		/* Refresh the desc even if buffer_addrs didn't change
342		 * because each write-back erases this info.
343		 */
344		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
345
346		rx_desc++;
347		bi++;
348		i++;
349		if (unlikely(!i)) {
350			rx_desc = IXGBE_RX_DESC(rx_ring, 0);
351			bi = rx_ring->rx_buffer_info;
352			i -= rx_ring->count;
353		}
354
355		/* clear the length for the next_to_use descriptor */
356		rx_desc->wb.upper.length = 0;
357
358		cleaned_count--;
359	} while (cleaned_count);
360
361	i += rx_ring->count;
362
363	if (rx_ring->next_to_use != i) {
364		rx_ring->next_to_use = i;
365
366		/* update next to alloc since we have filled the ring */
367		rx_ring->next_to_alloc = i;
368
369		/* Force memory writes to complete before letting h/w
370		 * know there are new descriptors to fetch.  (Only
371		 * applicable for weak-ordered memory model archs,
372		 * such as IA-64).
373		 */
374		wmb();
375		writel(i, rx_ring->tail);
376	}
377
378	return ok;
379}
380
381void ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
382{
383	__ixgbe_alloc_rx_buffers_zc(rx_ring, count,
384				    ixgbe_alloc_buffer_slow_zc);
385}
386
387static bool ixgbe_alloc_rx_buffers_fast_zc(struct ixgbe_ring *rx_ring,
388					   u16 count)
389{
390	return __ixgbe_alloc_rx_buffers_zc(rx_ring, count,
391					   ixgbe_alloc_buffer_zc);
392}
393
394static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring,
395					      struct ixgbe_rx_buffer *bi,
396					      struct xdp_buff *xdp)
397{
398	unsigned int metasize = xdp->data - xdp->data_meta;
399	unsigned int datasize = xdp->data_end - xdp->data;
400	struct sk_buff *skb;
401
402	/* allocate a skb to store the frags */
403	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
404			       xdp->data_end - xdp->data_hard_start,
405			       GFP_ATOMIC | __GFP_NOWARN);
406	if (unlikely(!skb))
407		return NULL;
408
409	skb_reserve(skb, xdp->data - xdp->data_hard_start);
410	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
411	if (metasize)
412		skb_metadata_set(skb, metasize);
413
414	ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
415	return skb;
416}
417
418static void ixgbe_inc_ntc(struct ixgbe_ring *rx_ring)
419{
420	u32 ntc = rx_ring->next_to_clean + 1;
421
422	ntc = (ntc < rx_ring->count) ? ntc : 0;
423	rx_ring->next_to_clean = ntc;
424	prefetch(IXGBE_RX_DESC(rx_ring, ntc));
425}
426
427int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
428			  struct ixgbe_ring *rx_ring,
429			  const int budget)
430{
431	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
432	struct ixgbe_adapter *adapter = q_vector->adapter;
433	u16 cleaned_count = ixgbe_desc_unused(rx_ring);
434	unsigned int xdp_res, xdp_xmit = 0;
435	bool failure = false;
436	struct sk_buff *skb;
437	struct xdp_buff xdp;
438
439	xdp.rxq = &rx_ring->xdp_rxq;
440
441	while (likely(total_rx_packets < budget)) {
442		union ixgbe_adv_rx_desc *rx_desc;
443		struct ixgbe_rx_buffer *bi;
444		unsigned int size;
445
446		/* return some buffers to hardware, one at a time is too slow */
447		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
448			failure = failure ||
449				  !ixgbe_alloc_rx_buffers_fast_zc(rx_ring,
450								 cleaned_count);
451			cleaned_count = 0;
452		}
453
454		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
455		size = le16_to_cpu(rx_desc->wb.upper.length);
456		if (!size)
457			break;
458
459		/* This memory barrier is needed to keep us from reading
460		 * any other fields out of the rx_desc until we know the
461		 * descriptor has been written back
462		 */
463		dma_rmb();
464
465		bi = ixgbe_get_rx_buffer_zc(rx_ring, size);
466
467		if (unlikely(!ixgbe_test_staterr(rx_desc,
468						 IXGBE_RXD_STAT_EOP))) {
469			struct ixgbe_rx_buffer *next_bi;
470
471			ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
472			ixgbe_inc_ntc(rx_ring);
473			next_bi =
474			       &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
475			next_bi->skb = ERR_PTR(-EINVAL);
476			continue;
477		}
478
479		if (unlikely(bi->skb)) {
480			ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
481			ixgbe_inc_ntc(rx_ring);
482			continue;
483		}
484
485		xdp.data = bi->addr;
486		xdp.data_meta = xdp.data;
487		xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
488		xdp.data_end = xdp.data + size;
489		xdp.handle = bi->handle;
490
491		xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, &xdp);
492
493		if (xdp_res) {
494			if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
495				xdp_xmit |= xdp_res;
496				bi->addr = NULL;
497				bi->skb = NULL;
498			} else {
499				ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
500			}
501			total_rx_packets++;
502			total_rx_bytes += size;
503
504			cleaned_count++;
505			ixgbe_inc_ntc(rx_ring);
506			continue;
507		}
508
509		/* XDP_PASS path */
510		skb = ixgbe_construct_skb_zc(rx_ring, bi, &xdp);
511		if (!skb) {
512			rx_ring->rx_stats.alloc_rx_buff_failed++;
513			break;
514		}
515
516		cleaned_count++;
517		ixgbe_inc_ntc(rx_ring);
518
519		if (eth_skb_pad(skb))
520			continue;
521
522		total_rx_bytes += skb->len;
523		total_rx_packets++;
524
525		ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
526		ixgbe_rx_skb(q_vector, skb);
527	}
528
529	if (xdp_xmit & IXGBE_XDP_REDIR)
530		xdp_do_flush_map();
531
532	if (xdp_xmit & IXGBE_XDP_TX) {
533		struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
534
535		/* Force memory writes to complete before letting h/w
536		 * know there are new descriptors to fetch.
537		 */
538		wmb();
539		writel(ring->next_to_use, ring->tail);
540	}
541
542	u64_stats_update_begin(&rx_ring->syncp);
543	rx_ring->stats.packets += total_rx_packets;
544	rx_ring->stats.bytes += total_rx_bytes;
545	u64_stats_update_end(&rx_ring->syncp);
546	q_vector->rx.total_packets += total_rx_packets;
547	q_vector->rx.total_bytes += total_rx_bytes;
548
549	if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) {
550		if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
551			xsk_set_rx_need_wakeup(rx_ring->xsk_umem);
552		else
553			xsk_clear_rx_need_wakeup(rx_ring->xsk_umem);
554
555		return (int)total_rx_packets;
556	}
557	return failure ? budget : (int)total_rx_packets;
558}
559
560void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring)
561{
562	u16 i = rx_ring->next_to_clean;
563	struct ixgbe_rx_buffer *bi = &rx_ring->rx_buffer_info[i];
564
565	while (i != rx_ring->next_to_alloc) {
566		xsk_umem_fq_reuse(rx_ring->xsk_umem, bi->handle);
567		i++;
568		bi++;
569		if (i == rx_ring->count) {
570			i = 0;
571			bi = rx_ring->rx_buffer_info;
572		}
573	}
574}
575
576static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
577{
578	union ixgbe_adv_tx_desc *tx_desc = NULL;
579	struct ixgbe_tx_buffer *tx_bi;
580	bool work_done = true;
581	struct xdp_desc desc;
582	dma_addr_t dma;
583	u32 cmd_type;
584
585	while (budget-- > 0) {
586		if (unlikely(!ixgbe_desc_unused(xdp_ring)) ||
587		    !netif_carrier_ok(xdp_ring->netdev)) {
588			work_done = false;
589			break;
590		}
591
592		if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
593			break;
594
595		dma = xdp_umem_get_dma(xdp_ring->xsk_umem, desc.addr);
596
597		dma_sync_single_for_device(xdp_ring->dev, dma, desc.len,
598					   DMA_BIDIRECTIONAL);
599
600		tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];
601		tx_bi->bytecount = desc.len;
602		tx_bi->xdpf = NULL;
603		tx_bi->gso_segs = 1;
604
605		tx_desc = IXGBE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
606		tx_desc->read.buffer_addr = cpu_to_le64(dma);
607
608		/* put descriptor type bits */
609		cmd_type = IXGBE_ADVTXD_DTYP_DATA |
610			   IXGBE_ADVTXD_DCMD_DEXT |
611			   IXGBE_ADVTXD_DCMD_IFCS;
612		cmd_type |= desc.len | IXGBE_TXD_CMD;
613		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
614		tx_desc->read.olinfo_status =
615			cpu_to_le32(desc.len << IXGBE_ADVTXD_PAYLEN_SHIFT);
616
617		xdp_ring->next_to_use++;
618		if (xdp_ring->next_to_use == xdp_ring->count)
619			xdp_ring->next_to_use = 0;
620	}
621
622	if (tx_desc) {
623		ixgbe_xdp_ring_update_tail(xdp_ring);
624		xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
625	}
626
627	return !!budget && work_done;
628}
629
630static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring,
631				      struct ixgbe_tx_buffer *tx_bi)
632{
633	xdp_return_frame(tx_bi->xdpf);
634	dma_unmap_single(tx_ring->dev,
635			 dma_unmap_addr(tx_bi, dma),
636			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
637	dma_unmap_len_set(tx_bi, len, 0);
638}
639
640bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
641			    struct ixgbe_ring *tx_ring, int napi_budget)
642{
643	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
644	unsigned int total_packets = 0, total_bytes = 0;
645	struct xdp_umem *umem = tx_ring->xsk_umem;
646	union ixgbe_adv_tx_desc *tx_desc;
647	struct ixgbe_tx_buffer *tx_bi;
648	u32 xsk_frames = 0;
649
650	tx_bi = &tx_ring->tx_buffer_info[ntc];
651	tx_desc = IXGBE_TX_DESC(tx_ring, ntc);
652
653	while (ntc != ntu) {
654		if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
655			break;
656
657		total_bytes += tx_bi->bytecount;
658		total_packets += tx_bi->gso_segs;
659
660		if (tx_bi->xdpf)
661			ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
662		else
663			xsk_frames++;
664
665		tx_bi->xdpf = NULL;
666
667		tx_bi++;
668		tx_desc++;
669		ntc++;
670		if (unlikely(ntc == tx_ring->count)) {
671			ntc = 0;
672			tx_bi = tx_ring->tx_buffer_info;
673			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
674		}
675
676		/* issue prefetch for next Tx descriptor */
677		prefetch(tx_desc);
678	}
679
680	tx_ring->next_to_clean = ntc;
681
682	u64_stats_update_begin(&tx_ring->syncp);
683	tx_ring->stats.bytes += total_bytes;
684	tx_ring->stats.packets += total_packets;
685	u64_stats_update_end(&tx_ring->syncp);
686	q_vector->tx.total_bytes += total_bytes;
687	q_vector->tx.total_packets += total_packets;
688
689	if (xsk_frames)
690		xsk_umem_complete_tx(umem, xsk_frames);
691
692	if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem))
693		xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
694
695	return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
696}
697
698int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
699{
700	struct ixgbe_adapter *adapter = netdev_priv(dev);
701	struct ixgbe_ring *ring;
702
703	if (test_bit(__IXGBE_DOWN, &adapter->state))
704		return -ENETDOWN;
705
706	if (!READ_ONCE(adapter->xdp_prog))
707		return -ENXIO;
708
709	if (qid >= adapter->num_xdp_queues)
710		return -ENXIO;
711
712	if (!adapter->xdp_ring[qid]->xsk_umem)
713		return -ENXIO;
714
715	ring = adapter->xdp_ring[qid];
716	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
717		u64 eics = BIT_ULL(ring->q_vector->v_idx);
718
719		ixgbe_irq_rearm_queues(adapter, eics);
720	}
721
722	return 0;
723}
724
725void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring)
726{
727	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
728	struct xdp_umem *umem = tx_ring->xsk_umem;
729	struct ixgbe_tx_buffer *tx_bi;
730	u32 xsk_frames = 0;
731
732	while (ntc != ntu) {
733		tx_bi = &tx_ring->tx_buffer_info[ntc];
734
735		if (tx_bi->xdpf)
736			ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
737		else
738			xsk_frames++;
739
740		tx_bi->xdpf = NULL;
741
742		ntc++;
743		if (ntc == tx_ring->count)
744			ntc = 0;
745	}
746
747	if (xsk_frames)
748		xsk_umem_complete_tx(umem, xsk_frames);
749}