v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2/* Copyright(c) 2018 Intel Corporation. */
  3
  4#include <linux/bpf_trace.h>
  5#include <net/xdp_sock_drv.h>
  6#include "i40e_txrx_common.h"
  7#include "i40e_xsk.h"
  8
  9void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring)
 10{
 11	memset(rx_ring->rx_bi_zc, 0,
 12	       sizeof(*rx_ring->rx_bi_zc) * rx_ring->count);
 13}
 14
 15static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
 16{
 17	return &rx_ring->rx_bi_zc[idx];
 18}
 19
 20/**
 21 * i40e_realloc_rx_xdp_bi - reallocate SW ring for either XSK or normal buffer
 22 * @rx_ring: Current rx ring
 23 * @pool_present: is pool for XSK present
 24 *
  25 * Try to allocate the memory and return -ENOMEM if the allocation fails.
  26 * If the allocation succeeds, substitute the buffer with the newly allocated one.
 27 * Returns 0 on success, negative on failure
 28 */
 29static int i40e_realloc_rx_xdp_bi(struct i40e_ring *rx_ring, bool pool_present)
 30{
 31	size_t elem_size = pool_present ? sizeof(*rx_ring->rx_bi_zc) :
 32					  sizeof(*rx_ring->rx_bi);
 33	void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL);
 34
 35	if (!sw_ring)
 36		return -ENOMEM;
 37
 38	if (pool_present) {
 39		kfree(rx_ring->rx_bi);
 40		rx_ring->rx_bi = NULL;
 41		rx_ring->rx_bi_zc = sw_ring;
 42	} else {
 43		kfree(rx_ring->rx_bi_zc);
 44		rx_ring->rx_bi_zc = NULL;
 45		rx_ring->rx_bi = sw_ring;
 46	}
 47	return 0;
 48}
 49
 50/**
 51 * i40e_realloc_rx_bi_zc - reallocate rx SW rings
 52 * @vsi: Current VSI
 53 * @zc: is zero copy set
 54 *
 55 * Reallocate buffer for rx_rings that might be used by XSK.
  56 * XDP requires more memory than rx_buf provides.
 57 * Returns 0 on success, negative on failure
 58 */
 59int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc)
 60{
 61	struct i40e_ring *rx_ring;
 62	unsigned long q;
 63
 64	for_each_set_bit(q, vsi->af_xdp_zc_qps, vsi->alloc_queue_pairs) {
 65		rx_ring = vsi->rx_rings[q];
 66		if (i40e_realloc_rx_xdp_bi(rx_ring, zc))
 67			return -ENOMEM;
 68	}
 69	return 0;
 70}
 71
 72/**
 73 * i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a
 74 * certain ring/qid
 75 * @vsi: Current VSI
 76 * @pool: buffer pool
 77 * @qid: Rx ring to associate buffer pool with
 78 *
 79 * Returns 0 on success, <0 on failure
 80 **/
 81static int i40e_xsk_pool_enable(struct i40e_vsi *vsi,
 82				struct xsk_buff_pool *pool,
 83				u16 qid)
 84{
 85	struct net_device *netdev = vsi->netdev;
 86	bool if_running;
 87	int err;
 88
 89	if (vsi->type != I40E_VSI_MAIN)
 90		return -EINVAL;
 91
 92	if (qid >= vsi->num_queue_pairs)
 93		return -EINVAL;
 94
 95	if (qid >= netdev->real_num_rx_queues ||
 96	    qid >= netdev->real_num_tx_queues)
 97		return -EINVAL;
 98
 99	err = xsk_pool_dma_map(pool, &vsi->back->pdev->dev, I40E_RX_DMA_ATTR);
100	if (err)
101		return err;
102
103	set_bit(qid, vsi->af_xdp_zc_qps);
104
105	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
106
107	if (if_running) {
108		err = i40e_queue_pair_disable(vsi, qid);
109		if (err)
110			return err;
111
112		err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], true);
113		if (err)
114			return err;
115
116		err = i40e_queue_pair_enable(vsi, qid);
117		if (err)
118			return err;
119
120		/* Kick start the NAPI context so that receiving will start */
121		err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX);
122		if (err)
123			return err;
124	}
125
126	return 0;
127}
128
129/**
130 * i40e_xsk_pool_disable - Disassociate an AF_XDP buffer pool from a
131 * certain ring/qid
132 * @vsi: Current VSI
133 * @qid: Rx ring to associate buffer pool with
134 *
135 * Returns 0 on success, <0 on failure
136 **/
137static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
138{
139	struct net_device *netdev = vsi->netdev;
140	struct xsk_buff_pool *pool;
141	bool if_running;
142	int err;
143
144	pool = xsk_get_pool_from_qid(netdev, qid);
145	if (!pool)
146		return -EINVAL;
147
148	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
149
150	if (if_running) {
151		err = i40e_queue_pair_disable(vsi, qid);
152		if (err)
153			return err;
154	}
155
156	clear_bit(qid, vsi->af_xdp_zc_qps);
157	xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR);
158
159	if (if_running) {
160		err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], false);
161		if (err)
162			return err;
163		err = i40e_queue_pair_enable(vsi, qid);
164		if (err)
165			return err;
166	}
167
168	return 0;
169}
170
171/**
172 * i40e_xsk_pool_setup - Enable/disassociate an AF_XDP buffer pool to/from
173 * a ring/qid
174 * @vsi: Current VSI
175 * @pool: Buffer pool to enable/associate to a ring, or NULL to disable
 176 * @qid: Rx ring to (dis)associate the buffer pool with
177 *
178 * This function enables or disables a buffer pool to a certain ring.
179 *
180 * Returns 0 on success, <0 on failure
181 **/
182int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool,
183			u16 qid)
184{
185	return pool ? i40e_xsk_pool_enable(vsi, pool, qid) :
186		i40e_xsk_pool_disable(vsi, qid);
187}
188
189/**
190 * i40e_run_xdp_zc - Executes an XDP program on an xdp_buff
191 * @rx_ring: Rx ring
192 * @xdp: xdp_buff used as input to the XDP program
193 * @xdp_prog: XDP program to run
194 *
195 * Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR}
196 **/
197static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp,
198			   struct bpf_prog *xdp_prog)
199{
200	int err, result = I40E_XDP_PASS;
201	struct i40e_ring *xdp_ring;
202	u32 act;
203
204	act = bpf_prog_run_xdp(xdp_prog, xdp);
205
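	/* XDP_REDIRECT is handled first since it is the common verdict for
	 * AF_XDP. When need_wakeup is enabled, -ENOBUFS from xdp_do_redirect()
	 * means the socket's Rx ring is full, so return I40E_XDP_EXIT to let
	 * the caller stop processing early.
	 */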
206	if (likely(act == XDP_REDIRECT)) {
207		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
208		if (!err)
209			return I40E_XDP_REDIR;
210		if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
211			result = I40E_XDP_EXIT;
212		else
213			result = I40E_XDP_CONSUMED;
214		goto out_failure;
215	}
216
217	switch (act) {
218	case XDP_PASS:
219		break;
220	case XDP_TX:
221		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
222		result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
223		if (result == I40E_XDP_CONSUMED)
224			goto out_failure;
225		break;
226	case XDP_DROP:
227		result = I40E_XDP_CONSUMED;
228		break;
229	default:
230		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
231		fallthrough;
232	case XDP_ABORTED:
233		result = I40E_XDP_CONSUMED;
234out_failure:
235		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
236	}
237	return result;
238}
239
240bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
241{
242	u16 ntu = rx_ring->next_to_use;
243	union i40e_rx_desc *rx_desc;
244	struct xdp_buff **xdp;
245	u32 nb_buffs, i;
246	dma_addr_t dma;
247
248	rx_desc = I40E_RX_DESC(rx_ring, ntu);
249	xdp = i40e_rx_bi(rx_ring, ntu);
250
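	/* Allocate at most up to the end of the ring so a single batch never
	 * crosses the wrap boundary; any remainder is left for a later call.
	 */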
251	nb_buffs = min_t(u16, count, rx_ring->count - ntu);
252	nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);
253	if (!nb_buffs)
254		return false;
255
256	i = nb_buffs;
257	while (i--) {
258		dma = xsk_buff_xdp_get_dma(*xdp);
259		rx_desc->read.pkt_addr = cpu_to_le64(dma);
260		rx_desc->read.hdr_addr = 0;
261
262		rx_desc++;
263		xdp++;
264	}
265
266	ntu += nb_buffs;
267	if (ntu == rx_ring->count) {
268		rx_desc = I40E_RX_DESC(rx_ring, 0);
269		ntu = 0;
270	}
271
272	/* clear the status bits for the next_to_use descriptor */
273	rx_desc->wb.qword1.status_error_len = 0;
274	i40e_release_rx_desc(rx_ring, ntu);
275
276	return count == nb_buffs;
277}
278
279/**
280 * i40e_construct_skb_zc - Create skbuff from zero-copy Rx buffer
281 * @rx_ring: Rx ring
282 * @xdp: xdp_buff
283 *
 284 * This function allocates a new skb from a zero-copy Rx buffer.
285 *
286 * Returns the skb, or NULL on failure.
287 **/
288static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
289					     struct xdp_buff *xdp)
290{
291	unsigned int totalsize = xdp->data_end - xdp->data_meta;
292	unsigned int metasize = xdp->data - xdp->data_meta;
293	struct skb_shared_info *sinfo = NULL;
294	struct sk_buff *skb;
295	u32 nr_frags = 0;
296
297	if (unlikely(xdp_buff_has_frags(xdp))) {
298		sinfo = xdp_get_shared_info_from_buff(xdp);
299		nr_frags = sinfo->nr_frags;
300	}
301	net_prefetch(xdp->data_meta);
302
303	/* allocate a skb to store the frags */
304	skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize);
305	if (unlikely(!skb))
306		goto out;
307
308	memcpy(__skb_put(skb, totalsize), xdp->data_meta,
309	       ALIGN(totalsize, sizeof(long)));
310
311	if (metasize) {
312		skb_metadata_set(skb, metasize);
313		__skb_pull(skb, metasize);
314	}
315
316	if (likely(!xdp_buff_has_frags(xdp)))
317		goto out;
318
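	/* The fragments still live in xsk pool buffers that are freed below,
	 * so copy each one into a freshly allocated page before attaching it
	 * to the skb.
	 */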
319	for (int i = 0; i < nr_frags; i++) {
320		struct skb_shared_info *skinfo = skb_shinfo(skb);
321		skb_frag_t *frag = &sinfo->frags[i];
322		struct page *page;
323		void *addr;
324
325		page = dev_alloc_page();
326		if (!page) {
327			dev_kfree_skb(skb);
328			return NULL;
329		}
330		addr = page_to_virt(page);
331
332		memcpy(addr, skb_frag_page(frag), skb_frag_size(frag));
333
334		__skb_fill_page_desc_noacc(skinfo, skinfo->nr_frags++,
335					   addr, 0, skb_frag_size(frag));
336	}
337
338out:
339	xsk_buff_free(xdp);
340	return skb;
341}
342
343static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
344				      struct xdp_buff *xdp_buff,
345				      union i40e_rx_desc *rx_desc,
346				      unsigned int *rx_packets,
347				      unsigned int *rx_bytes,
348				      unsigned int xdp_res,
349				      bool *failure)
350{
351	struct sk_buff *skb;
352
353	*rx_packets = 1;
354	*rx_bytes = xdp_get_buff_len(xdp_buff);
355
356	if (likely(xdp_res == I40E_XDP_REDIR) || xdp_res == I40E_XDP_TX)
357		return;
358
359	if (xdp_res == I40E_XDP_EXIT) {
360		*failure = true;
361		return;
362	}
363
364	if (xdp_res == I40E_XDP_CONSUMED) {
365		xsk_buff_free(xdp_buff);
366		return;
367	}
368	if (xdp_res == I40E_XDP_PASS) {
369		/* NB! We are not checking for errors using
370		 * i40e_test_staterr with
 371		 * BIT(I40E_RXD_QW1_ERROR_SHIFT). This is because
 372		 * SBP is *not* set in PRT_SBPVSI (default not set).
373		 */
374		skb = i40e_construct_skb_zc(rx_ring, xdp_buff);
375		if (!skb) {
376			rx_ring->rx_stats.alloc_buff_failed++;
377			*rx_packets = 0;
378			*rx_bytes = 0;
379			return;
380		}
381
382		if (eth_skb_pad(skb)) {
383			*rx_packets = 0;
384			*rx_bytes = 0;
385			return;
386		}
387
388		i40e_process_skb_fields(rx_ring, rx_desc, skb);
389		napi_gro_receive(&rx_ring->q_vector->napi, skb);
390		return;
391	}
392
393	/* Should never get here, as all valid cases have been handled already.
394	 */
395	WARN_ON_ONCE(1);
396}
397
398static int
399i40e_add_xsk_frag(struct i40e_ring *rx_ring, struct xdp_buff *first,
400		  struct xdp_buff *xdp, const unsigned int size)
401{
402	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first);
403
404	if (!xdp_buff_has_frags(first)) {
405		sinfo->nr_frags = 0;
406		sinfo->xdp_frags_size = 0;
407		xdp_buff_set_frags_flag(first);
408	}
409
410	if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
411		xsk_buff_free(first);
412		return -ENOMEM;
413	}
414
415	__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
416				   virt_to_page(xdp->data_hard_start),
417				   XDP_PACKET_HEADROOM, size);
418	sinfo->xdp_frags_size += size;
419	xsk_buff_add_frag(xdp);
420
421	return 0;
422}
423
424/**
425 * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
426 * @rx_ring: Rx ring
427 * @budget: NAPI budget
428 *
429 * Returns amount of work completed
430 **/
431int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
432{
433	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
434	u16 next_to_process = rx_ring->next_to_process;
435	u16 next_to_clean = rx_ring->next_to_clean;
436	unsigned int xdp_res, xdp_xmit = 0;
437	struct xdp_buff *first = NULL;
438	u32 count = rx_ring->count;
439	struct bpf_prog *xdp_prog;
440	u32 entries_to_alloc;
441	bool failure = false;
442
443	if (next_to_process != next_to_clean)
444		first = *i40e_rx_bi(rx_ring, next_to_clean);
445
446	/* NB! xdp_prog will always be !NULL, due to the fact that
447	 * this path is enabled by setting an XDP program.
448	 */
449	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
450
451	while (likely(total_rx_packets < (unsigned int)budget)) {
452		union i40e_rx_desc *rx_desc;
453		unsigned int rx_packets;
454		unsigned int rx_bytes;
455		struct xdp_buff *bi;
456		unsigned int size;
457		u64 qword;
458
459		rx_desc = I40E_RX_DESC(rx_ring, next_to_process);
460		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
461
462		/* This memory barrier is needed to keep us from reading
463		 * any other fields out of the rx_desc until we have
464		 * verified the descriptor has been written back.
465		 */
466		dma_rmb();
467
468		if (i40e_rx_is_programming_status(qword)) {
469			i40e_clean_programming_status(rx_ring,
470						      rx_desc->raw.qword[0],
471						      qword);
472			bi = *i40e_rx_bi(rx_ring, next_to_process);
473			xsk_buff_free(bi);
474			if (++next_to_process == count)
475				next_to_process = 0;
476			continue;
477		}
478
479		size = FIELD_GET(I40E_RXD_QW1_LENGTH_PBUF_MASK, qword);
480		if (!size)
481			break;
482
483		bi = *i40e_rx_bi(rx_ring, next_to_process);
484		xsk_buff_set_size(bi, size);
485		xsk_buff_dma_sync_for_cpu(bi);
486
487		if (!first)
488			first = bi;
489		else if (i40e_add_xsk_frag(rx_ring, first, bi, size))
490			break;
491
492		if (++next_to_process == count)
493			next_to_process = 0;
494
495		if (i40e_is_non_eop(rx_ring, rx_desc))
496			continue;
497
498		xdp_res = i40e_run_xdp_zc(rx_ring, first, xdp_prog);
499		i40e_handle_xdp_result_zc(rx_ring, first, rx_desc, &rx_packets,
500					  &rx_bytes, xdp_res, &failure);
501		next_to_clean = next_to_process;
502		if (failure)
503			break;
504		total_rx_packets += rx_packets;
505		total_rx_bytes += rx_bytes;
506		xdp_xmit |= xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR);
507		first = NULL;
508	}
509
510	rx_ring->next_to_clean = next_to_clean;
511	rx_ring->next_to_process = next_to_process;
512
513	entries_to_alloc = I40E_DESC_UNUSED(rx_ring);
514	if (entries_to_alloc >= I40E_RX_BUFFER_WRITE)
515		failure |= !i40e_alloc_rx_buffers_zc(rx_ring, entries_to_alloc);
516
517	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
518	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
519
520	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
521		if (failure || next_to_clean == rx_ring->next_to_use)
522			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
523		else
524			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);
525
526		return (int)total_rx_packets;
527	}
528	return failure ? budget : (int)total_rx_packets;
529}
530
531static void i40e_xmit_pkt(struct i40e_ring *xdp_ring, struct xdp_desc *desc,
532			  unsigned int *total_bytes)
533{
534	u32 cmd = I40E_TX_DESC_CMD_ICRC | xsk_is_eop_desc(desc);
535	struct i40e_tx_desc *tx_desc;
536	dma_addr_t dma;
537
538	dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);
539	xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);
540
541	tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
542	tx_desc->buffer_addr = cpu_to_le64(dma);
543	tx_desc->cmd_type_offset_bsz = build_ctob(cmd, 0, desc->len, 0);
544
545	*total_bytes += desc->len;
546}
547
548static void i40e_xmit_pkt_batch(struct i40e_ring *xdp_ring, struct xdp_desc *desc,
549				unsigned int *total_bytes)
550{
551	u16 ntu = xdp_ring->next_to_use;
552	struct i40e_tx_desc *tx_desc;
553	dma_addr_t dma;
554	u32 i;
555
556	loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
557		u32 cmd = I40E_TX_DESC_CMD_ICRC | xsk_is_eop_desc(&desc[i]);
558
559		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc[i].addr);
560		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc[i].len);
561
562		tx_desc = I40E_TX_DESC(xdp_ring, ntu++);
563		tx_desc->buffer_addr = cpu_to_le64(dma);
564		tx_desc->cmd_type_offset_bsz = build_ctob(cmd, 0, desc[i].len, 0);
565
566		*total_bytes += desc[i].len;
567	}
568
569	xdp_ring->next_to_use = ntu;
570}
571
572static void i40e_fill_tx_hw_ring(struct i40e_ring *xdp_ring, struct xdp_desc *descs, u32 nb_pkts,
573				 unsigned int *total_bytes)
574{
575	u32 batched, leftover, i;
576
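	/* PKTS_PER_BATCH is a power of two, so the mask splits nb_pkts into
	 * whole unrolled batches plus a leftover that is sent one descriptor
	 * at a time.
	 */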
577	batched = nb_pkts & ~(PKTS_PER_BATCH - 1);
578	leftover = nb_pkts & (PKTS_PER_BATCH - 1);
579	for (i = 0; i < batched; i += PKTS_PER_BATCH)
580		i40e_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
581	for (i = batched; i < batched + leftover; i++)
582		i40e_xmit_pkt(xdp_ring, &descs[i], total_bytes);
583}
584
585static void i40e_set_rs_bit(struct i40e_ring *xdp_ring)
586{
587	u16 ntu = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1;
588	struct i40e_tx_desc *tx_desc;
589
590	tx_desc = I40E_TX_DESC(xdp_ring, ntu);
591	tx_desc->cmd_type_offset_bsz |= cpu_to_le64(I40E_TX_DESC_CMD_RS << I40E_TXD_QW1_CMD_SHIFT);
592}
593
594/**
595 * i40e_xmit_zc - Performs zero-copy Tx AF_XDP
596 * @xdp_ring: XDP Tx ring
597 * @budget: NAPI budget
598 *
599 * Returns true if the work is finished.
600 **/
601static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
602{
603	struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
604	u32 nb_pkts, nb_processed = 0;
605	unsigned int total_bytes = 0;
606
607	nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
608	if (!nb_pkts)
609		return true;
610
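	/* If the batch would run past the end of the ring, fill descriptors up
	 * to the end first, wrap next_to_use back to zero, and then fill the
	 * remainder from the start of the ring.
	 */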
611	if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
612		nb_processed = xdp_ring->count - xdp_ring->next_to_use;
613		i40e_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
614		xdp_ring->next_to_use = 0;
615	}
616
617	i40e_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
618			     &total_bytes);
619
620	/* Request an interrupt for the last frame and bump tail ptr. */
621	i40e_set_rs_bit(xdp_ring);
622	i40e_xdp_ring_update_tail(xdp_ring);
623
624	i40e_update_tx_stats(xdp_ring, nb_pkts, total_bytes);
625
626	return nb_pkts < budget;
627}
628
629/**
630 * i40e_clean_xdp_tx_buffer - Frees and unmaps an XDP Tx entry
631 * @tx_ring: XDP Tx ring
632 * @tx_bi: Tx buffer info to clean
633 **/
634static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
635				     struct i40e_tx_buffer *tx_bi)
636{
637	xdp_return_frame(tx_bi->xdpf);
638	tx_ring->xdp_tx_active--;
639	dma_unmap_single(tx_ring->dev,
640			 dma_unmap_addr(tx_bi, dma),
641			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
642	dma_unmap_len_set(tx_bi, len, 0);
643}
644
645/**
646 * i40e_clean_xdp_tx_irq - Completes AF_XDP entries, and cleans XDP entries
647 * @vsi: Current VSI
648 * @tx_ring: XDP Tx ring
649 *
650 * Returns true if cleanup/transmission is done.
651 **/
652bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring)
653{
654	struct xsk_buff_pool *bp = tx_ring->xsk_pool;
655	u32 i, completed_frames, xsk_frames = 0;
656	u32 head_idx = i40e_get_head(tx_ring);
657	struct i40e_tx_buffer *tx_bi;
658	unsigned int ntc;
659
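	/* The head write-back value may be behind next_to_clean when the ring
	 * has wrapped; normalize it so the subtraction yields the number of
	 * descriptors the hardware has completed.
	 */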
660	if (head_idx < tx_ring->next_to_clean)
661		head_idx += tx_ring->count;
662	completed_frames = head_idx - tx_ring->next_to_clean;
663
664	if (completed_frames == 0)
665		goto out_xmit;
666
667	if (likely(!tx_ring->xdp_tx_active)) {
668		xsk_frames = completed_frames;
669		goto skip;
670	}
671
672	ntc = tx_ring->next_to_clean;
673
674	for (i = 0; i < completed_frames; i++) {
675		tx_bi = &tx_ring->tx_bi[ntc];
676
677		if (tx_bi->xdpf) {
678			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
679			tx_bi->xdpf = NULL;
680		} else {
681			xsk_frames++;
682		}
683
684		if (++ntc >= tx_ring->count)
685			ntc = 0;
686	}
687
688skip:
689	tx_ring->next_to_clean += completed_frames;
690	if (unlikely(tx_ring->next_to_clean >= tx_ring->count))
691		tx_ring->next_to_clean -= tx_ring->count;
692
693	if (xsk_frames)
694		xsk_tx_completed(bp, xsk_frames);
695
696	i40e_arm_wb(tx_ring, vsi, completed_frames);
697
698out_xmit:
699	if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
700		xsk_set_tx_need_wakeup(tx_ring->xsk_pool);
701
702	return i40e_xmit_zc(tx_ring, I40E_DESC_UNUSED(tx_ring));
703}
704
705/**
706 * i40e_xsk_wakeup - Implements the ndo_xsk_wakeup
707 * @dev: the netdevice
708 * @queue_id: queue id to wake up
709 * @flags: ignored in our case since we have Rx and Tx in the same NAPI.
710 *
711 * Returns <0 for errors, 0 otherwise.
712 **/
713int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
714{
715	struct i40e_netdev_priv *np = netdev_priv(dev);
716	struct i40e_vsi *vsi = np->vsi;
717	struct i40e_pf *pf = vsi->back;
718	struct i40e_ring *ring;
719
720	if (test_bit(__I40E_CONFIG_BUSY, pf->state))
721		return -EAGAIN;
722
723	if (test_bit(__I40E_VSI_DOWN, vsi->state))
724		return -ENETDOWN;
725
726	if (!i40e_enabled_xdp_vsi(vsi))
727		return -EINVAL;
728
729	if (queue_id >= vsi->num_queue_pairs)
730		return -EINVAL;
731
732	if (!vsi->xdp_rings[queue_id]->xsk_pool)
733		return -EINVAL;
734
735	ring = vsi->xdp_rings[queue_id];
736
737	/* The idea here is that if NAPI is running, mark a miss, so
738	 * it will run again. If not, trigger an interrupt and
739	 * schedule the NAPI from interrupt context. If NAPI would be
740	 * scheduled here, the interrupt affinity would not be
741	 * honored.
742	 */
743	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi))
744		i40e_force_wb(vsi, ring->q_vector);
745
746	return 0;
747}
748
749void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
750{
751	u16 ntc = rx_ring->next_to_clean;
752	u16 ntu = rx_ring->next_to_use;
753
754	while (ntc != ntu) {
755		struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, ntc);
756
757		xsk_buff_free(rx_bi);
758		ntc++;
759		if (ntc >= rx_ring->count)
760			ntc = 0;
761	}
762}
763
764/**
765 * i40e_xsk_clean_tx_ring - Clean the XDP Tx ring on shutdown
766 * @tx_ring: XDP Tx ring
767 **/
768void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
769{
770	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
771	struct xsk_buff_pool *bp = tx_ring->xsk_pool;
772	struct i40e_tx_buffer *tx_bi;
773	u32 xsk_frames = 0;
774
775	while (ntc != ntu) {
776		tx_bi = &tx_ring->tx_bi[ntc];
777
778		if (tx_bi->xdpf)
779			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
780		else
781			xsk_frames++;
782
783		tx_bi->xdpf = NULL;
784
785		ntc++;
786		if (ntc >= tx_ring->count)
787			ntc = 0;
788	}
789
790	if (xsk_frames)
791		xsk_tx_completed(bp, xsk_frames);
792}
793
794/**
795 * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have an AF_XDP
796 * buffer pool attached
797 * @vsi: vsi
798 *
799 * Returns true if any of the Rx rings has an AF_XDP buffer pool attached
800 **/
801bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
802{
803	struct net_device *netdev = vsi->netdev;
804	int i;
805
806	for (i = 0; i < vsi->num_queue_pairs; i++) {
807		if (xsk_get_pool_from_qid(netdev, i))
808			return true;
809	}
810
811	return false;
812}
v5.4
  1// SPDX-License-Identifier: GPL-2.0
  2/* Copyright(c) 2018 Intel Corporation. */
  3
  4#include <linux/bpf_trace.h>
  5#include <net/xdp_sock.h>
  6#include <net/xdp.h>
  7
  8#include "i40e.h"
  9#include "i40e_txrx_common.h"
 10#include "i40e_xsk.h"
 11
 12/**
 13 * i40e_xsk_umem_dma_map - DMA maps all UMEM memory for the netdev
 14 * @vsi: Current VSI
 15 * @umem: UMEM to DMA map
 16 *
 17 * Returns 0 on success, <0 on failure
 18 **/
 19static int i40e_xsk_umem_dma_map(struct i40e_vsi *vsi, struct xdp_umem *umem)
 20{
 21	struct i40e_pf *pf = vsi->back;
 22	struct device *dev;
 23	unsigned int i, j;
 24	dma_addr_t dma;
 25
 26	dev = &pf->pdev->dev;
 27	for (i = 0; i < umem->npgs; i++) {
 28		dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE,
 29					 DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR);
 30		if (dma_mapping_error(dev, dma))
 31			goto out_unmap;
 32
 33		umem->pages[i].dma = dma;
 34	}
 35
 36	return 0;
 37
 38out_unmap:
 39	for (j = 0; j < i; j++) {
  40		dma_unmap_page_attrs(dev, umem->pages[j].dma, PAGE_SIZE,
  41				     DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR);
  42		umem->pages[j].dma = 0;
 43	}
 44
 45	return -1;
 46}
 47
 48/**
 49 * i40e_xsk_umem_dma_unmap - DMA unmaps all UMEM memory for the netdev
 50 * @vsi: Current VSI
  51 * @umem: UMEM to DMA unmap
 52 **/
 53static void i40e_xsk_umem_dma_unmap(struct i40e_vsi *vsi, struct xdp_umem *umem)
 54{
 55	struct i40e_pf *pf = vsi->back;
 56	struct device *dev;
 57	unsigned int i;
 58
 59	dev = &pf->pdev->dev;
 60
 61	for (i = 0; i < umem->npgs; i++) {
 62		dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
 63				     DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR);
 64
 65		umem->pages[i].dma = 0;
 66	}
 67}
 68
 69/**
 70 * i40e_xsk_umem_enable - Enable/associate a UMEM to a certain ring/qid
 71 * @vsi: Current VSI
 72 * @umem: UMEM
 73 * @qid: Rx ring to associate UMEM to
 74 *
 75 * Returns 0 on success, <0 on failure
 76 **/
 77static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
 78				u16 qid)
 79{
 80	struct net_device *netdev = vsi->netdev;
 81	struct xdp_umem_fq_reuse *reuseq;
 82	bool if_running;
 83	int err;
 84
 85	if (vsi->type != I40E_VSI_MAIN)
 86		return -EINVAL;
 87
 88	if (qid >= vsi->num_queue_pairs)
 89		return -EINVAL;
 90
 91	if (qid >= netdev->real_num_rx_queues ||
 92	    qid >= netdev->real_num_tx_queues)
 93		return -EINVAL;
 94
 95	reuseq = xsk_reuseq_prepare(vsi->rx_rings[0]->count);
 96	if (!reuseq)
 97		return -ENOMEM;
 98
 99	xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));
100
101	err = i40e_xsk_umem_dma_map(vsi, umem);
102	if (err)
103		return err;
104
105	set_bit(qid, vsi->af_xdp_zc_qps);
106
107	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
108
109	if (if_running) {
110		err = i40e_queue_pair_disable(vsi, qid);
111		if (err)
112			return err;
113
114		err = i40e_queue_pair_enable(vsi, qid);
115		if (err)
116			return err;
117
118		/* Kick start the NAPI context so that receiving will start */
119		err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX);
120		if (err)
121			return err;
122	}
123
124	return 0;
125}
126
127/**
128 * i40e_xsk_umem_disable - Disassociate a UMEM from a certain ring/qid
129 * @vsi: Current VSI
130 * @qid: Rx ring to associate UMEM to
131 *
132 * Returns 0 on success, <0 on failure
133 **/
134static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
135{
136	struct net_device *netdev = vsi->netdev;
137	struct xdp_umem *umem;
138	bool if_running;
139	int err;
140
141	umem = xdp_get_umem_from_qid(netdev, qid);
142	if (!umem)
143		return -EINVAL;
144
145	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
146
147	if (if_running) {
148		err = i40e_queue_pair_disable(vsi, qid);
149		if (err)
150			return err;
151	}
152
153	clear_bit(qid, vsi->af_xdp_zc_qps);
154	i40e_xsk_umem_dma_unmap(vsi, umem);
155
156	if (if_running) {
157		err = i40e_queue_pair_enable(vsi, qid);
158		if (err)
159			return err;
160	}
161
162	return 0;
163}
164
165/**
166 * i40e_xsk_umem_setup - Enable/disassociate a UMEM to/from a ring/qid
167 * @vsi: Current VSI
168 * @umem: UMEM to enable/associate to a ring, or NULL to disable
 169 * @qid: Rx ring to (dis)associate the UMEM with
170 *
171 * This function enables or disables a UMEM to a certain ring.
172 *
173 * Returns 0 on success, <0 on failure
174 **/
175int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
176			u16 qid)
177{
178	return umem ? i40e_xsk_umem_enable(vsi, umem, qid) :
179		i40e_xsk_umem_disable(vsi, qid);
180}
181
182/**
183 * i40e_run_xdp_zc - Executes an XDP program on an xdp_buff
184 * @rx_ring: Rx ring
185 * @xdp: xdp_buff used as input to the XDP program
186 *
 187 * This function runs the given XDP program on the received buffer.
188 *
189 * Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR}
190 **/
191static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
192{
193	struct xdp_umem *umem = rx_ring->xsk_umem;
194	int err, result = I40E_XDP_PASS;
195	struct i40e_ring *xdp_ring;
196	struct bpf_prog *xdp_prog;
197	u64 offset;
198	u32 act;
199
200	rcu_read_lock();
201	/* NB! xdp_prog will always be !NULL, due to the fact that
202	 * this path is enabled by setting an XDP program.
203	 */
204	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
205	act = bpf_prog_run_xdp(xdp_prog, xdp);
206	offset = xdp->data - xdp->data_hard_start;
207
208	xdp->handle = xsk_umem_adjust_offset(umem, xdp->handle, offset);
209
210	switch (act) {
211	case XDP_PASS:
212		break;
213	case XDP_TX:
214		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
215		result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
216		break;
217	case XDP_REDIRECT:
218		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
219		result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
220		break;
221	default:
222		bpf_warn_invalid_xdp_action(act);
223		/* fall through */
224	case XDP_ABORTED:
225		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
226		/* fallthrough -- handle aborts by dropping packet */
227	case XDP_DROP:
228		result = I40E_XDP_CONSUMED;
229		break;
230	}
231	rcu_read_unlock();
232	return result;
233}
234
235/**
236 * i40e_alloc_buffer_zc - Allocates an i40e_rx_buffer
237 * @rx_ring: Rx ring
238 * @bi: Rx buffer to populate
239 *
240 * This function allocates an Rx buffer. The buffer can come from fill
241 * queue, or via the recycle queue (next_to_alloc).
242 *
243 * Returns true for a successful allocation, false otherwise
244 **/
245static bool i40e_alloc_buffer_zc(struct i40e_ring *rx_ring,
246				 struct i40e_rx_buffer *bi)
247{
248	struct xdp_umem *umem = rx_ring->xsk_umem;
249	void *addr = bi->addr;
250	u64 handle, hr;
251
252	if (addr) {
253		rx_ring->rx_stats.page_reuse_count++;
254		return true;
255	}
256
257	if (!xsk_umem_peek_addr(umem, &handle)) {
258		rx_ring->rx_stats.alloc_page_failed++;
259		return false;
260	}
261
262	hr = umem->headroom + XDP_PACKET_HEADROOM;
263
264	bi->dma = xdp_umem_get_dma(umem, handle);
265	bi->dma += hr;
266
267	bi->addr = xdp_umem_get_data(umem, handle);
268	bi->addr += hr;
269
270	bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
271
272	xsk_umem_discard_addr(umem);
273	return true;
274}
275
276/**
277 * i40e_alloc_buffer_slow_zc - Allocates an i40e_rx_buffer
278 * @rx_ring: Rx ring
279 * @bi: Rx buffer to populate
280 *
281 * This function allocates an Rx buffer. The buffer can come from fill
282 * queue, or via the reuse queue.
283 *
284 * Returns true for a successful allocation, false otherwise
285 **/
286static bool i40e_alloc_buffer_slow_zc(struct i40e_ring *rx_ring,
287				      struct i40e_rx_buffer *bi)
288{
289	struct xdp_umem *umem = rx_ring->xsk_umem;
290	u64 handle, hr;
291
292	if (!xsk_umem_peek_addr_rq(umem, &handle)) {
293		rx_ring->rx_stats.alloc_page_failed++;
294		return false;
295	}
296
297	handle &= rx_ring->xsk_umem->chunk_mask;
298
299	hr = umem->headroom + XDP_PACKET_HEADROOM;
300
301	bi->dma = xdp_umem_get_dma(umem, handle);
302	bi->dma += hr;
303
304	bi->addr = xdp_umem_get_data(umem, handle);
305	bi->addr += hr;
306
307	bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
308
309	xsk_umem_discard_addr_rq(umem);
310	return true;
311}
312
313static __always_inline bool
314__i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count,
315			   bool alloc(struct i40e_ring *rx_ring,
316				      struct i40e_rx_buffer *bi))
317{
318	u16 ntu = rx_ring->next_to_use;
319	union i40e_rx_desc *rx_desc;
320	struct i40e_rx_buffer *bi;
321	bool ok = true;
322
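	/* The alloc() callback selects the fill strategy: the fast path pulls
	 * straight from the fill queue with recycling, while the slow path may
	 * also fall back to the reuse queue. The ring-filling loop is shared.
	 */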
323	rx_desc = I40E_RX_DESC(rx_ring, ntu);
324	bi = &rx_ring->rx_bi[ntu];
325	do {
326		if (!alloc(rx_ring, bi)) {
327			ok = false;
328			goto no_buffers;
329		}
330
331		dma_sync_single_range_for_device(rx_ring->dev, bi->dma, 0,
332						 rx_ring->rx_buf_len,
333						 DMA_BIDIRECTIONAL);
334
335		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
336
337		rx_desc++;
338		bi++;
339		ntu++;
340
341		if (unlikely(ntu == rx_ring->count)) {
342			rx_desc = I40E_RX_DESC(rx_ring, 0);
343			bi = rx_ring->rx_bi;
344			ntu = 0;
345		}
346
347		rx_desc->wb.qword1.status_error_len = 0;
348		count--;
349	} while (count);
350
351no_buffers:
352	if (rx_ring->next_to_use != ntu)
353		i40e_release_rx_desc(rx_ring, ntu);
354
355	return ok;
356}
357
358/**
359 * i40e_alloc_rx_buffers_zc - Allocates a number of Rx buffers
360 * @rx_ring: Rx ring
361 * @count: The number of buffers to allocate
362 *
363 * This function allocates a number of Rx buffers from the reuse queue
364 * or fill ring and places them on the Rx ring.
365 *
366 * Returns true for a successful allocation, false otherwise
367 **/
368bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
369{
370	return __i40e_alloc_rx_buffers_zc(rx_ring, count,
371					  i40e_alloc_buffer_slow_zc);
372}
373
374/**
375 * i40e_alloc_rx_buffers_fast_zc - Allocates a number of Rx buffers
376 * @rx_ring: Rx ring
377 * @count: The number of buffers to allocate
378 *
379 * This function allocates a number of Rx buffers from the fill ring
380 * or the internal recycle mechanism and places them on the Rx ring.
381 *
382 * Returns true for a successful allocation, false otherwise
383 **/
384static bool i40e_alloc_rx_buffers_fast_zc(struct i40e_ring *rx_ring, u16 count)
385{
386	return __i40e_alloc_rx_buffers_zc(rx_ring, count,
387					  i40e_alloc_buffer_zc);
388}
389
390/**
391 * i40e_get_rx_buffer_zc - Return the current Rx buffer
392 * @rx_ring: Rx ring
393 * @size: The size of the rx buffer (read from descriptor)
394 *
395 * This function returns the current, received Rx buffer, and also
 396 * does DMA synchronization so the buffer can safely be read by the CPU.
397 *
398 * Returns the received Rx buffer
399 **/
400static struct i40e_rx_buffer *i40e_get_rx_buffer_zc(struct i40e_ring *rx_ring,
401						    const unsigned int size)
402{
403	struct i40e_rx_buffer *bi;
404
405	bi = &rx_ring->rx_bi[rx_ring->next_to_clean];
406
407	/* we are reusing so sync this buffer for CPU use */
408	dma_sync_single_range_for_cpu(rx_ring->dev,
409				      bi->dma, 0,
410				      size,
411				      DMA_BIDIRECTIONAL);
412
413	return bi;
414}
415
416/**
417 * i40e_reuse_rx_buffer_zc - Recycle an Rx buffer
418 * @rx_ring: Rx ring
419 * @old_bi: The Rx buffer to recycle
420 *
421 * This function recycles a finished Rx buffer, and places it on the
422 * recycle queue (next_to_alloc).
423 **/
424static void i40e_reuse_rx_buffer_zc(struct i40e_ring *rx_ring,
425				    struct i40e_rx_buffer *old_bi)
426{
427	struct i40e_rx_buffer *new_bi = &rx_ring->rx_bi[rx_ring->next_to_alloc];
428	u16 nta = rx_ring->next_to_alloc;
429
430	/* update, and store next to alloc */
431	nta++;
432	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
433
434	/* transfer page from old buffer to new buffer */
435	new_bi->dma = old_bi->dma;
436	new_bi->addr = old_bi->addr;
437	new_bi->handle = old_bi->handle;
438
439	old_bi->addr = NULL;
440}
441
442/**
443 * i40e_zca_free - Free callback for MEM_TYPE_ZERO_COPY allocations
444 * @alloc: Zero-copy allocator
445 * @handle: Buffer handle
446 **/
447void i40e_zca_free(struct zero_copy_allocator *alloc, unsigned long handle)
448{
449	struct i40e_rx_buffer *bi;
450	struct i40e_ring *rx_ring;
451	u64 hr, mask;
452	u16 nta;
453
454	rx_ring = container_of(alloc, struct i40e_ring, zca);
455	hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
456	mask = rx_ring->xsk_umem->chunk_mask;
457
458	nta = rx_ring->next_to_alloc;
459	bi = &rx_ring->rx_bi[nta];
460
461	nta++;
462	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
463
464	handle &= mask;
465
466	bi->dma = xdp_umem_get_dma(rx_ring->xsk_umem, handle);
467	bi->dma += hr;
468
469	bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle);
470	bi->addr += hr;
471
472	bi->handle = xsk_umem_adjust_offset(rx_ring->xsk_umem, (u64)handle,
473					    rx_ring->xsk_umem->headroom);
474}
475
476/**
 477 * i40e_construct_skb_zc - Create skbuff from zero-copy Rx buffer
478 * @rx_ring: Rx ring
479 * @bi: Rx buffer
480 * @xdp: xdp_buff
481 *
 482 * This function allocates a new skb from a zero-copy Rx buffer.
483 *
484 * Returns the skb, or NULL on failure.
485 **/
486static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
487					     struct i40e_rx_buffer *bi,
488					     struct xdp_buff *xdp)
489{
490	unsigned int metasize = xdp->data - xdp->data_meta;
491	unsigned int datasize = xdp->data_end - xdp->data;
492	struct sk_buff *skb;
493
494	/* allocate a skb to store the frags */
495	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
496			       xdp->data_end - xdp->data_hard_start,
497			       GFP_ATOMIC | __GFP_NOWARN);
498	if (unlikely(!skb))
499		return NULL;
500
501	skb_reserve(skb, xdp->data - xdp->data_hard_start);
502	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
503	if (metasize)
504		skb_metadata_set(skb, metasize);
505
506	i40e_reuse_rx_buffer_zc(rx_ring, bi);
507	return skb;
508}
509
510/**
511 * i40e_inc_ntc: Advance the next_to_clean index
512 * @rx_ring: Rx ring
513 **/
514static void i40e_inc_ntc(struct i40e_ring *rx_ring)
515{
516	u32 ntc = rx_ring->next_to_clean + 1;
517
518	ntc = (ntc < rx_ring->count) ? ntc : 0;
519	rx_ring->next_to_clean = ntc;
520	prefetch(I40E_RX_DESC(rx_ring, ntc));
521}
522
523/**
524 * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
525 * @rx_ring: Rx ring
526 * @budget: NAPI budget
527 *
528 * Returns amount of work completed
529 **/
530int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
531{
532	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
533	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
534	unsigned int xdp_res, xdp_xmit = 0;
535	bool failure = false;
536	struct sk_buff *skb;
537	struct xdp_buff xdp;
538
539	xdp.rxq = &rx_ring->xdp_rxq;
540
541	while (likely(total_rx_packets < (unsigned int)budget)) {
542		struct i40e_rx_buffer *bi;
543		union i40e_rx_desc *rx_desc;
544		unsigned int size;
545		u64 qword;
546
547		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
548			failure = failure ||
549				  !i40e_alloc_rx_buffers_fast_zc(rx_ring,
550								 cleaned_count);
551			cleaned_count = 0;
552		}
553
554		rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
555		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
556
557		/* This memory barrier is needed to keep us from reading
558		 * any other fields out of the rx_desc until we have
559		 * verified the descriptor has been written back.
560		 */
561		dma_rmb();
562
563		bi = i40e_clean_programming_status(rx_ring, rx_desc,
564						   qword);
565		if (unlikely(bi)) {
566			i40e_reuse_rx_buffer_zc(rx_ring, bi);
567			cleaned_count++;
568			continue;
569		}
570
571		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
572		       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
573		if (!size)
574			break;
575
576		bi = i40e_get_rx_buffer_zc(rx_ring, size);
577		xdp.data = bi->addr;
578		xdp.data_meta = xdp.data;
579		xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
580		xdp.data_end = xdp.data + size;
581		xdp.handle = bi->handle;
582
583		xdp_res = i40e_run_xdp_zc(rx_ring, &xdp);
584		if (xdp_res) {
585			if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
586				xdp_xmit |= xdp_res;
587				bi->addr = NULL;
588			} else {
589				i40e_reuse_rx_buffer_zc(rx_ring, bi);
590			}
591
592			total_rx_bytes += size;
593			total_rx_packets++;
594
595			cleaned_count++;
596			i40e_inc_ntc(rx_ring);
597			continue;
598		}
599
600		/* XDP_PASS path */
601
602		/* NB! We are not checking for errors using
603		 * i40e_test_staterr with
 604		 * BIT(I40E_RXD_QW1_ERROR_SHIFT). This is because
 605		 * SBP is *not* set in PRT_SBPVSI (default not set).
606		 */
607		skb = i40e_construct_skb_zc(rx_ring, bi, &xdp);
608		if (!skb) {
609			rx_ring->rx_stats.alloc_buff_failed++;
610			break;
611		}
612
613		cleaned_count++;
614		i40e_inc_ntc(rx_ring);
615
616		if (eth_skb_pad(skb))
617			continue;
618
619		total_rx_bytes += skb->len;
620		total_rx_packets++;
621
622		i40e_process_skb_fields(rx_ring, rx_desc, skb);
623		napi_gro_receive(&rx_ring->q_vector->napi, skb);
624	}
625
626	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
627	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
628
629	if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) {
630		if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
631			xsk_set_rx_need_wakeup(rx_ring->xsk_umem);
632		else
633			xsk_clear_rx_need_wakeup(rx_ring->xsk_umem);
634
635		return (int)total_rx_packets;
636	}
637	return failure ? budget : (int)total_rx_packets;
638}
639
640/**
641 * i40e_xmit_zc - Performs zero-copy Tx AF_XDP
642 * @xdp_ring: XDP Tx ring
643 * @budget: NAPI budget
644 *
645 * Returns true if the work is finished.
646 **/
647static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
648{
649	struct i40e_tx_desc *tx_desc = NULL;
650	struct i40e_tx_buffer *tx_bi;
651	bool work_done = true;
652	struct xdp_desc desc;
653	dma_addr_t dma;
654
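	/* Consume descriptors from the AF_XDP Tx ring one at a time until the
	 * budget is exhausted, the HW descriptor ring is full, or there are no
	 * more descriptors to send.
	 */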
655	while (budget-- > 0) {
656		if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) {
657			xdp_ring->tx_stats.tx_busy++;
658			work_done = false;
659			break;
660		}
661
662		if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
663			break;
664
665		dma = xdp_umem_get_dma(xdp_ring->xsk_umem, desc.addr);
666
667		dma_sync_single_for_device(xdp_ring->dev, dma, desc.len,
668					   DMA_BIDIRECTIONAL);
669
670		tx_bi = &xdp_ring->tx_bi[xdp_ring->next_to_use];
671		tx_bi->bytecount = desc.len;
672
673		tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use);
674		tx_desc->buffer_addr = cpu_to_le64(dma);
675		tx_desc->cmd_type_offset_bsz =
676			build_ctob(I40E_TX_DESC_CMD_ICRC
677				   | I40E_TX_DESC_CMD_EOP,
678				   0, desc.len, 0);
679
680		xdp_ring->next_to_use++;
681		if (xdp_ring->next_to_use == xdp_ring->count)
682			xdp_ring->next_to_use = 0;
683	}
684
685	if (tx_desc) {
686		/* Request an interrupt for the last frame and bump tail ptr. */
687		tx_desc->cmd_type_offset_bsz |= (I40E_TX_DESC_CMD_RS <<
688						 I40E_TXD_QW1_CMD_SHIFT);
689		i40e_xdp_ring_update_tail(xdp_ring);
690
691		xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
692	}
693
694	return !!budget && work_done;
695}
696
697/**
698 * i40e_clean_xdp_tx_buffer - Frees and unmaps an XDP Tx entry
699 * @tx_ring: XDP Tx ring
700 * @tx_bi: Tx buffer info to clean
701 **/
702static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
703				     struct i40e_tx_buffer *tx_bi)
704{
705	xdp_return_frame(tx_bi->xdpf);
706	dma_unmap_single(tx_ring->dev,
707			 dma_unmap_addr(tx_bi, dma),
708			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
709	dma_unmap_len_set(tx_bi, len, 0);
710}
711
712/**
713 * i40e_clean_xdp_tx_irq - Completes AF_XDP entries, and cleans XDP entries
 714 * @vsi: Current VSI
 715 * @tx_ring: XDP Tx ring
 716 *
 717 * Returns true if cleanup/transmission is done.
718 **/
719bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi,
720			   struct i40e_ring *tx_ring, int napi_budget)
721{
722	unsigned int ntc, total_bytes = 0, budget = vsi->work_limit;
723	u32 i, completed_frames, frames_ready, xsk_frames = 0;
724	struct xdp_umem *umem = tx_ring->xsk_umem;
725	u32 head_idx = i40e_get_head(tx_ring);
726	bool work_done = true, xmit_done;
727	struct i40e_tx_buffer *tx_bi;
728
729	if (head_idx < tx_ring->next_to_clean)
730		head_idx += tx_ring->count;
731	frames_ready = head_idx - tx_ring->next_to_clean;
732
733	if (frames_ready == 0) {
734		goto out_xmit;
735	} else if (frames_ready > budget) {
736		completed_frames = budget;
737		work_done = false;
738	} else {
739		completed_frames = frames_ready;
740	}
741
742	ntc = tx_ring->next_to_clean;
743
744	for (i = 0; i < completed_frames; i++) {
745		tx_bi = &tx_ring->tx_bi[ntc];
746
747		if (tx_bi->xdpf)
748			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
749		else
750			xsk_frames++;
751
752		tx_bi->xdpf = NULL;
753		total_bytes += tx_bi->bytecount;
754
755		if (++ntc >= tx_ring->count)
756			ntc = 0;
757	}
758
759	tx_ring->next_to_clean += completed_frames;
760	if (unlikely(tx_ring->next_to_clean >= tx_ring->count))
761		tx_ring->next_to_clean -= tx_ring->count;
762
763	if (xsk_frames)
764		xsk_umem_complete_tx(umem, xsk_frames);
765
766	i40e_arm_wb(tx_ring, vsi, budget);
767	i40e_update_tx_stats(tx_ring, completed_frames, total_bytes);
768
769out_xmit:
770	if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem))
771		xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
772
773	xmit_done = i40e_xmit_zc(tx_ring, budget);
774
775	return work_done && xmit_done;
776}
777
778/**
779 * i40e_xsk_wakeup - Implements the ndo_xsk_wakeup
780 * @dev: the netdevice
781 * @queue_id: queue id to wake up
782 * @flags: ignored in our case since we have Rx and Tx in the same NAPI.
783 *
784 * Returns <0 for errors, 0 otherwise.
785 **/
786int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
787{
788	struct i40e_netdev_priv *np = netdev_priv(dev);
789	struct i40e_vsi *vsi = np->vsi;
790	struct i40e_ring *ring;
791
792	if (test_bit(__I40E_VSI_DOWN, vsi->state))
793		return -ENETDOWN;
794
795	if (!i40e_enabled_xdp_vsi(vsi))
796		return -ENXIO;
797
798	if (queue_id >= vsi->num_queue_pairs)
799		return -ENXIO;
800
801	if (!vsi->xdp_rings[queue_id]->xsk_umem)
802		return -ENXIO;
803
804	ring = vsi->xdp_rings[queue_id];
805
806	/* The idea here is that if NAPI is running, mark a miss, so
807	 * it will run again. If not, trigger an interrupt and
808	 * schedule the NAPI from interrupt context. If NAPI would be
809	 * scheduled here, the interrupt affinity would not be
810	 * honored.
811	 */
812	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi))
813		i40e_force_wb(vsi, ring->q_vector);
814
815	return 0;
816}
817
818void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
819{
820	u16 i;
821
822	for (i = 0; i < rx_ring->count; i++) {
823		struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
824
825		if (!rx_bi->addr)
826			continue;
827
828		xsk_umem_fq_reuse(rx_ring->xsk_umem, rx_bi->handle);
829		rx_bi->addr = NULL;
830	}
831}
832
833/**
 834 * i40e_xsk_clean_tx_ring - Clean the XDP Tx ring on shutdown
835 * @xdp_ring: XDP Tx ring
836 **/
837void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
838{
839	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
840	struct xdp_umem *umem = tx_ring->xsk_umem;
841	struct i40e_tx_buffer *tx_bi;
842	u32 xsk_frames = 0;
843
844	while (ntc != ntu) {
845		tx_bi = &tx_ring->tx_bi[ntc];
846
847		if (tx_bi->xdpf)
848			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
849		else
850			xsk_frames++;
851
852		tx_bi->xdpf = NULL;
853
854		ntc++;
855		if (ntc >= tx_ring->count)
856			ntc = 0;
857	}
858
859	if (xsk_frames)
860		xsk_umem_complete_tx(umem, xsk_frames);
861}
862
863/**
864 * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have AF_XDP UMEM attached
865 * @vsi: vsi
866 *
867 * Returns true if any of the Rx rings has an AF_XDP UMEM attached
868 **/
869bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
870{
871	struct net_device *netdev = vsi->netdev;
872	int i;
873
874	for (i = 0; i < vsi->num_queue_pairs; i++) {
875		if (xdp_get_umem_from_qid(netdev, i))
876			return true;
877	}
878
879	return false;
880}