v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2019, Intel Corporation. */
   3
   4#include <linux/bpf_trace.h>
   5#include <net/xdp_sock_drv.h>
   6#include <net/xdp.h>
   7#include "ice.h"
   8#include "ice_base.h"
   9#include "ice_type.h"
  10#include "ice_xsk.h"
  11#include "ice_txrx.h"
  12#include "ice_txrx_lib.h"
  13#include "ice_lib.h"
  14
  15static struct xdp_buff **ice_xdp_buf(struct ice_rx_ring *rx_ring, u32 idx)
  16{
  17	return &rx_ring->xdp_buf[idx];
  18}
  19
  20/**
  21 * ice_qp_reset_stats - Resets all stats for rings of given index
  22 * @vsi: VSI that contains rings of interest
  23 * @q_idx: ring index in array
  24 */
  25static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
  26{
  27	struct ice_vsi_stats *vsi_stat;
  28	struct ice_pf *pf;
  29
  30	pf = vsi->back;
  31	if (!pf->vsi_stats)
  32		return;
  33
  34	vsi_stat = pf->vsi_stats[vsi->idx];
  35	if (!vsi_stat)
  36		return;
  37
  38	memset(&vsi_stat->rx_ring_stats[q_idx]->rx_stats, 0,
  39	       sizeof(vsi_stat->rx_ring_stats[q_idx]->rx_stats));
  40	memset(&vsi_stat->tx_ring_stats[q_idx]->stats, 0,
  41	       sizeof(vsi_stat->tx_ring_stats[q_idx]->stats));
  42	if (ice_is_xdp_ena_vsi(vsi))
  43		memset(&vsi->xdp_rings[q_idx]->ring_stats->stats, 0,
  44		       sizeof(vsi->xdp_rings[q_idx]->ring_stats->stats));
  45}
  46
  47/**
  48 * ice_qp_clean_rings - Cleans all the rings of a given index
  49 * @vsi: VSI that contains rings of interest
  50 * @q_idx: ring index in array
  51 */
  52static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
  53{
  54	ice_clean_tx_ring(vsi->tx_rings[q_idx]);
  55	if (ice_is_xdp_ena_vsi(vsi)) {
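		/* Wait an RCU grace period so any XDP transmit path that may
		 * still reference this XDP ring finishes before its buffers
		 * are cleaned.
		 */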
  56		synchronize_rcu();
  57		ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
  58	}
  59	ice_clean_rx_ring(vsi->rx_rings[q_idx]);
  60}
  61
  62/**
  63 * ice_qvec_toggle_napi - Enables/disables NAPI for a given q_vector
  64 * @vsi: VSI that has netdev
  65 * @q_vector: q_vector that has NAPI context
  66 * @enable: true for enable, false for disable
  67 */
  68static void
  69ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
  70		     bool enable)
  71{
  72	if (!vsi->netdev || !q_vector)
  73		return;
  74
  75	if (enable)
  76		napi_enable(&q_vector->napi);
  77	else
  78		napi_disable(&q_vector->napi);
  79}
  80
  81/**
  82 * ice_qvec_dis_irq - Mask off queue interrupt generation on given ring
  83 * @vsi: the VSI that contains queue vector being un-configured
  84 * @rx_ring: Rx ring that will have its IRQ disabled
  85 * @q_vector: queue vector
  86 */
  87static void
  88ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
  89		 struct ice_q_vector *q_vector)
  90{
  91	struct ice_pf *pf = vsi->back;
  92	struct ice_hw *hw = &pf->hw;
  93	int base = vsi->base_vector;
  94	u16 reg;
  95	u32 val;
  96
  97	/* QINT_TQCTL is being cleared in ice_vsi_stop_tx_ring, so handle
  98	 * here only QINT_RQCTL
  99	 */
 100	reg = rx_ring->reg_idx;
 101	val = rd32(hw, QINT_RQCTL(reg));
 102	val &= ~QINT_RQCTL_CAUSE_ENA_M;
 103	wr32(hw, QINT_RQCTL(reg), val);
 104
 105	if (q_vector) {
 106		u16 v_idx = q_vector->v_idx;
 107
 108		wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), 0);
 109		ice_flush(hw);
 110		synchronize_irq(pf->msix_entries[v_idx + base].vector);
 111	}
 112}
 113
 114/**
 115 * ice_qvec_cfg_msix - Enable IRQ for given queue vector
 116 * @vsi: the VSI that contains queue vector
 117 * @q_vector: queue vector
 118 */
 119static void
 120ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
 121{
 122	u16 reg_idx = q_vector->reg_idx;
 123	struct ice_pf *pf = vsi->back;
 124	struct ice_hw *hw = &pf->hw;
 125	struct ice_tx_ring *tx_ring;
 126	struct ice_rx_ring *rx_ring;
 127
 128	ice_cfg_itr(hw, q_vector);
 129
 130	ice_for_each_tx_ring(tx_ring, q_vector->tx)
 131		ice_cfg_txq_interrupt(vsi, tx_ring->reg_idx, reg_idx,
 132				      q_vector->tx.itr_idx);
 133
 134	ice_for_each_rx_ring(rx_ring, q_vector->rx)
 135		ice_cfg_rxq_interrupt(vsi, rx_ring->reg_idx, reg_idx,
 136				      q_vector->rx.itr_idx);
 137
 138	ice_flush(hw);
 139}
 140
 141/**
 142 * ice_qvec_ena_irq - Enable IRQ for given queue vector
 143 * @vsi: the VSI that contains queue vector
 144 * @q_vector: queue vector
 145 */
 146static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
 147{
 148	struct ice_pf *pf = vsi->back;
 149	struct ice_hw *hw = &pf->hw;
 150
 151	ice_irq_dynamic_ena(hw, vsi, q_vector);
 152
 153	ice_flush(hw);
 154}
 155
 156/**
 157 * ice_qp_dis - Disables a queue pair
 158 * @vsi: VSI of interest
 159 * @q_idx: ring index in array
 160 *
 161 * Returns 0 on success, negative on failure.
 162 */
 163static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
 164{
 165	struct ice_txq_meta txq_meta = { };
 166	struct ice_q_vector *q_vector;
 167	struct ice_tx_ring *tx_ring;
 168	struct ice_rx_ring *rx_ring;
 169	int timeout = 50;
 170	int err;
 171
 172	if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
 173		return -EINVAL;
 174
 175	tx_ring = vsi->tx_rings[q_idx];
 176	rx_ring = vsi->rx_rings[q_idx];
 177	q_vector = rx_ring->q_vector;
 178
 179	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) {
 180		timeout--;
 181		if (!timeout)
 182			return -EBUSY;
 183		usleep_range(1000, 2000);
 184	}
 185	netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
 186
 187	ice_qvec_dis_irq(vsi, rx_ring, q_vector);
 188
 189	ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
 190	err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
 191	if (err)
 192		return err;
 193	if (ice_is_xdp_ena_vsi(vsi)) {
 194		struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
 195
 196		memset(&txq_meta, 0, sizeof(txq_meta));
 197		ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
 198		err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
 199					   &txq_meta);
 200		if (err)
 201			return err;
 202	}
 203	err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
 204	if (err)
 205		return err;
 206	ice_clean_rx_ring(rx_ring);
 207
 208	ice_qvec_toggle_napi(vsi, q_vector, false);
 209	ice_qp_clean_rings(vsi, q_idx);
 210	ice_qp_reset_stats(vsi, q_idx);
 211
 212	return 0;
 213}
 214
 215/**
 216 * ice_qp_ena - Enables a queue pair
 217 * @vsi: VSI of interest
 218 * @q_idx: ring index in array
 219 *
 220 * Returns 0 on success, negative on failure.
 221 */
 222static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
 223{
 224	struct ice_aqc_add_tx_qgrp *qg_buf;
 225	struct ice_q_vector *q_vector;
 226	struct ice_tx_ring *tx_ring;
 227	struct ice_rx_ring *rx_ring;
 228	u16 size;
 229	int err;
 230
 231	if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
 232		return -EINVAL;
 233
 234	size = struct_size(qg_buf, txqs, 1);
 235	qg_buf = kzalloc(size, GFP_KERNEL);
 236	if (!qg_buf)
 237		return -ENOMEM;
 238
 239	qg_buf->num_txqs = 1;
 240
 241	tx_ring = vsi->tx_rings[q_idx];
 242	rx_ring = vsi->rx_rings[q_idx];
 243	q_vector = rx_ring->q_vector;
 244
 245	err = ice_vsi_cfg_txq(vsi, tx_ring, qg_buf);
 246	if (err)
 247		goto free_buf;
 248
 249	if (ice_is_xdp_ena_vsi(vsi)) {
 250		struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
 251
 252		memset(qg_buf, 0, size);
 253		qg_buf->num_txqs = 1;
 254		err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf);
 255		if (err)
 256			goto free_buf;
 257		ice_set_ring_xdp(xdp_ring);
 258		ice_tx_xsk_pool(vsi, q_idx);
 259	}
 260
 261	err = ice_vsi_cfg_rxq(rx_ring);
 262	if (err)
 263		goto free_buf;
 264
 265	ice_qvec_cfg_msix(vsi, q_vector);
 266
 267	err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
 268	if (err)
 269		goto free_buf;
 270
 271	clear_bit(ICE_CFG_BUSY, vsi->state);
 272	ice_qvec_toggle_napi(vsi, q_vector, true);
 273	ice_qvec_ena_irq(vsi, q_vector);
 274
 275	netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
 276free_buf:
 277	kfree(qg_buf);
 278	return err;
 279}
 280
 281/**
 282 * ice_xsk_pool_disable - disable a buffer pool region
 283 * @vsi: Current VSI
 284 * @qid: queue ID
 285 *
 286 * Returns 0 on success, negative on failure
 287 */
 288static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
 289{
 290	struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);
 291
 292	if (!pool)
 293		return -EINVAL;
 294
 295	clear_bit(qid, vsi->af_xdp_zc_qps);
 296	xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);
 297
 298	return 0;
 299}
 300
 301/**
 302 * ice_xsk_pool_enable - enable a buffer pool region
 303 * @vsi: Current VSI
 304 * @pool: pointer to a requested buffer pool region
 305 * @qid: queue ID
 306 *
 307 * Returns 0 on success, negative on failure
 308 */
 309static int
 310ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
 311{
 312	int err;
 313
 314	if (vsi->type != ICE_VSI_PF)
 315		return -EINVAL;
 316
 317	if (qid >= vsi->netdev->real_num_rx_queues ||
 318	    qid >= vsi->netdev->real_num_tx_queues)
 319		return -EINVAL;
 320
 321	err = xsk_pool_dma_map(pool, ice_pf_to_dev(vsi->back),
 322			       ICE_RX_DMA_ATTR);
 323	if (err)
 324		return err;
 325
 326	set_bit(qid, vsi->af_xdp_zc_qps);
 327
 328	return 0;
 329}
 330
 331/**
  332 * ice_realloc_rx_xdp_bufs - reallocate the SW ring for either XSK or normal buffers
 333 * @rx_ring: Rx ring
 334 * @pool_present: is pool for XSK present
 335 *
  336 * Try to allocate memory; return -ENOMEM if the allocation fails.
  337 * On success, substitute the old buffer array with the newly allocated one.
 338 * Returns 0 on success, negative on failure
 339 */
 340static int
 341ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present)
 342{
 343	size_t elem_size = pool_present ? sizeof(*rx_ring->xdp_buf) :
 344					  sizeof(*rx_ring->rx_buf);
 345	void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL);
 346
 347	if (!sw_ring)
 348		return -ENOMEM;
 349
 350	if (pool_present) {
 351		kfree(rx_ring->rx_buf);
 352		rx_ring->rx_buf = NULL;
 353		rx_ring->xdp_buf = sw_ring;
 354	} else {
 355		kfree(rx_ring->xdp_buf);
 356		rx_ring->xdp_buf = NULL;
 357		rx_ring->rx_buf = sw_ring;
 358	}
 359
 360	return 0;
 361}
 362
 363/**
 364 * ice_realloc_zc_buf - reallocate XDP ZC queue pairs
 365 * @vsi: Current VSI
 366 * @zc: is zero copy set
 367 *
  368 * Reallocate buffers for the rx_rings that might be used by XSK.
  369 * XDP requires more memory than rx_buf provides.
 370 * Returns 0 on success, negative on failure
 371 */
 372int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc)
 373{
 374	struct ice_rx_ring *rx_ring;
 375	unsigned long q;
 376
 377	for_each_set_bit(q, vsi->af_xdp_zc_qps,
 378			 max_t(int, vsi->alloc_txq, vsi->alloc_rxq)) {
 379		rx_ring = vsi->rx_rings[q];
 380		if (ice_realloc_rx_xdp_bufs(rx_ring, zc))
 381			return -ENOMEM;
 382	}
 383
 384	return 0;
 385}
 386
 387/**
 388 * ice_xsk_pool_setup - enable/disable a buffer pool region depending on its state
 389 * @vsi: Current VSI
 390 * @pool: buffer pool to enable/associate to a ring, NULL to disable
 391 * @qid: queue ID
 392 *
 393 * Returns 0 on success, negative on failure
 394 */
 395int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
 396{
 397	bool if_running, pool_present = !!pool;
 398	int ret = 0, pool_failure = 0;
 399
 400	if (qid >= vsi->num_rxq || qid >= vsi->num_txq) {
 401		netdev_err(vsi->netdev, "Please use queue id in scope of combined queues count\n");
 402		pool_failure = -EINVAL;
 403		goto failure;
 404	}
 405
 406	if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
 407
 408	if (if_running) {
 409		struct ice_rx_ring *rx_ring = vsi->rx_rings[qid];
 410
 411		ret = ice_qp_dis(vsi, qid);
 412		if (ret) {
 413			netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
 414			goto xsk_pool_if_up;
 415		}
 416
 417		ret = ice_realloc_rx_xdp_bufs(rx_ring, pool_present);
 418		if (ret)
 419			goto xsk_pool_if_up;
 420	}
 421
 422	pool_failure = pool_present ? ice_xsk_pool_enable(vsi, pool, qid) :
 423				      ice_xsk_pool_disable(vsi, qid);
 424
 425xsk_pool_if_up:
 426	if (if_running) {
 427		ret = ice_qp_ena(vsi, qid);
 428		if (!ret && pool_present)
 429			napi_schedule(&vsi->rx_rings[qid]->xdp_ring->q_vector->napi);
 430		else if (ret)
 431			netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
 432	}
 433
 434failure:
 435	if (pool_failure) {
 436		netdev_err(vsi->netdev, "Could not %sable buffer pool, error = %d\n",
 437			   pool_present ? "en" : "dis", pool_failure);
 438		return pool_failure;
 439	}
 440
 441	return ret;
 442}
 443
 444/**
  445 * ice_fill_rx_descs - pick buffers from the XSK buffer pool and use them
 446 * @pool: XSK Buffer pool to pull the buffers from
 447 * @xdp: SW ring of xdp_buff that will hold the buffers
 448 * @rx_desc: Pointer to Rx descriptors that will be filled
 449 * @count: The number of buffers to allocate
 450 *
 451 * This function allocates a number of Rx buffers from the fill ring
 452 * or the internal recycle mechanism and places them on the Rx ring.
 453 *
  454 * Note that ring wrap should be handled by the caller of this function.
  455 *
  456 * Returns the number of allocated Rx descriptors
 457 */
 458static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
 459			     union ice_32b_rx_flex_desc *rx_desc, u16 count)
 460{
 461	dma_addr_t dma;
 462	u16 buffs;
 463	int i;
 464
 465	buffs = xsk_buff_alloc_batch(pool, xdp, count);
 466	for (i = 0; i < buffs; i++) {
 467		dma = xsk_buff_xdp_get_dma(*xdp);
 468		rx_desc->read.pkt_addr = cpu_to_le64(dma);
 469		rx_desc->wb.status_error0 = 0;
 470
 471		rx_desc++;
 472		xdp++;
 473	}
 474
 475	return buffs;
 476}
 477
 478/**
 479 * __ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
 480 * @rx_ring: Rx ring
 481 * @count: The number of buffers to allocate
 482 *
  483 * Place @count descriptors onto the Rx ring. Handle the ring wrap for
  484 * the case where the space from next_to_use up to the end of the ring
  485 * is less than @count. Finally, do a tail bump.
 486 *
 487 * Returns true if all allocations were successful, false if any fail.
 488 */
 489static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
 490{
 491	u32 nb_buffs_extra = 0, nb_buffs = 0;
 492	union ice_32b_rx_flex_desc *rx_desc;
 493	u16 ntu = rx_ring->next_to_use;
 494	u16 total_count = count;
 495	struct xdp_buff **xdp;
 496
 497	rx_desc = ICE_RX_DESC(rx_ring, ntu);
 498	xdp = ice_xdp_buf(rx_ring, ntu);
 499
 500	if (ntu + count >= rx_ring->count) {
 501		nb_buffs_extra = ice_fill_rx_descs(rx_ring->xsk_pool, xdp,
 502						   rx_desc,
 503						   rx_ring->count - ntu);
 504		if (nb_buffs_extra != rx_ring->count - ntu) {
 505			ntu += nb_buffs_extra;
 506			goto exit;
 507		}
 508		rx_desc = ICE_RX_DESC(rx_ring, 0);
 509		xdp = ice_xdp_buf(rx_ring, 0);
 510		ntu = 0;
 511		count -= nb_buffs_extra;
 512		ice_release_rx_desc(rx_ring, 0);
 513	}
 514
 515	nb_buffs = ice_fill_rx_descs(rx_ring->xsk_pool, xdp, rx_desc, count);
 516
 517	ntu += nb_buffs;
 518	if (ntu == rx_ring->count)
 519		ntu = 0;
 520
 521exit:
 522	if (rx_ring->next_to_use != ntu)
 523		ice_release_rx_desc(rx_ring, ntu);
 524
 525	return total_count == (nb_buffs_extra + nb_buffs);
 526}
 527
 528/**
 529 * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
 530 * @rx_ring: Rx ring
 531 * @count: The number of buffers to allocate
 532 *
 533 * Wrapper for internal allocation routine; figure out how many tail
 534 * bumps should take place based on the given threshold
 535 *
 536 * Returns true if all calls to internal alloc routine succeeded
 537 */
 538bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
 539{
 540	u16 rx_thresh = ICE_RING_QUARTER(rx_ring);
 541	u16 leftover, i, tail_bumps;
 542
 543	tail_bumps = count / rx_thresh;
 544	leftover = count - (tail_bumps * rx_thresh);
 545
 546	for (i = 0; i < tail_bumps; i++)
 547		if (!__ice_alloc_rx_bufs_zc(rx_ring, rx_thresh))
 548			return false;
 549	return __ice_alloc_rx_bufs_zc(rx_ring, leftover);
 550}
 551
 552/**
 553 * ice_bump_ntc - Bump the next_to_clean counter of an Rx ring
 554 * @rx_ring: Rx ring
 555 */
 556static void ice_bump_ntc(struct ice_rx_ring *rx_ring)
 557{
 558	int ntc = rx_ring->next_to_clean + 1;
 559
 560	ntc = (ntc < rx_ring->count) ? ntc : 0;
 561	rx_ring->next_to_clean = ntc;
 562	prefetch(ICE_RX_DESC(rx_ring, ntc));
 563}
 564
 565/**
 566 * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
 567 * @rx_ring: Rx ring
 568 * @xdp: Pointer to XDP buffer
 569 *
 570 * This function allocates a new skb from a zero-copy Rx buffer.
 571 *
 572 * Returns the skb on success, NULL on failure.
 573 */
 574static struct sk_buff *
 575ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
 576{
 577	unsigned int totalsize = xdp->data_end - xdp->data_meta;
 578	unsigned int metasize = xdp->data - xdp->data_meta;
 579	struct sk_buff *skb;
 580
 581	net_prefetch(xdp->data_meta);
 582
 583	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
 584			       GFP_ATOMIC | __GFP_NOWARN);
 585	if (unlikely(!skb))
 586		return NULL;
 587
 588	memcpy(__skb_put(skb, totalsize), xdp->data_meta,
 589	       ALIGN(totalsize, sizeof(long)));
 590
 591	if (metasize) {
 592		skb_metadata_set(skb, metasize);
 593		__skb_pull(skb, metasize);
 594	}
 595
 596	xsk_buff_free(xdp);
 597	return skb;
 598}
 599
 600/**
 601 * ice_run_xdp_zc - Executes an XDP program in zero-copy path
 602 * @rx_ring: Rx ring
 603 * @xdp: xdp_buff used as input to the XDP program
 604 * @xdp_prog: XDP program to run
 605 * @xdp_ring: ring to be used for XDP_TX action
 606 *
 607 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 608 */
 609static int
 610ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
 611	       struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
 612{
 613	int err, result = ICE_XDP_PASS;
 614	u32 act;
 615
 616	act = bpf_prog_run_xdp(xdp_prog, xdp);
 617
 618	if (likely(act == XDP_REDIRECT)) {
 619		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
 620		if (!err)
 621			return ICE_XDP_REDIR;
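		/* -ENOBUFS on a need_wakeup pool means the AF_XDP Rx queue is
		 * full; ICE_XDP_EXIT lets the caller stop polling early so
		 * user space can catch up.
		 */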
 622		if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
 623			result = ICE_XDP_EXIT;
 624		else
 625			result = ICE_XDP_CONSUMED;
 626		goto out_failure;
 627	}
 628
 629	switch (act) {
 630	case XDP_PASS:
 631		break;
 632	case XDP_TX:
 633		result = ice_xmit_xdp_buff(xdp, xdp_ring);
 634		if (result == ICE_XDP_CONSUMED)
 635			goto out_failure;
 636		break;
 637	case XDP_DROP:
 638		result = ICE_XDP_CONSUMED;
 639		break;
 640	default:
 641		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
 642		fallthrough;
 643	case XDP_ABORTED:
 644		result = ICE_XDP_CONSUMED;
 645out_failure:
 646		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
 647		break;
 648	}
 649
 650	return result;
 651}
 652
 653/**
 654 * ice_clean_rx_irq_zc - consumes packets from the hardware ring
 655 * @rx_ring: AF_XDP Rx ring
 656 * @budget: NAPI budget
 657 *
 658 * Returns number of processed packets on success, remaining budget on failure.
 659 */
 660int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
 661{
 662	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 663	struct ice_tx_ring *xdp_ring;
 664	unsigned int xdp_xmit = 0;
 665	struct bpf_prog *xdp_prog;
 666	bool failure = false;
 667	int entries_to_alloc;
 668
  669	/* ZC path is enabled only when an XDP program is set,
  670	 * so here it cannot be NULL
 671	 */
 672	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
 673	xdp_ring = rx_ring->xdp_ring;
 674
 675	while (likely(total_rx_packets < (unsigned int)budget)) {
 676		union ice_32b_rx_flex_desc *rx_desc;
 677		unsigned int size, xdp_res = 0;
 678		struct xdp_buff *xdp;
 679		struct sk_buff *skb;
 680		u16 stat_err_bits;
 681		u16 vlan_tag = 0;
 682		u16 rx_ptype;
 683
 684		rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
 685
 686		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
 687		if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
 688			break;
 689
 690		/* This memory barrier is needed to keep us from reading
 691		 * any other fields out of the rx_desc until we have
 692		 * verified the descriptor has been written back.
 693		 */
 694		dma_rmb();
 695
 696		if (unlikely(rx_ring->next_to_clean == rx_ring->next_to_use))
 697			break;
 698
 699		xdp = *ice_xdp_buf(rx_ring, rx_ring->next_to_clean);
 700
 701		size = le16_to_cpu(rx_desc->wb.pkt_len) &
 702				   ICE_RX_FLX_DESC_PKT_LEN_M;
 703		if (!size) {
 704			xdp->data = NULL;
 705			xdp->data_end = NULL;
 706			xdp->data_hard_start = NULL;
 707			xdp->data_meta = NULL;
 708			goto construct_skb;
 709		}
 710
 711		xsk_buff_set_size(xdp, size);
 712		xsk_buff_dma_sync_for_cpu(xdp, rx_ring->xsk_pool);
 713
 714		xdp_res = ice_run_xdp_zc(rx_ring, xdp, xdp_prog, xdp_ring);
 715		if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))) {
 716			xdp_xmit |= xdp_res;
 717		} else if (xdp_res == ICE_XDP_EXIT) {
 718			failure = true;
 719			break;
 720		} else if (xdp_res == ICE_XDP_CONSUMED) {
 721			xsk_buff_free(xdp);
 722		} else if (xdp_res == ICE_XDP_PASS) {
 723			goto construct_skb;
 724		}
 725
 726		total_rx_bytes += size;
 727		total_rx_packets++;
 728
 729		ice_bump_ntc(rx_ring);
 730		continue;
 731
 732construct_skb:
 733		/* XDP_PASS path */
 734		skb = ice_construct_skb_zc(rx_ring, xdp);
 735		if (!skb) {
 736			rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
 737			break;
 738		}
 739
 740		ice_bump_ntc(rx_ring);
 741
 742		if (eth_skb_pad(skb)) {
 743			skb = NULL;
 744			continue;
 745		}
 746
 747		total_rx_bytes += skb->len;
 748		total_rx_packets++;
 749
 750		vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc);
 751
 752		rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
 753				       ICE_RX_FLEX_DESC_PTYPE_M;
 754
 755		ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
 756		ice_receive_skb(rx_ring, skb, vlan_tag);
 757	}
 758
 759	entries_to_alloc = ICE_DESC_UNUSED(rx_ring);
 760	if (entries_to_alloc > ICE_RING_QUARTER(rx_ring))
 761		failure |= !ice_alloc_rx_bufs_zc(rx_ring, entries_to_alloc);
 762
 763	ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
 764	ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
 765
 766	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
 767		if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
 768			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
 769		else
 770			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);
 771
 772		return (int)total_rx_packets;
 773	}
 774
 775	return failure ? budget : (int)total_rx_packets;
 776}
 777
 778/**
 779 * ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer
 780 * @xdp_ring: XDP Tx ring
 781 * @tx_buf: Tx buffer to clean
 782 */
 783static void
 784ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf)
 785{
 786	page_frag_free(tx_buf->raw_buf);
 787	xdp_ring->xdp_tx_active--;
 788	dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
 789			 dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
 790	dma_unmap_len_set(tx_buf, len, 0);
 791}
 792
 793/**
 794 * ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ
 795 * @xdp_ring: XDP Tx ring
 796 */
 797static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
 798{
 799	u16 ntc = xdp_ring->next_to_clean;
 800	struct ice_tx_desc *tx_desc;
 801	u16 cnt = xdp_ring->count;
 802	struct ice_tx_buf *tx_buf;
 803	u16 completed_frames = 0;
 804	u16 xsk_frames = 0;
 805	u16 last_rs;
 806	int i;
 807
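	/* The descriptor one behind next_to_use is the last one that had the
	 * RS bit set; if its DD bit is set, HW has completed every descriptor
	 * up to and including it.
	 */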
 808	last_rs = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : cnt - 1;
 809	tx_desc = ICE_TX_DESC(xdp_ring, last_rs);
 810	if ((tx_desc->cmd_type_offset_bsz &
 811	    cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
 812		if (last_rs >= ntc)
 813			completed_frames = last_rs - ntc + 1;
 814		else
 815			completed_frames = last_rs + cnt - ntc + 1;
 816	}
 817
 818	if (!completed_frames)
 819		return;
 820
 821	if (likely(!xdp_ring->xdp_tx_active)) {
 822		xsk_frames = completed_frames;
 823		goto skip;
 824	}
 825
 826	ntc = xdp_ring->next_to_clean;
 827	for (i = 0; i < completed_frames; i++) {
 828		tx_buf = &xdp_ring->tx_buf[ntc];
 829
 830		if (tx_buf->raw_buf) {
 831			ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
 832			tx_buf->raw_buf = NULL;
 833		} else {
 834			xsk_frames++;
 835		}
 836
 837		ntc++;
 838		if (ntc >= xdp_ring->count)
 839			ntc = 0;
 840	}
 841skip:
 842	tx_desc->cmd_type_offset_bsz = 0;
 843	xdp_ring->next_to_clean += completed_frames;
 844	if (xdp_ring->next_to_clean >= cnt)
 845		xdp_ring->next_to_clean -= cnt;
 846	if (xsk_frames)
 847		xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
 848}
 849
 850/**
 851 * ice_xmit_pkt - produce a single HW Tx descriptor out of AF_XDP descriptor
 852 * @xdp_ring: XDP ring to produce the HW Tx descriptor on
 853 * @desc: AF_XDP descriptor to pull the DMA address and length from
 854 * @total_bytes: bytes accumulator that will be used for stats update
 855 */
 856static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, struct xdp_desc *desc,
 857			 unsigned int *total_bytes)
 858{
 859	struct ice_tx_desc *tx_desc;
 860	dma_addr_t dma;
 861
 862	dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);
 863	xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);
 864
 865	tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
 866	tx_desc->buf_addr = cpu_to_le64(dma);
 867	tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP,
 868						      0, desc->len, 0);
 869
 870	*total_bytes += desc->len;
 871}
 872
 873/**
 874 * ice_xmit_pkt_batch - produce a batch of HW Tx descriptors out of AF_XDP descriptors
 875 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 876 * @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
 877 * @total_bytes: bytes accumulator that will be used for stats update
 878 */
 879static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
 880			       unsigned int *total_bytes)
 881{
 882	u16 ntu = xdp_ring->next_to_use;
 883	struct ice_tx_desc *tx_desc;
 884	u32 i;
 885
 886	loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
 887		dma_addr_t dma;
 888
 889		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, descs[i].addr);
 890		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, descs[i].len);
 891
 892		tx_desc = ICE_TX_DESC(xdp_ring, ntu++);
 893		tx_desc->buf_addr = cpu_to_le64(dma);
 894		tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP,
 895							      0, descs[i].len, 0);
 896
 897		*total_bytes += descs[i].len;
 898	}
 899
 900	xdp_ring->next_to_use = ntu;
 901}
 902
 903/**
 904 * ice_fill_tx_hw_ring - produce the number of Tx descriptors onto ring
 905 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 906 * @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
  907 * @nb_pkts: count of packets to be sent
 908 * @total_bytes: bytes accumulator that will be used for stats update
 909 */
 910static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
 911				u32 nb_pkts, unsigned int *total_bytes)
 912{
 913	u32 batched, leftover, i;
 914
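	/* ALIGN_DOWN() and the mask below both rely on PKTS_PER_BATCH being a
	 * power of two; leftover is the remainder not covered by the unrolled
	 * batches.
	 */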
 915	batched = ALIGN_DOWN(nb_pkts, PKTS_PER_BATCH);
 916	leftover = nb_pkts & (PKTS_PER_BATCH - 1);
 917	for (i = 0; i < batched; i += PKTS_PER_BATCH)
 918		ice_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
 919	for (; i < batched + leftover; i++)
 920		ice_xmit_pkt(xdp_ring, &descs[i], total_bytes);
 921}
 922
 923/**
 924 * ice_set_rs_bit - set RS bit on last produced descriptor (one behind current NTU)
 925 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 926 */
 927static void ice_set_rs_bit(struct ice_tx_ring *xdp_ring)
 928{
 929	u16 ntu = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1;
 930	struct ice_tx_desc *tx_desc;
 931
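	/* Request a write-back (RS) only on the last produced descriptor;
	 * ice_clean_xdp_irq_zc() checks its DD bit to learn how many frames
	 * have completed.
	 */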
 932	tx_desc = ICE_TX_DESC(xdp_ring, ntu);
 933	tx_desc->cmd_type_offset_bsz |=
 934		cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
 935}
 936
 937/**
 938 * ice_xmit_zc - take entries from XSK Tx ring and place them onto HW Tx ring
 939 * @xdp_ring: XDP ring to produce the HW Tx descriptors on
 940 *
 941 * Returns true if there is no more work that needs to be done, false otherwise
 942 */
 943bool ice_xmit_zc(struct ice_tx_ring *xdp_ring)
 944{
 945	struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
 946	u32 nb_pkts, nb_processed = 0;
 947	unsigned int total_bytes = 0;
 948	int budget;
 949
 950	ice_clean_xdp_irq_zc(xdp_ring);
 951
 952	budget = ICE_DESC_UNUSED(xdp_ring);
 953	budget = min_t(u16, budget, ICE_RING_QUARTER(xdp_ring));
 954
 955	nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
 956	if (!nb_pkts)
 957		return true;
 958
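	/* If the batch would run past the end of the ring, produce the
	 * descriptors that fit up to the end first, then wrap next_to_use to
	 * 0 for the remainder.
	 */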
 959	if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
 960		nb_processed = xdp_ring->count - xdp_ring->next_to_use;
 961		ice_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
 962		xdp_ring->next_to_use = 0;
 963	}
 964
 965	ice_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
 966			    &total_bytes);
 967
 968	ice_set_rs_bit(xdp_ring);
 969	ice_xdp_ring_update_tail(xdp_ring);
 970	ice_update_tx_ring_stats(xdp_ring, nb_pkts, total_bytes);
 971
 972	if (xsk_uses_need_wakeup(xdp_ring->xsk_pool))
 973		xsk_set_tx_need_wakeup(xdp_ring->xsk_pool);
 974
 975	return nb_pkts < budget;
 976}
 977
 978/**
 979 * ice_xsk_wakeup - Implements ndo_xsk_wakeup
 980 * @netdev: net_device
 981 * @queue_id: queue to wake up
 982 * @flags: ignored in our case, since we have Rx and Tx in the same NAPI
 983 *
 984 * Returns negative on error, zero otherwise.
 985 */
 986int
 987ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
 988	       u32 __always_unused flags)
 989{
 990	struct ice_netdev_priv *np = netdev_priv(netdev);
 991	struct ice_q_vector *q_vector;
 992	struct ice_vsi *vsi = np->vsi;
 993	struct ice_tx_ring *ring;
 994
 995	if (test_bit(ICE_VSI_DOWN, vsi->state))
 996		return -ENETDOWN;
 997
 998	if (!ice_is_xdp_ena_vsi(vsi))
 999		return -EINVAL;
1000
1001	if (queue_id >= vsi->num_txq || queue_id >= vsi->num_rxq)
1002		return -EINVAL;
1003
1004	ring = vsi->rx_rings[queue_id]->xdp_ring;
1005
1006	if (!ring->xsk_pool)
1007		return -EINVAL;
1008
1009	/* The idea here is that if NAPI is running, mark a miss, so
1010	 * it will run again. If not, trigger an interrupt and
1011	 * schedule the NAPI from interrupt context. If NAPI would be
1012	 * scheduled here, the interrupt affinity would not be
1013	 * honored.
1014	 */
1015	q_vector = ring->q_vector;
1016	if (!napi_if_scheduled_mark_missed(&q_vector->napi))
1017		ice_trigger_sw_intr(&vsi->back->hw, q_vector);
1018
1019	return 0;
1020}
1021
1022/**
1023 * ice_xsk_any_rx_ring_ena - Checks if Rx rings have AF_XDP buff pool attached
1024 * @vsi: VSI to be checked
1025 *
1026 * Returns true if any of the Rx rings has an AF_XDP buff pool attached
1027 */
1028bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
1029{
1030	int i;
1031
1032	ice_for_each_rxq(vsi, i) {
1033		if (xsk_get_pool_from_qid(vsi->netdev, i))
1034			return true;
1035	}
1036
1037	return false;
1038}
1039
1040/**
1041 * ice_xsk_clean_rx_ring - clean buffer pool queues connected to a given Rx ring
1042 * @rx_ring: ring to be cleaned
1043 */
1044void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring)
1045{
1046	u16 ntc = rx_ring->next_to_clean;
1047	u16 ntu = rx_ring->next_to_use;
1048
1049	while (ntc != ntu) {
1050		struct xdp_buff *xdp = *ice_xdp_buf(rx_ring, ntc);
1051
1052		xsk_buff_free(xdp);
1053		ntc++;
1054		if (ntc >= rx_ring->count)
1055			ntc = 0;
1056	}
1057}
1058
1059/**
1060 * ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its buffer pool queues
1061 * @xdp_ring: XDP_Tx ring
1062 */
1063void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring)
1064{
1065	u16 ntc = xdp_ring->next_to_clean, ntu = xdp_ring->next_to_use;
1066	u32 xsk_frames = 0;
1067
1068	while (ntc != ntu) {
1069		struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];
1070
1071		if (tx_buf->raw_buf)
1072			ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
1073		else
1074			xsk_frames++;
1075
1076		tx_buf->raw_buf = NULL;
1077
1078		ntc++;
1079		if (ntc >= xdp_ring->count)
1080			ntc = 0;
1081	}
1082
1083	if (xsk_frames)
1084		xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
1085}
v5.14.15
  1// SPDX-License-Identifier: GPL-2.0
  2/* Copyright (c) 2019, Intel Corporation. */
  3
  4#include <linux/bpf_trace.h>
  5#include <net/xdp_sock_drv.h>
  6#include <net/xdp.h>
  7#include "ice.h"
  8#include "ice_base.h"
  9#include "ice_type.h"
 10#include "ice_xsk.h"
 11#include "ice_txrx.h"
 12#include "ice_txrx_lib.h"
 13#include "ice_lib.h"
 14
 15/**
 16 * ice_qp_reset_stats - Resets all stats for rings of given index
 17 * @vsi: VSI that contains rings of interest
 18 * @q_idx: ring index in array
 19 */
 20static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
 21{
 22	memset(&vsi->rx_rings[q_idx]->rx_stats, 0,
 23	       sizeof(vsi->rx_rings[q_idx]->rx_stats));
 24	memset(&vsi->tx_rings[q_idx]->stats, 0,
 25	       sizeof(vsi->tx_rings[q_idx]->stats));
 26	if (ice_is_xdp_ena_vsi(vsi))
 27		memset(&vsi->xdp_rings[q_idx]->stats, 0,
 28		       sizeof(vsi->xdp_rings[q_idx]->stats));
 29}
 30
 31/**
 32 * ice_qp_clean_rings - Cleans all the rings of a given index
 33 * @vsi: VSI that contains rings of interest
 34 * @q_idx: ring index in array
 35 */
 36static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
 37{
 38	ice_clean_tx_ring(vsi->tx_rings[q_idx]);
 39	if (ice_is_xdp_ena_vsi(vsi))
 40		ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
 41	ice_clean_rx_ring(vsi->rx_rings[q_idx]);
 42}
 43
 44/**
 45 * ice_qvec_toggle_napi - Enables/disables NAPI for a given q_vector
 46 * @vsi: VSI that has netdev
 47 * @q_vector: q_vector that has NAPI context
 48 * @enable: true for enable, false for disable
 49 */
 50static void
 51ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
 52		     bool enable)
 53{
 54	if (!vsi->netdev || !q_vector)
 55		return;
 56
 57	if (enable)
 58		napi_enable(&q_vector->napi);
 59	else
 60		napi_disable(&q_vector->napi);
 61}
 62
 63/**
 64 * ice_qvec_dis_irq - Mask off queue interrupt generation on given ring
 65 * @vsi: the VSI that contains queue vector being un-configured
 66 * @rx_ring: Rx ring that will have its IRQ disabled
 67 * @q_vector: queue vector
 68 */
 69static void
 70ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_ring *rx_ring,
 71		 struct ice_q_vector *q_vector)
 72{
 73	struct ice_pf *pf = vsi->back;
 74	struct ice_hw *hw = &pf->hw;
 75	int base = vsi->base_vector;
 76	u16 reg;
 77	u32 val;
 78
 79	/* QINT_TQCTL is being cleared in ice_vsi_stop_tx_ring, so handle
 80	 * here only QINT_RQCTL
 81	 */
 82	reg = rx_ring->reg_idx;
 83	val = rd32(hw, QINT_RQCTL(reg));
 84	val &= ~QINT_RQCTL_CAUSE_ENA_M;
 85	wr32(hw, QINT_RQCTL(reg), val);
 86
 87	if (q_vector) {
 88		u16 v_idx = q_vector->v_idx;
 89
 90		wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), 0);
 91		ice_flush(hw);
 92		synchronize_irq(pf->msix_entries[v_idx + base].vector);
 93	}
 94}
 95
 96/**
 97 * ice_qvec_cfg_msix - Enable IRQ for given queue vector
 98 * @vsi: the VSI that contains queue vector
 99 * @q_vector: queue vector
100 */
101static void
102ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
103{
104	u16 reg_idx = q_vector->reg_idx;
105	struct ice_pf *pf = vsi->back;
106	struct ice_hw *hw = &pf->hw;
107	struct ice_ring *ring;
108
109	ice_cfg_itr(hw, q_vector);
110
111	ice_for_each_ring(ring, q_vector->tx)
112		ice_cfg_txq_interrupt(vsi, ring->reg_idx, reg_idx,
113				      q_vector->tx.itr_idx);
114
115	ice_for_each_ring(ring, q_vector->rx)
116		ice_cfg_rxq_interrupt(vsi, ring->reg_idx, reg_idx,
117				      q_vector->rx.itr_idx);
118
119	ice_flush(hw);
120}
121
122/**
123 * ice_qvec_ena_irq - Enable IRQ for given queue vector
124 * @vsi: the VSI that contains queue vector
125 * @q_vector: queue vector
126 */
127static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
128{
129	struct ice_pf *pf = vsi->back;
130	struct ice_hw *hw = &pf->hw;
131
132	ice_irq_dynamic_ena(hw, vsi, q_vector);
133
134	ice_flush(hw);
135}
136
137/**
138 * ice_qp_dis - Disables a queue pair
139 * @vsi: VSI of interest
140 * @q_idx: ring index in array
141 *
142 * Returns 0 on success, negative on failure.
143 */
144static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
145{
146	struct ice_txq_meta txq_meta = { };
147	struct ice_ring *tx_ring, *rx_ring;
148	struct ice_q_vector *q_vector;
149	int timeout = 50;
150	int err;
151
152	if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
153		return -EINVAL;
154
155	tx_ring = vsi->tx_rings[q_idx];
156	rx_ring = vsi->rx_rings[q_idx];
157	q_vector = rx_ring->q_vector;
158
159	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) {
160		timeout--;
161		if (!timeout)
162			return -EBUSY;
163		usleep_range(1000, 2000);
164	}
165	netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
166
167	ice_qvec_dis_irq(vsi, rx_ring, q_vector);
168
169	ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
170	err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
171	if (err)
172		return err;
173	if (ice_is_xdp_ena_vsi(vsi)) {
174		struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx];
175
176		memset(&txq_meta, 0, sizeof(txq_meta));
177		ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
178		err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
179					   &txq_meta);
180		if (err)
181			return err;
182	}
183	err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
184	if (err)
185		return err;
186
187	ice_qvec_toggle_napi(vsi, q_vector, false);
188	ice_qp_clean_rings(vsi, q_idx);
189	ice_qp_reset_stats(vsi, q_idx);
190
191	return 0;
192}
193
194/**
195 * ice_qp_ena - Enables a queue pair
196 * @vsi: VSI of interest
197 * @q_idx: ring index in array
198 *
199 * Returns 0 on success, negative on failure.
200 */
201static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
202{
203	struct ice_aqc_add_tx_qgrp *qg_buf;
204	struct ice_ring *tx_ring, *rx_ring;
205	struct ice_q_vector *q_vector;
206	u16 size;
207	int err;
208
209	if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
210		return -EINVAL;
211
212	size = struct_size(qg_buf, txqs, 1);
213	qg_buf = kzalloc(size, GFP_KERNEL);
214	if (!qg_buf)
215		return -ENOMEM;
216
217	qg_buf->num_txqs = 1;
218
219	tx_ring = vsi->tx_rings[q_idx];
220	rx_ring = vsi->rx_rings[q_idx];
221	q_vector = rx_ring->q_vector;
222
223	err = ice_vsi_cfg_txq(vsi, tx_ring, qg_buf);
224	if (err)
225		goto free_buf;
226
227	if (ice_is_xdp_ena_vsi(vsi)) {
228		struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx];
229
230		memset(qg_buf, 0, size);
231		qg_buf->num_txqs = 1;
232		err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf);
233		if (err)
234			goto free_buf;
235		ice_set_ring_xdp(xdp_ring);
236		xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
237	}
238
239	err = ice_vsi_cfg_rxq(rx_ring);
240	if (err)
241		goto free_buf;
242
243	ice_qvec_cfg_msix(vsi, q_vector);
244
245	err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
246	if (err)
247		goto free_buf;
248
249	clear_bit(ICE_CFG_BUSY, vsi->state);
250	ice_qvec_toggle_napi(vsi, q_vector, true);
251	ice_qvec_ena_irq(vsi, q_vector);
252
253	netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
254free_buf:
255	kfree(qg_buf);
256	return err;
257}
258
259/**
260 * ice_xsk_pool_disable - disable a buffer pool region
261 * @vsi: Current VSI
262 * @qid: queue ID
263 *
264 * Returns 0 on success, negative on failure
265 */
266static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
267{
268	struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);
269
270	if (!pool)
271		return -EINVAL;
272
273	clear_bit(qid, vsi->af_xdp_zc_qps);
274	xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);
275
276	return 0;
277}
278
279/**
280 * ice_xsk_pool_enable - enable a buffer pool region
281 * @vsi: Current VSI
282 * @pool: pointer to a requested buffer pool region
283 * @qid: queue ID
284 *
285 * Returns 0 on success, negative on failure
286 */
287static int
288ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
289{
290	int err;
291
292	if (vsi->type != ICE_VSI_PF)
293		return -EINVAL;
294
295	if (qid >= vsi->netdev->real_num_rx_queues ||
296	    qid >= vsi->netdev->real_num_tx_queues)
297		return -EINVAL;
298
299	err = xsk_pool_dma_map(pool, ice_pf_to_dev(vsi->back),
300			       ICE_RX_DMA_ATTR);
301	if (err)
302		return err;
303
304	set_bit(qid, vsi->af_xdp_zc_qps);
305
306	return 0;
307}
308
309/**
310 * ice_xsk_pool_setup - enable/disable a buffer pool region depending on its state
311 * @vsi: Current VSI
312 * @pool: buffer pool to enable/associate to a ring, NULL to disable
313 * @qid: queue ID
314 *
315 * Returns 0 on success, negative on failure
316 */
317int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
318{
319	bool if_running, pool_present = !!pool;
320	int ret = 0, pool_failure = 0;
321
322	if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
323
324	if (if_running) {
325		ret = ice_qp_dis(vsi, qid);
326		if (ret) {
327			netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
328			goto xsk_pool_if_up;
329		}
330	}
331
332	pool_failure = pool_present ? ice_xsk_pool_enable(vsi, pool, qid) :
333				      ice_xsk_pool_disable(vsi, qid);
334
335xsk_pool_if_up:
336	if (if_running) {
337		ret = ice_qp_ena(vsi, qid);
338		if (!ret && pool_present)
339			napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi);
340		else if (ret)
341			netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
342	}
343
344	if (pool_failure) {
345		netdev_err(vsi->netdev, "Could not %sable buffer pool, error = %d\n",
346			   pool_present ? "en" : "dis", pool_failure);
347		return pool_failure;
348	}
349
350	return ret;
351}
352
353/**
354 * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
355 * @rx_ring: Rx ring
356 * @count: The number of buffers to allocate
357 *
358 * This function allocates a number of Rx buffers from the fill ring
359 * or the internal recycle mechanism and places them on the Rx ring.
360 *
361 * Returns true if all allocations were successful, false if any fail.
362 */
363bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
364{
365	union ice_32b_rx_flex_desc *rx_desc;
366	u16 ntu = rx_ring->next_to_use;
367	struct ice_rx_buf *rx_buf;
368	bool ok = true;
369	dma_addr_t dma;
370
371	if (!count)
372		return true;
373
374	rx_desc = ICE_RX_DESC(rx_ring, ntu);
375	rx_buf = &rx_ring->rx_buf[ntu];
376
377	do {
378		rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
379		if (!rx_buf->xdp) {
380			ok = false;
381			break;
382		}
383
384		dma = xsk_buff_xdp_get_dma(rx_buf->xdp);
385		rx_desc->read.pkt_addr = cpu_to_le64(dma);
386		rx_desc->wb.status_error0 = 0;
387
388		rx_desc++;
389		rx_buf++;
390		ntu++;
391
392		if (unlikely(ntu == rx_ring->count)) {
393			rx_desc = ICE_RX_DESC(rx_ring, 0);
394			rx_buf = rx_ring->rx_buf;
395			ntu = 0;
396		}
397	} while (--count);
398
399	if (rx_ring->next_to_use != ntu) {
400		/* clear the status bits for the next_to_use descriptor */
401		rx_desc->wb.status_error0 = 0;
402		ice_release_rx_desc(rx_ring, ntu);
403	}
404
405	return ok;
406}
407
408/**
409 * ice_bump_ntc - Bump the next_to_clean counter of an Rx ring
410 * @rx_ring: Rx ring
411 */
412static void ice_bump_ntc(struct ice_ring *rx_ring)
413{
414	int ntc = rx_ring->next_to_clean + 1;
415
416	ntc = (ntc < rx_ring->count) ? ntc : 0;
417	rx_ring->next_to_clean = ntc;
418	prefetch(ICE_RX_DESC(rx_ring, ntc));
419}
420
421/**
422 * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
423 * @rx_ring: Rx ring
424 * @rx_buf: zero-copy Rx buffer
425 *
426 * This function allocates a new skb from a zero-copy Rx buffer.
427 *
428 * Returns the skb on success, NULL on failure.
429 */
430static struct sk_buff *
431ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
432{
433	unsigned int metasize = rx_buf->xdp->data - rx_buf->xdp->data_meta;
434	unsigned int datasize = rx_buf->xdp->data_end - rx_buf->xdp->data;
435	unsigned int datasize_hard = rx_buf->xdp->data_end -
436				     rx_buf->xdp->data_hard_start;
437	struct sk_buff *skb;
438
439	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard,
440			       GFP_ATOMIC | __GFP_NOWARN);
441	if (unlikely(!skb))
442		return NULL;
443
444	skb_reserve(skb, rx_buf->xdp->data - rx_buf->xdp->data_hard_start);
445	memcpy(__skb_put(skb, datasize), rx_buf->xdp->data, datasize);
446	if (metasize)
447		skb_metadata_set(skb, metasize);
448
449	xsk_buff_free(rx_buf->xdp);
450	rx_buf->xdp = NULL;
451	return skb;
452}
453
454/**
455 * ice_run_xdp_zc - Executes an XDP program in zero-copy path
456 * @rx_ring: Rx ring
457 * @xdp: xdp_buff used as input to the XDP program
458 *
459 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
460 */
461static int
462ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
463{
464	int err, result = ICE_XDP_PASS;
465	struct bpf_prog *xdp_prog;
466	struct ice_ring *xdp_ring;
467	u32 act;
468
469	/* ZC path is enabled only when an XDP program is set,
470	 * so here it cannot be NULL
471	 */
472	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
473
474	act = bpf_prog_run_xdp(xdp_prog, xdp);
475
476	if (likely(act == XDP_REDIRECT)) {
477		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
478		if (err)
479			goto out_failure;
480		return ICE_XDP_REDIR;
481	}
482
483	switch (act) {
484	case XDP_PASS:
485		break;
486	case XDP_TX:
487		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->q_index];
488		result = ice_xmit_xdp_buff(xdp, xdp_ring);
489		if (result == ICE_XDP_CONSUMED)
490			goto out_failure;
491		break;
492	default:
493		bpf_warn_invalid_xdp_action(act);
494		fallthrough;
495	case XDP_ABORTED:
496out_failure:
497		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
498		fallthrough;
499	case XDP_DROP:
500		result = ICE_XDP_CONSUMED;
501		break;
502	}
503
504	return result;
505}
506
507/**
508 * ice_clean_rx_irq_zc - consumes packets from the hardware ring
509 * @rx_ring: AF_XDP Rx ring
510 * @budget: NAPI budget
511 *
512 * Returns number of processed packets on success, remaining budget on failure.
513 */
514int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
515{
516	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
517	u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
518	unsigned int xdp_xmit = 0;
519	bool failure = false;
520
521	while (likely(total_rx_packets < (unsigned int)budget)) {
522		union ice_32b_rx_flex_desc *rx_desc;
523		unsigned int size, xdp_res = 0;
524		struct ice_rx_buf *rx_buf;
525		struct sk_buff *skb;
526		u16 stat_err_bits;
527		u16 vlan_tag = 0;
528		u16 rx_ptype;
529
530		rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
531
532		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
533		if (!ice_test_staterr(rx_desc, stat_err_bits))
534			break;
535
536		/* This memory barrier is needed to keep us from reading
537		 * any other fields out of the rx_desc until we have
538		 * verified the descriptor has been written back.
539		 */
540		dma_rmb();
541
542		size = le16_to_cpu(rx_desc->wb.pkt_len) &
543				   ICE_RX_FLX_DESC_PKT_LEN_M;
544		if (!size)
545			break;
546
547		rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
548		rx_buf->xdp->data_end = rx_buf->xdp->data + size;
549		xsk_buff_dma_sync_for_cpu(rx_buf->xdp, rx_ring->xsk_pool);
550
551		xdp_res = ice_run_xdp_zc(rx_ring, rx_buf->xdp);
552		if (xdp_res) {
553			if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))
554				xdp_xmit |= xdp_res;
555			else
556				xsk_buff_free(rx_buf->xdp);
557
558			rx_buf->xdp = NULL;
559			total_rx_bytes += size;
560			total_rx_packets++;
561			cleaned_count++;
562
563			ice_bump_ntc(rx_ring);
564			continue;
565		}
566
567		/* XDP_PASS path */
568		skb = ice_construct_skb_zc(rx_ring, rx_buf);
569		if (!skb) {
570			rx_ring->rx_stats.alloc_buf_failed++;
571			break;
572		}
573
574		cleaned_count++;
575		ice_bump_ntc(rx_ring);
576
577		if (eth_skb_pad(skb)) {
578			skb = NULL;
579			continue;
580		}
581
582		total_rx_bytes += skb->len;
583		total_rx_packets++;
584
585		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
586		if (ice_test_staterr(rx_desc, stat_err_bits))
587			vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);
588
589		rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
590				       ICE_RX_FLEX_DESC_PTYPE_M;
591
592		ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
593		ice_receive_skb(rx_ring, skb, vlan_tag);
594	}
595
596	if (cleaned_count >= ICE_RX_BUF_WRITE)
597		failure = !ice_alloc_rx_bufs_zc(rx_ring, cleaned_count);
598
599	ice_finalize_xdp_rx(rx_ring, xdp_xmit);
600	ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
601
602	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
603		if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
604			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
605		else
606			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);
607
608		return (int)total_rx_packets;
609	}
610
611	return failure ? budget : (int)total_rx_packets;
612}
613
614/**
615 * ice_xmit_zc - Completes AF_XDP entries, and cleans XDP entries
616 * @xdp_ring: XDP Tx ring
617 * @budget: max number of frames to xmit
618 *
619 * Returns true if cleanup/transmission is done.
620 */
621static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
622{
623	struct ice_tx_desc *tx_desc = NULL;
624	bool work_done = true;
625	struct xdp_desc desc;
626	dma_addr_t dma;
627
628	while (likely(budget-- > 0)) {
629		struct ice_tx_buf *tx_buf;
630
631		if (unlikely(!ICE_DESC_UNUSED(xdp_ring))) {
632			xdp_ring->tx_stats.tx_busy++;
633			work_done = false;
634			break;
635		}
636
637		tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];
638
639		if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc))
640			break;
641
642		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr);
643		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma,
644						 desc.len);
645
646		tx_buf->bytecount = desc.len;
647
648		tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
649		tx_desc->buf_addr = cpu_to_le64(dma);
650		tx_desc->cmd_type_offset_bsz =
651			ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0, desc.len, 0);
652
653		xdp_ring->next_to_use++;
654		if (xdp_ring->next_to_use == xdp_ring->count)
655			xdp_ring->next_to_use = 0;
656	}
657
658	if (tx_desc) {
659		ice_xdp_ring_update_tail(xdp_ring);
660		xsk_tx_release(xdp_ring->xsk_pool);
661	}
662
663	return budget > 0 && work_done;
664}
665
666/**
667 * ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer
668 * @xdp_ring: XDP Tx ring
669 * @tx_buf: Tx buffer to clean
670 */
671static void
672ice_clean_xdp_tx_buf(struct ice_ring *xdp_ring, struct ice_tx_buf *tx_buf)
673{
674	xdp_return_frame((struct xdp_frame *)tx_buf->raw_buf);
675	dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
676			 dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
677	dma_unmap_len_set(tx_buf, len, 0);
678}
679
680/**
681 * ice_clean_tx_irq_zc - Completes AF_XDP entries, and cleans XDP entries
682 * @xdp_ring: XDP Tx ring
683 * @budget: NAPI budget
684 *
685 * Returns true if cleanup/transmission is done.
686 */
687bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)
688{
689	int total_packets = 0, total_bytes = 0;
690	s16 ntc = xdp_ring->next_to_clean;
691	struct ice_tx_desc *tx_desc;
692	struct ice_tx_buf *tx_buf;
693	u32 xsk_frames = 0;
694	bool xmit_done;
695
696	tx_desc = ICE_TX_DESC(xdp_ring, ntc);
697	tx_buf = &xdp_ring->tx_buf[ntc];
698	ntc -= xdp_ring->count;
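	/* ntc is biased negative by the ring size so the wrap check in the
	 * loop below can simply test for it reaching zero.
	 */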
699
700	do {
701		if (!(tx_desc->cmd_type_offset_bsz &
702		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
703			break;
704
705		total_bytes += tx_buf->bytecount;
706		total_packets++;
707
708		if (tx_buf->raw_buf) {
709			ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
710			tx_buf->raw_buf = NULL;
711		} else {
712			xsk_frames++;
713		}
714
715		tx_desc->cmd_type_offset_bsz = 0;
716		tx_buf++;
717		tx_desc++;
718		ntc++;
719
720		if (unlikely(!ntc)) {
721			ntc -= xdp_ring->count;
722			tx_buf = xdp_ring->tx_buf;
723			tx_desc = ICE_TX_DESC(xdp_ring, 0);
724		}
725
726		prefetch(tx_desc);
727
728	} while (likely(--budget));
729
730	ntc += xdp_ring->count;
731	xdp_ring->next_to_clean = ntc;
732
733	if (xsk_frames)
734		xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
735
736	if (xsk_uses_need_wakeup(xdp_ring->xsk_pool))
737		xsk_set_tx_need_wakeup(xdp_ring->xsk_pool);
738
739	ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes);
740	xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK);
741
742	return budget > 0 && xmit_done;
743}
744
745/**
746 * ice_xsk_wakeup - Implements ndo_xsk_wakeup
747 * @netdev: net_device
748 * @queue_id: queue to wake up
749 * @flags: ignored in our case, since we have Rx and Tx in the same NAPI
750 *
751 * Returns negative on error, zero otherwise.
752 */
753int
754ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
755	       u32 __always_unused flags)
756{
757	struct ice_netdev_priv *np = netdev_priv(netdev);
758	struct ice_q_vector *q_vector;
759	struct ice_vsi *vsi = np->vsi;
760	struct ice_ring *ring;
761
762	if (test_bit(ICE_DOWN, vsi->state))
763		return -ENETDOWN;
764
765	if (!ice_is_xdp_ena_vsi(vsi))
766		return -ENXIO;
767
768	if (queue_id >= vsi->num_txq)
769		return -ENXIO;
770
771	if (!vsi->xdp_rings[queue_id]->xsk_pool)
772		return -ENXIO;
773
774	ring = vsi->xdp_rings[queue_id];
775
776	/* The idea here is that if NAPI is running, mark a miss, so
777	 * it will run again. If not, trigger an interrupt and
778	 * schedule the NAPI from interrupt context. If NAPI would be
779	 * scheduled here, the interrupt affinity would not be
780	 * honored.
781	 */
782	q_vector = ring->q_vector;
783	if (!napi_if_scheduled_mark_missed(&q_vector->napi))
784		ice_trigger_sw_intr(&vsi->back->hw, q_vector);
785
786	return 0;
787}
788
789/**
790 * ice_xsk_any_rx_ring_ena - Checks if Rx rings have AF_XDP buff pool attached
791 * @vsi: VSI to be checked
792 *
793 * Returns true if any of the Rx rings has an AF_XDP buff pool attached
794 */
795bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
796{
797	int i;
798
799	ice_for_each_rxq(vsi, i) {
800		if (xsk_get_pool_from_qid(vsi->netdev, i))
801			return true;
802	}
803
804	return false;
805}
806
807/**
808 * ice_xsk_clean_rx_ring - clean buffer pool queues connected to a given Rx ring
809 * @rx_ring: ring to be cleaned
810 */
811void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring)
812{
813	u16 i;
814
815	for (i = 0; i < rx_ring->count; i++) {
816		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
817
818		if (!rx_buf->xdp)
819			continue;
820
821		rx_buf->xdp = NULL;
822	}
823}
824
825/**
826 * ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its buffer pool queues
827 * @xdp_ring: XDP_Tx ring
828 */
829void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring)
830{
831	u16 ntc = xdp_ring->next_to_clean, ntu = xdp_ring->next_to_use;
832	u32 xsk_frames = 0;
833
834	while (ntc != ntu) {
835		struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];
836
837		if (tx_buf->raw_buf)
838			ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
839		else
840			xsk_frames++;
841
842		tx_buf->raw_buf = NULL;
843
844		ntc++;
845		if (ntc >= xdp_ring->count)
846			ntc = 0;
847	}
848
849	if (xsk_frames)
850		xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
851}