   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright (C) 2023 Intel Corporation */
   3
   4#include <net/libeth/rx.h>
   5#include <net/libeth/tx.h>
   6
   7#include "idpf.h"
   8#include "idpf_virtchnl.h"
   9
  10struct idpf_tx_stash {
  11	struct hlist_node hlist;
  12	struct libeth_sqe buf;
  13};
  14
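/*
 * The splitq completion tag for a Tx buffer is stashed in the libeth_sqe
 * private area; LIBETH_SQE_CHECK_PRIV() asserts at build time that a u32
 * fits there.
 */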
  15#define idpf_tx_buf_compl_tag(buf)	(*(u32 *)&(buf)->priv)
  16LIBETH_SQE_CHECK_PRIV(u32);
  17
  18static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
  19			       unsigned int count);
  20
  21/**
  22 * idpf_buf_lifo_push - push a buffer pointer onto stack
  23 * @stack: pointer to stack struct
  24 * @buf: pointer to buf to push
  25 *
  26 * Returns 0 on success, negative on failure
  27 **/
  28static int idpf_buf_lifo_push(struct idpf_buf_lifo *stack,
  29			      struct idpf_tx_stash *buf)
  30{
  31	if (unlikely(stack->top == stack->size))
  32		return -ENOSPC;
  33
  34	stack->bufs[stack->top++] = buf;
  35
  36	return 0;
  37}
  38
  39/**
  40 * idpf_buf_lifo_pop - pop a buffer pointer from stack
  41 * @stack: pointer to stack struct
  42 **/
  43static struct idpf_tx_stash *idpf_buf_lifo_pop(struct idpf_buf_lifo *stack)
  44{
  45	if (unlikely(!stack->top))
  46		return NULL;
  47
  48	return stack->bufs[--stack->top];
  49}
  50
  51/**
  52 * idpf_tx_timeout - Respond to a Tx Hang
  53 * @netdev: network interface device structure
  54 * @txqueue: TX queue
  55 */
  56void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
  57{
  58	struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev);
  59
  60	adapter->tx_timeout_count++;
  61
  62	netdev_err(netdev, "Detected Tx timeout: Count %d, Queue %d\n",
  63		   adapter->tx_timeout_count, txqueue);
  64	if (!idpf_is_reset_in_prog(adapter)) {
  65		set_bit(IDPF_HR_FUNC_RESET, adapter->flags);
  66		queue_delayed_work(adapter->vc_event_wq,
  67				   &adapter->vc_event_task,
  68				   msecs_to_jiffies(10));
  69	}
  70}
  71
  72/**
  73 * idpf_tx_buf_rel_all - Free any empty Tx buffers
  74 * @txq: queue to be cleaned
  75 */
  76static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq)
  77{
  78	struct libeth_sq_napi_stats ss = { };
  79	struct idpf_buf_lifo *buf_stack;
  80	struct idpf_tx_stash *stash;
  81	struct libeth_cq_pp cp = {
  82		.dev	= txq->dev,
  83		.ss	= &ss,
  84	};
  85	struct hlist_node *tmp;
  86	u32 i, tag;
  87
  88	/* Buffers already cleared, nothing to do */
  89	if (!txq->tx_buf)
  90		return;
  91
  92	/* Free all the Tx buffer sk_buffs */
  93	for (i = 0; i < txq->desc_count; i++)
  94		libeth_tx_complete(&txq->tx_buf[i], &cp);
  95
  96	kfree(txq->tx_buf);
  97	txq->tx_buf = NULL;
  98
  99	if (!idpf_queue_has(FLOW_SCH_EN, txq))
 100		return;
 101
 102	buf_stack = &txq->stash->buf_stack;
 103	if (!buf_stack->bufs)
 104		return;
 105
 106	/*
 107	 * If a Tx timeout occurred, there are potentially still bufs in the
 108	 * hash table; free them here.
 109	 */
 110	hash_for_each_safe(txq->stash->sched_buf_hash, tag, tmp, stash,
 111			   hlist) {
 112		if (!stash)
 113			continue;
 114
 115		libeth_tx_complete(&stash->buf, &cp);
 116		hash_del(&stash->hlist);
 117		idpf_buf_lifo_push(buf_stack, stash);
 118	}
 119
 120	for (i = 0; i < buf_stack->size; i++)
 121		kfree(buf_stack->bufs[i]);
 122
 123	kfree(buf_stack->bufs);
 124	buf_stack->bufs = NULL;
 125}
 126
 127/**
 128 * idpf_tx_desc_rel - Free Tx resources per queue
 129 * @txq: Tx descriptor ring for a specific queue
 130 *
 131 * Free all transmit software resources
 132 */
 133static void idpf_tx_desc_rel(struct idpf_tx_queue *txq)
 134{
 135	idpf_tx_buf_rel_all(txq);
 136	netdev_tx_reset_subqueue(txq->netdev, txq->idx);
 137
 138	if (!txq->desc_ring)
 139		return;
 140
 141	dmam_free_coherent(txq->dev, txq->size, txq->desc_ring, txq->dma);
 142	txq->desc_ring = NULL;
 143	txq->next_to_use = 0;
 144	txq->next_to_clean = 0;
 145}
 146
 147/**
 148 * idpf_compl_desc_rel - Free completion resources per queue
 149 * @complq: completion queue
 150 *
 151 * Free all completion software resources.
 152 */
 153static void idpf_compl_desc_rel(struct idpf_compl_queue *complq)
 154{
 155	if (!complq->comp)
 156		return;
 157
 158	dma_free_coherent(complq->netdev->dev.parent, complq->size,
 159			  complq->comp, complq->dma);
 160	complq->comp = NULL;
 161	complq->next_to_use = 0;
 162	complq->next_to_clean = 0;
 163}
 164
 165/**
 166 * idpf_tx_desc_rel_all - Free Tx Resources for All Queues
 167 * @vport: virtual port structure
 168 *
 169 * Free all transmit software resources
 170 */
 171static void idpf_tx_desc_rel_all(struct idpf_vport *vport)
 172{
 173	int i, j;
 174
 175	if (!vport->txq_grps)
 176		return;
 177
 178	for (i = 0; i < vport->num_txq_grp; i++) {
 179		struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
 180
 181		for (j = 0; j < txq_grp->num_txq; j++)
 182			idpf_tx_desc_rel(txq_grp->txqs[j]);
 183
 184		if (idpf_is_queue_model_split(vport->txq_model))
 185			idpf_compl_desc_rel(txq_grp->complq);
 186	}
 187}
 188
 189/**
 190 * idpf_tx_buf_alloc_all - Allocate memory for all buffer resources
 191 * @tx_q: queue for which the buffers are allocated
 192 *
 193 * Returns 0 on success, negative on failure
 194 */
 195static int idpf_tx_buf_alloc_all(struct idpf_tx_queue *tx_q)
 196{
 197	struct idpf_buf_lifo *buf_stack;
 198	int buf_size;
 199	int i;
 200
 201	/* Allocate bookkeeping buffers only. Buffers to be supplied to HW
 202	 * are allocated by kernel network stack and received as part of skb
 203	 */
 204	buf_size = sizeof(struct idpf_tx_buf) * tx_q->desc_count;
 205	tx_q->tx_buf = kzalloc(buf_size, GFP_KERNEL);
 206	if (!tx_q->tx_buf)
 207		return -ENOMEM;
 208
 209	if (!idpf_queue_has(FLOW_SCH_EN, tx_q))
 210		return 0;
 211
 212	buf_stack = &tx_q->stash->buf_stack;
 213
 214	/* Initialize tx buf stack for out-of-order completions if
 215	 * flow scheduling offload is enabled
 216	 */
 217	buf_stack->bufs = kcalloc(tx_q->desc_count, sizeof(*buf_stack->bufs),
 218				  GFP_KERNEL);
 219	if (!buf_stack->bufs)
 220		return -ENOMEM;
 221
 222	buf_stack->size = tx_q->desc_count;
 223	buf_stack->top = tx_q->desc_count;
 224
 225	for (i = 0; i < tx_q->desc_count; i++) {
 226		buf_stack->bufs[i] = kzalloc(sizeof(*buf_stack->bufs[i]),
 227					     GFP_KERNEL);
 228		if (!buf_stack->bufs[i])
 229			return -ENOMEM;
 230	}
 231
 232	return 0;
 233}
 234
 235/**
 236 * idpf_tx_desc_alloc - Allocate the Tx descriptors
 237 * @vport: vport to allocate resources for
 238 * @tx_q: the tx ring to set up
 239 *
 240 * Returns 0 on success, negative on failure
 241 */
 242static int idpf_tx_desc_alloc(const struct idpf_vport *vport,
 243			      struct idpf_tx_queue *tx_q)
 244{
 245	struct device *dev = tx_q->dev;
 246	int err;
 247
 248	err = idpf_tx_buf_alloc_all(tx_q);
 249	if (err)
 250		goto err_alloc;
 251
 252	tx_q->size = tx_q->desc_count * sizeof(*tx_q->base_tx);
 253
 254	/* Allocate descriptors and also round up to nearest 4K */
 255	tx_q->size = ALIGN(tx_q->size, 4096);
 256	tx_q->desc_ring = dmam_alloc_coherent(dev, tx_q->size, &tx_q->dma,
 257					      GFP_KERNEL);
 258	if (!tx_q->desc_ring) {
 259		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
 260			tx_q->size);
 261		err = -ENOMEM;
 262		goto err_alloc;
 263	}
 264
 265	tx_q->next_to_use = 0;
 266	tx_q->next_to_clean = 0;
 267	idpf_queue_set(GEN_CHK, tx_q);
 268
 269	return 0;
 270
 271err_alloc:
 272	idpf_tx_desc_rel(tx_q);
 273
 274	return err;
 275}
 276
 277/**
 278 * idpf_compl_desc_alloc - allocate completion descriptors
 279 * @vport: vport to allocate resources for
 280 * @complq: completion queue to set up
 281 *
 282 * Return: 0 on success, -errno on failure.
 283 */
 284static int idpf_compl_desc_alloc(const struct idpf_vport *vport,
 285				 struct idpf_compl_queue *complq)
 286{
 287	complq->size = array_size(complq->desc_count, sizeof(*complq->comp));
 288
 289	complq->comp = dma_alloc_coherent(complq->netdev->dev.parent,
 290					  complq->size, &complq->dma,
 291					  GFP_KERNEL);
 292	if (!complq->comp)
 293		return -ENOMEM;
 294
 295	complq->next_to_use = 0;
 296	complq->next_to_clean = 0;
 297	idpf_queue_set(GEN_CHK, complq);
 298
 299	return 0;
 300}
 301
 302/**
 303 * idpf_tx_desc_alloc_all - allocate all queues Tx resources
 304 * @vport: virtual port private structure
 305 *
 306 * Returns 0 on success, negative on failure
 307 */
 308static int idpf_tx_desc_alloc_all(struct idpf_vport *vport)
 309{
 310	int err = 0;
 311	int i, j;
 312
 313	/* Setup buffer queues. In single queue model buffer queues and
 314	 * completion queues will be same
 315	 */
 316	for (i = 0; i < vport->num_txq_grp; i++) {
 317		for (j = 0; j < vport->txq_grps[i].num_txq; j++) {
 318			struct idpf_tx_queue *txq = vport->txq_grps[i].txqs[j];
 319			u8 gen_bits = 0;
 320			u16 bufidx_mask;
 321
 322			err = idpf_tx_desc_alloc(vport, txq);
 323			if (err) {
 324				pci_err(vport->adapter->pdev,
 325					"Allocation for Tx Queue %u failed\n",
 326					i);
 327				goto err_out;
 328			}
 329
 330			if (!idpf_is_queue_model_split(vport->txq_model))
 331				continue;
 332
 333			txq->compl_tag_cur_gen = 0;
 334
 335			/* Determine the number of bits in the bufid
 336			 * mask and add one to get the start of the
 337			 * generation bits
 338			 */
 339			bufidx_mask = txq->desc_count - 1;
 340			while (bufidx_mask >> 1) {
 341				txq->compl_tag_gen_s++;
 342				bufidx_mask = bufidx_mask >> 1;
 343			}
 344			txq->compl_tag_gen_s++;
 345
 346			gen_bits = IDPF_TX_SPLITQ_COMPL_TAG_WIDTH -
 347							txq->compl_tag_gen_s;
 348			txq->compl_tag_gen_max = GETMAXVAL(gen_bits);
 349
 350			/* Set bufid mask based on location of first
 351			 * gen bit; it cannot simply be the descriptor
 352			 * ring size-1 since we can have size values
 353			 * where not all of those bits are set.
 354			 */
 355			txq->compl_tag_bufid_m =
 356				GETMAXVAL(txq->compl_tag_gen_s);
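			/*
			 * Example, assuming a 16-bit completion tag: a
			 * 256-descriptor ring yields compl_tag_gen_s = 8,
			 * which leaves 8 generation bits, so
			 * compl_tag_gen_max is 255 and compl_tag_bufid_m
			 * is 0xff.
			 */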
 357		}
 358
 359		if (!idpf_is_queue_model_split(vport->txq_model))
 360			continue;
 361
 362		/* Setup completion queues */
 363		err = idpf_compl_desc_alloc(vport, vport->txq_grps[i].complq);
 364		if (err) {
 365			pci_err(vport->adapter->pdev,
 366				"Allocation for Tx Completion Queue %u failed\n",
 367				i);
 368			goto err_out;
 369		}
 370	}
 371
 372err_out:
 373	if (err)
 374		idpf_tx_desc_rel_all(vport);
 375
 376	return err;
 377}
 378
 379/**
 380 * idpf_rx_page_rel - Release an rx buffer page
 381 * @rx_buf: the buffer to free
 382 */
 383static void idpf_rx_page_rel(struct libeth_fqe *rx_buf)
 384{
 385	if (unlikely(!rx_buf->page))
 386		return;
 387
 388	page_pool_put_full_page(rx_buf->page->pp, rx_buf->page, false);
 389
 390	rx_buf->page = NULL;
 391	rx_buf->offset = 0;
 392}
 393
 394/**
 395 * idpf_rx_hdr_buf_rel_all - Release header buffer memory
 396 * @bufq: queue to use
 397 */
 398static void idpf_rx_hdr_buf_rel_all(struct idpf_buf_queue *bufq)
 399{
 400	struct libeth_fq fq = {
 401		.fqes	= bufq->hdr_buf,
 402		.pp	= bufq->hdr_pp,
 403	};
 404
 405	for (u32 i = 0; i < bufq->desc_count; i++)
 406		idpf_rx_page_rel(&bufq->hdr_buf[i]);
 407
 408	libeth_rx_fq_destroy(&fq);
 409	bufq->hdr_buf = NULL;
 410	bufq->hdr_pp = NULL;
 411}
 412
 413/**
 414 * idpf_rx_buf_rel_bufq - Free all Rx buffer resources for a buffer queue
 415 * @bufq: queue to be cleaned
 416 */
 417static void idpf_rx_buf_rel_bufq(struct idpf_buf_queue *bufq)
 418{
 419	struct libeth_fq fq = {
 420		.fqes	= bufq->buf,
 421		.pp	= bufq->pp,
 422	};
 423
 424	/* queue already cleared, nothing to do */
 425	if (!bufq->buf)
 426		return;
 427
 428	/* Free all the bufs allocated and given to hw on Rx queue */
 429	for (u32 i = 0; i < bufq->desc_count; i++)
 430		idpf_rx_page_rel(&bufq->buf[i]);
 431
 432	if (idpf_queue_has(HSPLIT_EN, bufq))
 433		idpf_rx_hdr_buf_rel_all(bufq);
 434
 435	libeth_rx_fq_destroy(&fq);
 436	bufq->buf = NULL;
 437	bufq->pp = NULL;
 438}
 439
 440/**
 441 * idpf_rx_buf_rel_all - Free all Rx buffer resources for a receive queue
 442 * @rxq: queue to be cleaned
 443 */
 444static void idpf_rx_buf_rel_all(struct idpf_rx_queue *rxq)
 445{
 446	struct libeth_fq fq = {
 447		.fqes	= rxq->rx_buf,
 448		.pp	= rxq->pp,
 449	};
 450
 451	if (!rxq->rx_buf)
 452		return;
 453
 454	for (u32 i = 0; i < rxq->desc_count; i++)
 455		idpf_rx_page_rel(&rxq->rx_buf[i]);
 456
 457	libeth_rx_fq_destroy(&fq);
 458	rxq->rx_buf = NULL;
 459	rxq->pp = NULL;
 460}
 461
 462/**
 463 * idpf_rx_desc_rel - Free a specific Rx q resources
 464 * @rxq: queue to clean the resources from
 465 * @dev: device to free DMA memory
 466 * @model: single or split queue model
 467 *
 468 * Free a specific rx queue resources
 469 */
 470static void idpf_rx_desc_rel(struct idpf_rx_queue *rxq, struct device *dev,
 471			     u32 model)
 472{
 473	if (!rxq)
 474		return;
 475
 476	if (rxq->skb) {
 477		dev_kfree_skb_any(rxq->skb);
 478		rxq->skb = NULL;
 479	}
 480
 481	if (!idpf_is_queue_model_split(model))
 482		idpf_rx_buf_rel_all(rxq);
 483
 484	rxq->next_to_alloc = 0;
 485	rxq->next_to_clean = 0;
 486	rxq->next_to_use = 0;
 487	if (!rxq->desc_ring)
 488		return;
 489
 490	dmam_free_coherent(dev, rxq->size, rxq->desc_ring, rxq->dma);
 491	rxq->desc_ring = NULL;
 492}
 493
 494/**
 495 * idpf_rx_desc_rel_bufq - free buffer queue resources
 496 * @bufq: buffer queue to clean the resources from
 497 * @dev: device to free DMA memory
 498 */
 499static void idpf_rx_desc_rel_bufq(struct idpf_buf_queue *bufq,
 500				  struct device *dev)
 501{
 502	if (!bufq)
 503		return;
 504
 505	idpf_rx_buf_rel_bufq(bufq);
 506
 507	bufq->next_to_alloc = 0;
 508	bufq->next_to_clean = 0;
 509	bufq->next_to_use = 0;
 510
 511	if (!bufq->split_buf)
 512		return;
 513
 514	dma_free_coherent(dev, bufq->size, bufq->split_buf, bufq->dma);
 515	bufq->split_buf = NULL;
 516}
 517
 518/**
 519 * idpf_rx_desc_rel_all - Free Rx Resources for All Queues
 520 * @vport: virtual port structure
 521 *
 522 * Free all rx queues resources
 523 */
 524static void idpf_rx_desc_rel_all(struct idpf_vport *vport)
 525{
 526	struct device *dev = &vport->adapter->pdev->dev;
 527	struct idpf_rxq_group *rx_qgrp;
 528	u16 num_rxq;
 529	int i, j;
 530
 531	if (!vport->rxq_grps)
 532		return;
 533
 534	for (i = 0; i < vport->num_rxq_grp; i++) {
 535		rx_qgrp = &vport->rxq_grps[i];
 536
 537		if (!idpf_is_queue_model_split(vport->rxq_model)) {
 538			for (j = 0; j < rx_qgrp->singleq.num_rxq; j++)
 539				idpf_rx_desc_rel(rx_qgrp->singleq.rxqs[j], dev,
 540						 VIRTCHNL2_QUEUE_MODEL_SINGLE);
 541			continue;
 542		}
 543
 544		num_rxq = rx_qgrp->splitq.num_rxq_sets;
 545		for (j = 0; j < num_rxq; j++)
 546			idpf_rx_desc_rel(&rx_qgrp->splitq.rxq_sets[j]->rxq,
 547					 dev, VIRTCHNL2_QUEUE_MODEL_SPLIT);
 548
 549		if (!rx_qgrp->splitq.bufq_sets)
 550			continue;
 551
 552		for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
 553			struct idpf_bufq_set *bufq_set =
 554				&rx_qgrp->splitq.bufq_sets[j];
 555
 556			idpf_rx_desc_rel_bufq(&bufq_set->bufq, dev);
 557		}
 558	}
 559}
 560
 561/**
 562 * idpf_rx_buf_hw_update - Store the new tail and head values
 563 * @bufq: queue to bump
 564 * @val: new head index
 565 */
 566static void idpf_rx_buf_hw_update(struct idpf_buf_queue *bufq, u32 val)
 567{
 568	bufq->next_to_use = val;
 569
 570	if (unlikely(!bufq->tail))
 571		return;
 572
 573	/* writel has an implicit memory barrier */
 574	writel(val, bufq->tail);
 575}
 576
 577/**
 578 * idpf_rx_hdr_buf_alloc_all - Allocate memory for header buffers
 579 * @bufq: ring to use
 580 *
 581 * Returns 0 on success, negative on failure.
 582 */
 583static int idpf_rx_hdr_buf_alloc_all(struct idpf_buf_queue *bufq)
 584{
 585	struct libeth_fq fq = {
 586		.count	= bufq->desc_count,
 587		.type	= LIBETH_FQE_HDR,
 588		.nid	= idpf_q_vector_to_mem(bufq->q_vector),
 589	};
 590	int ret;
 591
 592	ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi);
 593	if (ret)
 594		return ret;
 595
 596	bufq->hdr_pp = fq.pp;
 597	bufq->hdr_buf = fq.fqes;
 598	bufq->hdr_truesize = fq.truesize;
 599	bufq->rx_hbuf_size = fq.buf_len;
 600
 601	return 0;
 602}
 603
 604/**
 605 * idpf_rx_post_buf_refill - Post buffer id to refill queue
 606 * @refillq: refill queue to post to
 607 * @buf_id: buffer id to post
 608 */
 609static void idpf_rx_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id)
 610{
 611	u32 nta = refillq->next_to_use;
 612
 613	/* store the buffer ID and the SW maintained GEN bit to the refillq */
 614	refillq->ring[nta] =
 615		FIELD_PREP(IDPF_RX_BI_BUFID_M, buf_id) |
 616		FIELD_PREP(IDPF_RX_BI_GEN_M,
 617			   idpf_queue_has(GEN_CHK, refillq));
 618
 619	if (unlikely(++nta == refillq->desc_count)) {
 620		nta = 0;
 621		idpf_queue_change(GEN_CHK, refillq);
 622	}
 623
 624	refillq->next_to_use = nta;
 625}
 626
 627/**
 628 * idpf_rx_post_buf_desc - Post buffer to bufq descriptor ring
 629 * @bufq: buffer queue to post to
 630 * @buf_id: buffer id to post
 631 *
 632 * Returns false if buffer could not be allocated, true otherwise.
 633 */
 634static bool idpf_rx_post_buf_desc(struct idpf_buf_queue *bufq, u16 buf_id)
 635{
 636	struct virtchnl2_splitq_rx_buf_desc *splitq_rx_desc = NULL;
 637	struct libeth_fq_fp fq = {
 638		.count	= bufq->desc_count,
 639	};
 640	u16 nta = bufq->next_to_alloc;
 641	dma_addr_t addr;
 642
 643	splitq_rx_desc = &bufq->split_buf[nta];
 644
 645	if (idpf_queue_has(HSPLIT_EN, bufq)) {
 646		fq.pp = bufq->hdr_pp;
 647		fq.fqes = bufq->hdr_buf;
 648		fq.truesize = bufq->hdr_truesize;
 649
 650		addr = libeth_rx_alloc(&fq, buf_id);
 651		if (addr == DMA_MAPPING_ERROR)
 652			return false;
 653
 654		splitq_rx_desc->hdr_addr = cpu_to_le64(addr);
 655	}
 656
 657	fq.pp = bufq->pp;
 658	fq.fqes = bufq->buf;
 659	fq.truesize = bufq->truesize;
 660
 661	addr = libeth_rx_alloc(&fq, buf_id);
 662	if (addr == DMA_MAPPING_ERROR)
 663		return false;
 664
 665	splitq_rx_desc->pkt_addr = cpu_to_le64(addr);
 666	splitq_rx_desc->qword0.buf_id = cpu_to_le16(buf_id);
 667
 668	nta++;
 669	if (unlikely(nta == bufq->desc_count))
 670		nta = 0;
 671	bufq->next_to_alloc = nta;
 672
 673	return true;
 674}
 675
 676/**
 677 * idpf_rx_post_init_bufs - Post initial buffers to bufq
 678 * @bufq: buffer queue to post working set to
 679 * @working_set: number of buffers to put in working set
 680 *
 681 * Returns true if @working_set bufs were posted successfully, false otherwise.
 682 */
 683static bool idpf_rx_post_init_bufs(struct idpf_buf_queue *bufq,
 684				   u16 working_set)
 685{
 686	int i;
 687
 688	for (i = 0; i < working_set; i++) {
 689		if (!idpf_rx_post_buf_desc(bufq, i))
 690			return false;
 691	}
 692
 693	idpf_rx_buf_hw_update(bufq, ALIGN_DOWN(bufq->next_to_alloc,
 694					       IDPF_RX_BUF_STRIDE));
 695
 696	return true;
 697}
 698
 699/**
 700 * idpf_rx_buf_alloc_singleq - Allocate memory for all buffer resources
 701 * @rxq: queue for which the buffers are allocated
 702 *
 703 * Return: 0 on success, -ENOMEM on failure.
 704 */
 705static int idpf_rx_buf_alloc_singleq(struct idpf_rx_queue *rxq)
 706{
 707	if (idpf_rx_singleq_buf_hw_alloc_all(rxq, rxq->desc_count - 1))
 708		goto err;
 709
 710	return 0;
 711
 712err:
 713	idpf_rx_buf_rel_all(rxq);
 714
 715	return -ENOMEM;
 716}
 717
 718/**
 719 * idpf_rx_bufs_init_singleq - Initialize page pool and allocate Rx bufs
 720 * @rxq: buffer queue to create page pool for
 721 *
 722 * Return: 0 on success, -errno on failure.
 723 */
 724static int idpf_rx_bufs_init_singleq(struct idpf_rx_queue *rxq)
 725{
 726	struct libeth_fq fq = {
 727		.count	= rxq->desc_count,
 728		.type	= LIBETH_FQE_MTU,
 729		.nid	= idpf_q_vector_to_mem(rxq->q_vector),
 730	};
 731	int ret;
 732
 733	ret = libeth_rx_fq_create(&fq, &rxq->q_vector->napi);
 734	if (ret)
 735		return ret;
 736
 737	rxq->pp = fq.pp;
 738	rxq->rx_buf = fq.fqes;
 739	rxq->truesize = fq.truesize;
 740	rxq->rx_buf_size = fq.buf_len;
 741
 742	return idpf_rx_buf_alloc_singleq(rxq);
 743}
 744
 745/**
 746 * idpf_rx_buf_alloc_all - Allocate memory for all buffer resources
 747 * @rxbufq: queue for which the buffers are allocated
 748 *
 749 * Returns 0 on success, negative on failure
 750 */
 751static int idpf_rx_buf_alloc_all(struct idpf_buf_queue *rxbufq)
 752{
 753	int err = 0;
 754
 755	if (idpf_queue_has(HSPLIT_EN, rxbufq)) {
 756		err = idpf_rx_hdr_buf_alloc_all(rxbufq);
 757		if (err)
 758			goto rx_buf_alloc_all_out;
 759	}
 760
 761	/* Allocate buffers to be given to HW. */
 762	if (!idpf_rx_post_init_bufs(rxbufq, IDPF_RX_BUFQ_WORKING_SET(rxbufq)))
 763		err = -ENOMEM;
 764
 765rx_buf_alloc_all_out:
 766	if (err)
 767		idpf_rx_buf_rel_bufq(rxbufq);
 768
 769	return err;
 770}
 771
 772/**
 773 * idpf_rx_bufs_init - Initialize page pool, allocate rx bufs, and post to HW
 774 * @bufq: buffer queue to create page pool for
 775 * @type: type of Rx buffers to allocate
 776 *
 777 * Returns 0 on success, negative on failure
 778 */
 779static int idpf_rx_bufs_init(struct idpf_buf_queue *bufq,
 780			     enum libeth_fqe_type type)
 781{
 782	struct libeth_fq fq = {
 783		.truesize	= bufq->truesize,
 784		.count		= bufq->desc_count,
 785		.type		= type,
 786		.hsplit		= idpf_queue_has(HSPLIT_EN, bufq),
 787		.nid		= idpf_q_vector_to_mem(bufq->q_vector),
 788	};
 789	int ret;
 790
 791	ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi);
 792	if (ret)
 793		return ret;
 794
 795	bufq->pp = fq.pp;
 796	bufq->buf = fq.fqes;
 797	bufq->truesize = fq.truesize;
 798	bufq->rx_buf_size = fq.buf_len;
 799
 800	return idpf_rx_buf_alloc_all(bufq);
 801}
 802
 803/**
 804 * idpf_rx_bufs_init_all - Initialize all RX bufs
 805 * @vport: virtual port struct
 806 *
 807 * Returns 0 on success, negative on failure
 808 */
 809int idpf_rx_bufs_init_all(struct idpf_vport *vport)
 810{
 811	bool split = idpf_is_queue_model_split(vport->rxq_model);
 812	int i, j, err;
 813
 814	for (i = 0; i < vport->num_rxq_grp; i++) {
 815		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
 816		u32 truesize = 0;
 817
 818		/* Allocate bufs for the rxq itself in singleq */
 819		if (!split) {
 820			int num_rxq = rx_qgrp->singleq.num_rxq;
 821
 822			for (j = 0; j < num_rxq; j++) {
 823				struct idpf_rx_queue *q;
 824
 825				q = rx_qgrp->singleq.rxqs[j];
 826				err = idpf_rx_bufs_init_singleq(q);
 827				if (err)
 828					return err;
 829			}
 830
 831			continue;
 832		}
 833
 834		/* Otherwise, allocate bufs for the buffer queues */
 835		for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
 836			enum libeth_fqe_type type;
 837			struct idpf_buf_queue *q;
 838
 839			q = &rx_qgrp->splitq.bufq_sets[j].bufq;
 840			q->truesize = truesize;
 841
 842			type = truesize ? LIBETH_FQE_SHORT : LIBETH_FQE_MTU;
 843
 844			err = idpf_rx_bufs_init(q, type);
 845			if (err)
 846				return err;
 847
 848			truesize = q->truesize >> 1;
 849		}
 850	}
 851
 852	return 0;
 853}
 854
 855/**
 856 * idpf_rx_desc_alloc - Allocate queue Rx resources
 857 * @vport: vport to allocate resources for
 858 * @rxq: Rx queue for which the resources are setup
 859 *
 860 * Returns 0 on success, negative on failure
 861 */
 862static int idpf_rx_desc_alloc(const struct idpf_vport *vport,
 863			      struct idpf_rx_queue *rxq)
 864{
 865	struct device *dev = &vport->adapter->pdev->dev;
 866
 867	rxq->size = rxq->desc_count * sizeof(union virtchnl2_rx_desc);
 868
 869	/* Allocate descriptors and also round up to nearest 4K */
 870	rxq->size = ALIGN(rxq->size, 4096);
 871	rxq->desc_ring = dmam_alloc_coherent(dev, rxq->size,
 872					     &rxq->dma, GFP_KERNEL);
 873	if (!rxq->desc_ring) {
 874		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
 875			rxq->size);
 876		return -ENOMEM;
 877	}
 878
 879	rxq->next_to_alloc = 0;
 880	rxq->next_to_clean = 0;
 881	rxq->next_to_use = 0;
 882	idpf_queue_set(GEN_CHK, rxq);
 883
 884	return 0;
 885}
 886
 887/**
 888 * idpf_bufq_desc_alloc - Allocate buffer queue descriptor ring
 889 * @vport: vport to allocate resources for
 890 * @bufq: buffer queue for which the resources are set up
 891 *
 892 * Return: 0 on success, -ENOMEM on failure.
 893 */
 894static int idpf_bufq_desc_alloc(const struct idpf_vport *vport,
 895				struct idpf_buf_queue *bufq)
 896{
 897	struct device *dev = &vport->adapter->pdev->dev;
 898
 899	bufq->size = array_size(bufq->desc_count, sizeof(*bufq->split_buf));
 900
 901	bufq->split_buf = dma_alloc_coherent(dev, bufq->size, &bufq->dma,
 902					     GFP_KERNEL);
 903	if (!bufq->split_buf)
 904		return -ENOMEM;
 905
 906	bufq->next_to_alloc = 0;
 907	bufq->next_to_clean = 0;
 908	bufq->next_to_use = 0;
 909
 910	idpf_queue_set(GEN_CHK, bufq);
 911
 912	return 0;
 913}
 914
 915/**
 916 * idpf_rx_desc_alloc_all - allocate all RX queues resources
 917 * @vport: virtual port structure
 918 *
 919 * Returns 0 on success, negative on failure
 920 */
 921static int idpf_rx_desc_alloc_all(struct idpf_vport *vport)
 922{
 923	struct idpf_rxq_group *rx_qgrp;
 924	int i, j, err;
 925	u16 num_rxq;
 926
 927	for (i = 0; i < vport->num_rxq_grp; i++) {
 928		rx_qgrp = &vport->rxq_grps[i];
 929		if (idpf_is_queue_model_split(vport->rxq_model))
 930			num_rxq = rx_qgrp->splitq.num_rxq_sets;
 931		else
 932			num_rxq = rx_qgrp->singleq.num_rxq;
 933
 934		for (j = 0; j < num_rxq; j++) {
 935			struct idpf_rx_queue *q;
 936
 937			if (idpf_is_queue_model_split(vport->rxq_model))
 938				q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
 939			else
 940				q = rx_qgrp->singleq.rxqs[j];
 941
 942			err = idpf_rx_desc_alloc(vport, q);
 943			if (err) {
 944				pci_err(vport->adapter->pdev,
 945					"Memory allocation for Rx Queue %u failed\n",
 946					i);
 947				goto err_out;
 948			}
 949		}
 950
 951		if (!idpf_is_queue_model_split(vport->rxq_model))
 952			continue;
 953
 954		for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
 955			struct idpf_buf_queue *q;
 956
 957			q = &rx_qgrp->splitq.bufq_sets[j].bufq;
 958
 959			err = idpf_bufq_desc_alloc(vport, q);
 960			if (err) {
 961				pci_err(vport->adapter->pdev,
 962					"Memory allocation for Rx Buffer Queue %u failed\n",
 963					i);
 964				goto err_out;
 965			}
 966		}
 967	}
 968
 969	return 0;
 970
 971err_out:
 972	idpf_rx_desc_rel_all(vport);
 973
 974	return err;
 975}
 976
 977/**
 978 * idpf_txq_group_rel - Release all resources for txq groups
 979 * @vport: vport to release txq groups on
 980 */
 981static void idpf_txq_group_rel(struct idpf_vport *vport)
 982{
 983	bool split, flow_sch_en;
 984	int i, j;
 985
 986	if (!vport->txq_grps)
 987		return;
 988
 989	split = idpf_is_queue_model_split(vport->txq_model);
 990	flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
 991				       VIRTCHNL2_CAP_SPLITQ_QSCHED);
 992
 993	for (i = 0; i < vport->num_txq_grp; i++) {
 994		struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
 995
 996		for (j = 0; j < txq_grp->num_txq; j++) {
 997			kfree(txq_grp->txqs[j]);
 998			txq_grp->txqs[j] = NULL;
 999		}
1000
1001		if (!split)
1002			continue;
1003
1004		kfree(txq_grp->complq);
1005		txq_grp->complq = NULL;
1006
1007		if (flow_sch_en)
1008			kfree(txq_grp->stashes);
1009	}
1010	kfree(vport->txq_grps);
1011	vport->txq_grps = NULL;
1012}
1013
1014/**
1015 * idpf_rxq_sw_queue_rel - Release software queue resources
1016 * @rx_qgrp: rx queue group with software queues
1017 */
1018static void idpf_rxq_sw_queue_rel(struct idpf_rxq_group *rx_qgrp)
1019{
1020	int i, j;
1021
1022	for (i = 0; i < rx_qgrp->vport->num_bufqs_per_qgrp; i++) {
1023		struct idpf_bufq_set *bufq_set = &rx_qgrp->splitq.bufq_sets[i];
1024
1025		for (j = 0; j < bufq_set->num_refillqs; j++) {
1026			kfree(bufq_set->refillqs[j].ring);
1027			bufq_set->refillqs[j].ring = NULL;
1028		}
1029		kfree(bufq_set->refillqs);
1030		bufq_set->refillqs = NULL;
1031	}
1032}
1033
1034/**
1035 * idpf_rxq_group_rel - Release all resources for rxq groups
1036 * @vport: vport to release rxq groups on
1037 */
1038static void idpf_rxq_group_rel(struct idpf_vport *vport)
1039{
1040	int i;
1041
1042	if (!vport->rxq_grps)
1043		return;
1044
1045	for (i = 0; i < vport->num_rxq_grp; i++) {
1046		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1047		u16 num_rxq;
1048		int j;
1049
1050		if (idpf_is_queue_model_split(vport->rxq_model)) {
1051			num_rxq = rx_qgrp->splitq.num_rxq_sets;
1052			for (j = 0; j < num_rxq; j++) {
1053				kfree(rx_qgrp->splitq.rxq_sets[j]);
1054				rx_qgrp->splitq.rxq_sets[j] = NULL;
1055			}
1056
1057			idpf_rxq_sw_queue_rel(rx_qgrp);
1058			kfree(rx_qgrp->splitq.bufq_sets);
1059			rx_qgrp->splitq.bufq_sets = NULL;
1060		} else {
1061			num_rxq = rx_qgrp->singleq.num_rxq;
1062			for (j = 0; j < num_rxq; j++) {
1063				kfree(rx_qgrp->singleq.rxqs[j]);
1064				rx_qgrp->singleq.rxqs[j] = NULL;
1065			}
1066		}
1067	}
1068	kfree(vport->rxq_grps);
1069	vport->rxq_grps = NULL;
1070}
1071
1072/**
1073 * idpf_vport_queue_grp_rel_all - Release all queue groups
1074 * @vport: vport to release queue groups for
1075 */
1076static void idpf_vport_queue_grp_rel_all(struct idpf_vport *vport)
1077{
1078	idpf_txq_group_rel(vport);
1079	idpf_rxq_group_rel(vport);
1080}
1081
1082/**
1083 * idpf_vport_queues_rel - Free memory for all queues
1084 * @vport: virtual port
1085 *
1086 * Free the memory allocated for queues associated to a vport
1087 */
1088void idpf_vport_queues_rel(struct idpf_vport *vport)
1089{
1090	idpf_tx_desc_rel_all(vport);
1091	idpf_rx_desc_rel_all(vport);
1092	idpf_vport_queue_grp_rel_all(vport);
1093
1094	kfree(vport->txqs);
1095	vport->txqs = NULL;
1096}
1097
1098/**
1099 * idpf_vport_init_fast_path_txqs - Initialize fast path txq array
1100 * @vport: vport to init txqs on
1101 *
1102 * We get a queue index from skb->queue_mapping and we need a fast way to
1103 * dereference the queue from queue groups.  This allows us to quickly pull a
1104 * txq based on a queue index.
1105 *
1106 * Returns 0 on success, negative on failure
1107 */
1108static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport)
1109{
1110	int i, j, k = 0;
1111
1112	vport->txqs = kcalloc(vport->num_txq, sizeof(*vport->txqs),
1113			      GFP_KERNEL);
1114
1115	if (!vport->txqs)
1116		return -ENOMEM;
1117
1118	for (i = 0; i < vport->num_txq_grp; i++) {
1119		struct idpf_txq_group *tx_grp = &vport->txq_grps[i];
1120
1121		for (j = 0; j < tx_grp->num_txq; j++, k++) {
1122			vport->txqs[k] = tx_grp->txqs[j];
1123			vport->txqs[k]->idx = k;
1124		}
1125	}
1126
1127	return 0;
1128}
1129
1130/**
1131 * idpf_vport_init_num_qs - Initialize number of queues
1132 * @vport: vport to initialize queues
1133 * @vport_msg: data to be filled into vport
1134 */
1135void idpf_vport_init_num_qs(struct idpf_vport *vport,
1136			    struct virtchnl2_create_vport *vport_msg)
1137{
1138	struct idpf_vport_user_config_data *config_data;
1139	u16 idx = vport->idx;
1140
1141	config_data = &vport->adapter->vport_config[idx]->user_config;
1142	vport->num_txq = le16_to_cpu(vport_msg->num_tx_q);
1143	vport->num_rxq = le16_to_cpu(vport_msg->num_rx_q);
1144	/* number of txqs and rxqs in config data will be zeros only in the
1145	 * driver load path and we don't update them thereafter
1146	 */
1147	if (!config_data->num_req_tx_qs && !config_data->num_req_rx_qs) {
1148		config_data->num_req_tx_qs = le16_to_cpu(vport_msg->num_tx_q);
1149		config_data->num_req_rx_qs = le16_to_cpu(vport_msg->num_rx_q);
1150	}
1151
1152	if (idpf_is_queue_model_split(vport->txq_model))
1153		vport->num_complq = le16_to_cpu(vport_msg->num_tx_complq);
1154	if (idpf_is_queue_model_split(vport->rxq_model))
1155		vport->num_bufq = le16_to_cpu(vport_msg->num_rx_bufq);
1156
1157	/* Adjust number of buffer queues per Rx queue group. */
1158	if (!idpf_is_queue_model_split(vport->rxq_model)) {
1159		vport->num_bufqs_per_qgrp = 0;
1160
1161		return;
1162	}
1163
1164	vport->num_bufqs_per_qgrp = IDPF_MAX_BUFQS_PER_RXQ_GRP;
1165}
1166
1167/**
1168 * idpf_vport_calc_num_q_desc - Calculate number of queue descriptors
1169 * @vport: vport to calculate q descriptors for
1170 */
1171void idpf_vport_calc_num_q_desc(struct idpf_vport *vport)
1172{
1173	struct idpf_vport_user_config_data *config_data;
1174	int num_bufqs = vport->num_bufqs_per_qgrp;
1175	u32 num_req_txq_desc, num_req_rxq_desc;
1176	u16 idx = vport->idx;
1177	int i;
1178
1179	config_data =  &vport->adapter->vport_config[idx]->user_config;
1180	num_req_txq_desc = config_data->num_req_txq_desc;
1181	num_req_rxq_desc = config_data->num_req_rxq_desc;
1182
1183	vport->complq_desc_count = 0;
1184	if (num_req_txq_desc) {
1185		vport->txq_desc_count = num_req_txq_desc;
1186		if (idpf_is_queue_model_split(vport->txq_model)) {
1187			vport->complq_desc_count = num_req_txq_desc;
1188			if (vport->complq_desc_count < IDPF_MIN_TXQ_COMPLQ_DESC)
1189				vport->complq_desc_count =
1190					IDPF_MIN_TXQ_COMPLQ_DESC;
1191		}
1192	} else {
1193		vport->txq_desc_count =	IDPF_DFLT_TX_Q_DESC_COUNT;
1194		if (idpf_is_queue_model_split(vport->txq_model))
1195			vport->complq_desc_count =
1196				IDPF_DFLT_TX_COMPLQ_DESC_COUNT;
1197	}
1198
1199	if (num_req_rxq_desc)
1200		vport->rxq_desc_count = num_req_rxq_desc;
1201	else
1202		vport->rxq_desc_count = IDPF_DFLT_RX_Q_DESC_COUNT;
1203
1204	for (i = 0; i < num_bufqs; i++) {
1205		if (!vport->bufq_desc_count[i])
1206			vport->bufq_desc_count[i] =
1207				IDPF_RX_BUFQ_DESC_COUNT(vport->rxq_desc_count,
1208							num_bufqs);
1209	}
1210}
1211
1212/**
1213 * idpf_vport_calc_total_qs - Calculate total number of queues
1214 * @adapter: private data struct
1215 * @vport_idx: vport idx to retrieve vport pointer
1216 * @vport_msg: message to fill with data
1217 * @max_q: vport max queue info
1218 *
1219 * Return 0 on success, error value on failure.
1220 */
1221int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_idx,
1222			     struct virtchnl2_create_vport *vport_msg,
1223			     struct idpf_vport_max_q *max_q)
1224{
1225	int dflt_splitq_txq_grps = 0, dflt_singleq_txqs = 0;
1226	int dflt_splitq_rxq_grps = 0, dflt_singleq_rxqs = 0;
1227	u16 num_req_tx_qs = 0, num_req_rx_qs = 0;
1228	struct idpf_vport_config *vport_config;
1229	u16 num_txq_grps, num_rxq_grps;
1230	u32 num_qs;
1231
1232	vport_config = adapter->vport_config[vport_idx];
1233	if (vport_config) {
1234		num_req_tx_qs = vport_config->user_config.num_req_tx_qs;
1235		num_req_rx_qs = vport_config->user_config.num_req_rx_qs;
1236	} else {
1237		int num_cpus;
1238
1239		/* Restrict num of queues to cpus online as a default
1240		 * configuration to give best performance. User can always
1241		 * override to a max number of queues via ethtool.
1242		 */
1243		num_cpus = num_online_cpus();
1244
1245		dflt_splitq_txq_grps = min_t(int, max_q->max_txq, num_cpus);
1246		dflt_singleq_txqs = min_t(int, max_q->max_txq, num_cpus);
1247		dflt_splitq_rxq_grps = min_t(int, max_q->max_rxq, num_cpus);
1248		dflt_singleq_rxqs = min_t(int, max_q->max_rxq, num_cpus);
1249	}
1250
1251	if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->txq_model))) {
1252		num_txq_grps = num_req_tx_qs ? num_req_tx_qs : dflt_splitq_txq_grps;
1253		vport_msg->num_tx_complq = cpu_to_le16(num_txq_grps *
1254						       IDPF_COMPLQ_PER_GROUP);
1255		vport_msg->num_tx_q = cpu_to_le16(num_txq_grps *
1256						  IDPF_DFLT_SPLITQ_TXQ_PER_GROUP);
1257	} else {
1258		num_txq_grps = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS;
1259		num_qs = num_txq_grps * (num_req_tx_qs ? num_req_tx_qs :
1260					 dflt_singleq_txqs);
1261		vport_msg->num_tx_q = cpu_to_le16(num_qs);
1262		vport_msg->num_tx_complq = 0;
1263	}
1264	if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->rxq_model))) {
1265		num_rxq_grps = num_req_rx_qs ? num_req_rx_qs : dflt_splitq_rxq_grps;
1266		vport_msg->num_rx_bufq = cpu_to_le16(num_rxq_grps *
1267						     IDPF_MAX_BUFQS_PER_RXQ_GRP);
1268		vport_msg->num_rx_q = cpu_to_le16(num_rxq_grps *
1269						  IDPF_DFLT_SPLITQ_RXQ_PER_GROUP);
1270	} else {
1271		num_rxq_grps = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS;
1272		num_qs = num_rxq_grps * (num_req_rx_qs ? num_req_rx_qs :
1273					 dflt_singleq_rxqs);
1274		vport_msg->num_rx_q = cpu_to_le16(num_qs);
1275		vport_msg->num_rx_bufq = 0;
1276	}
1277
1278	return 0;
1279}
1280
1281/**
1282 * idpf_vport_calc_num_q_groups - Calculate number of queue groups
1283 * @vport: vport to calculate q groups for
1284 */
1285void idpf_vport_calc_num_q_groups(struct idpf_vport *vport)
1286{
1287	if (idpf_is_queue_model_split(vport->txq_model))
1288		vport->num_txq_grp = vport->num_txq;
1289	else
1290		vport->num_txq_grp = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS;
1291
1292	if (idpf_is_queue_model_split(vport->rxq_model))
1293		vport->num_rxq_grp = vport->num_rxq;
1294	else
1295		vport->num_rxq_grp = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS;
1296}
1297
1298/**
1299 * idpf_vport_calc_numq_per_grp - Calculate number of queues per group
1300 * @vport: vport to calculate queues for
1301 * @num_txq: return parameter for number of TX queues
1302 * @num_rxq: return parameter for number of RX queues
1303 */
1304static void idpf_vport_calc_numq_per_grp(struct idpf_vport *vport,
1305					 u16 *num_txq, u16 *num_rxq)
1306{
1307	if (idpf_is_queue_model_split(vport->txq_model))
1308		*num_txq = IDPF_DFLT_SPLITQ_TXQ_PER_GROUP;
1309	else
1310		*num_txq = vport->num_txq;
1311
1312	if (idpf_is_queue_model_split(vport->rxq_model))
1313		*num_rxq = IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
1314	else
1315		*num_rxq = vport->num_rxq;
1316}
1317
1318/**
1319 * idpf_rxq_set_descids - set the descids supported by this queue
1320 * @vport: virtual port data structure
1321 * @q: rx queue for which descids are set
1322 *
1323 */
1324static void idpf_rxq_set_descids(const struct idpf_vport *vport,
1325				 struct idpf_rx_queue *q)
1326{
1327	if (idpf_is_queue_model_split(vport->rxq_model)) {
1328		q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
1329	} else {
1330		if (vport->base_rxd)
1331			q->rxdids = VIRTCHNL2_RXDID_1_32B_BASE_M;
1332		else
1333			q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
1334	}
1335}
1336
1337/**
1338 * idpf_txq_group_alloc - Allocate all txq group resources
1339 * @vport: vport to allocate txq groups for
1340 * @num_txq: number of txqs to allocate for each group
1341 *
1342 * Returns 0 on success, negative on failure
1343 */
1344static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
1345{
1346	bool split, flow_sch_en;
1347	int i;
1348
1349	vport->txq_grps = kcalloc(vport->num_txq_grp,
1350				  sizeof(*vport->txq_grps), GFP_KERNEL);
1351	if (!vport->txq_grps)
1352		return -ENOMEM;
1353
1354	split = idpf_is_queue_model_split(vport->txq_model);
1355	flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
1356				       VIRTCHNL2_CAP_SPLITQ_QSCHED);
1357
1358	for (i = 0; i < vport->num_txq_grp; i++) {
1359		struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
1360		struct idpf_adapter *adapter = vport->adapter;
1361		struct idpf_txq_stash *stashes;
1362		int j;
1363
1364		tx_qgrp->vport = vport;
1365		tx_qgrp->num_txq = num_txq;
1366
1367		for (j = 0; j < tx_qgrp->num_txq; j++) {
1368			tx_qgrp->txqs[j] = kzalloc(sizeof(*tx_qgrp->txqs[j]),
1369						   GFP_KERNEL);
1370			if (!tx_qgrp->txqs[j])
1371				goto err_alloc;
1372		}
1373
1374		if (split && flow_sch_en) {
1375			stashes = kcalloc(num_txq, sizeof(*stashes),
1376					  GFP_KERNEL);
1377			if (!stashes)
1378				goto err_alloc;
1379
1380			tx_qgrp->stashes = stashes;
1381		}
1382
1383		for (j = 0; j < tx_qgrp->num_txq; j++) {
1384			struct idpf_tx_queue *q = tx_qgrp->txqs[j];
1385
1386			q->dev = &adapter->pdev->dev;
1387			q->desc_count = vport->txq_desc_count;
1388			q->tx_max_bufs = idpf_get_max_tx_bufs(adapter);
1389			q->tx_min_pkt_len = idpf_get_min_tx_pkt_len(adapter);
1390			q->netdev = vport->netdev;
1391			q->txq_grp = tx_qgrp;
1392
1393			if (!split) {
1394				q->clean_budget = vport->compln_clean_budget;
1395				idpf_queue_assign(CRC_EN, q,
1396						  vport->crc_enable);
1397			}
1398
1399			if (!flow_sch_en)
1400				continue;
1401
1402			if (split) {
1403				q->stash = &stashes[j];
1404				hash_init(q->stash->sched_buf_hash);
1405			}
1406
1407			idpf_queue_set(FLOW_SCH_EN, q);
1408		}
1409
1410		if (!split)
1411			continue;
1412
1413		tx_qgrp->complq = kcalloc(IDPF_COMPLQ_PER_GROUP,
1414					  sizeof(*tx_qgrp->complq),
1415					  GFP_KERNEL);
1416		if (!tx_qgrp->complq)
1417			goto err_alloc;
1418
1419		tx_qgrp->complq->desc_count = vport->complq_desc_count;
1420		tx_qgrp->complq->txq_grp = tx_qgrp;
1421		tx_qgrp->complq->netdev = vport->netdev;
1422		tx_qgrp->complq->clean_budget = vport->compln_clean_budget;
1423
1424		if (flow_sch_en)
1425			idpf_queue_set(FLOW_SCH_EN, tx_qgrp->complq);
1426	}
1427
1428	return 0;
1429
1430err_alloc:
1431	idpf_txq_group_rel(vport);
1432
1433	return -ENOMEM;
1434}
1435
1436/**
1437 * idpf_rxq_group_alloc - Allocate all rxq group resources
1438 * @vport: vport to allocate rxq groups for
1439 * @num_rxq: number of rxqs to allocate for each group
1440 *
1441 * Returns 0 on success, negative on failure
1442 */
1443static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
1444{
1445	int i, k, err = 0;
1446	bool hs;
1447
1448	vport->rxq_grps = kcalloc(vport->num_rxq_grp,
1449				  sizeof(struct idpf_rxq_group), GFP_KERNEL);
1450	if (!vport->rxq_grps)
1451		return -ENOMEM;
1452
1453	hs = idpf_vport_get_hsplit(vport) == ETHTOOL_TCP_DATA_SPLIT_ENABLED;
1454
1455	for (i = 0; i < vport->num_rxq_grp; i++) {
1456		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1457		int j;
1458
1459		rx_qgrp->vport = vport;
1460		if (!idpf_is_queue_model_split(vport->rxq_model)) {
1461			rx_qgrp->singleq.num_rxq = num_rxq;
1462			for (j = 0; j < num_rxq; j++) {
1463				rx_qgrp->singleq.rxqs[j] =
1464						kzalloc(sizeof(*rx_qgrp->singleq.rxqs[j]),
1465							GFP_KERNEL);
1466				if (!rx_qgrp->singleq.rxqs[j]) {
1467					err = -ENOMEM;
1468					goto err_alloc;
1469				}
1470			}
1471			goto skip_splitq_rx_init;
1472		}
1473		rx_qgrp->splitq.num_rxq_sets = num_rxq;
1474
1475		for (j = 0; j < num_rxq; j++) {
1476			rx_qgrp->splitq.rxq_sets[j] =
1477				kzalloc(sizeof(struct idpf_rxq_set),
1478					GFP_KERNEL);
1479			if (!rx_qgrp->splitq.rxq_sets[j]) {
1480				err = -ENOMEM;
1481				goto err_alloc;
1482			}
1483		}
1484
1485		rx_qgrp->splitq.bufq_sets = kcalloc(vport->num_bufqs_per_qgrp,
1486						    sizeof(struct idpf_bufq_set),
1487						    GFP_KERNEL);
1488		if (!rx_qgrp->splitq.bufq_sets) {
1489			err = -ENOMEM;
1490			goto err_alloc;
1491		}
1492
1493		for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
1494			struct idpf_bufq_set *bufq_set =
1495				&rx_qgrp->splitq.bufq_sets[j];
1496			int swq_size = sizeof(struct idpf_sw_queue);
1497			struct idpf_buf_queue *q;
1498
1499			q = &rx_qgrp->splitq.bufq_sets[j].bufq;
1500			q->desc_count = vport->bufq_desc_count[j];
1501			q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
1502
1503			idpf_queue_assign(HSPLIT_EN, q, hs);
1504
1505			bufq_set->num_refillqs = num_rxq;
1506			bufq_set->refillqs = kcalloc(num_rxq, swq_size,
1507						     GFP_KERNEL);
1508			if (!bufq_set->refillqs) {
1509				err = -ENOMEM;
1510				goto err_alloc;
1511			}
1512			for (k = 0; k < bufq_set->num_refillqs; k++) {
1513				struct idpf_sw_queue *refillq =
1514					&bufq_set->refillqs[k];
1515
1516				refillq->desc_count =
1517					vport->bufq_desc_count[j];
1518				idpf_queue_set(GEN_CHK, refillq);
1519				idpf_queue_set(RFL_GEN_CHK, refillq);
1520				refillq->ring = kcalloc(refillq->desc_count,
1521							sizeof(*refillq->ring),
1522							GFP_KERNEL);
1523				if (!refillq->ring) {
1524					err = -ENOMEM;
1525					goto err_alloc;
1526				}
1527			}
1528		}
1529
1530skip_splitq_rx_init:
1531		for (j = 0; j < num_rxq; j++) {
1532			struct idpf_rx_queue *q;
1533
1534			if (!idpf_is_queue_model_split(vport->rxq_model)) {
1535				q = rx_qgrp->singleq.rxqs[j];
1536				goto setup_rxq;
1537			}
1538			q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
1539			rx_qgrp->splitq.rxq_sets[j]->refillq[0] =
1540			      &rx_qgrp->splitq.bufq_sets[0].refillqs[j];
1541			if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP)
1542				rx_qgrp->splitq.rxq_sets[j]->refillq[1] =
1543				      &rx_qgrp->splitq.bufq_sets[1].refillqs[j];
1544
1545			idpf_queue_assign(HSPLIT_EN, q, hs);
1546
1547setup_rxq:
1548			q->desc_count = vport->rxq_desc_count;
1549			q->rx_ptype_lkup = vport->rx_ptype_lkup;
1550			q->netdev = vport->netdev;
1551			q->bufq_sets = rx_qgrp->splitq.bufq_sets;
1552			q->idx = (i * num_rxq) + j;
1553			q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
1554			q->rx_max_pkt_size = vport->netdev->mtu +
1555							LIBETH_RX_LL_LEN;
1556			idpf_rxq_set_descids(vport, q);
1557		}
1558	}
1559
1560err_alloc:
1561	if (err)
1562		idpf_rxq_group_rel(vport);
1563
1564	return err;
1565}
1566
1567/**
1568 * idpf_vport_queue_grp_alloc_all - Allocate all queue groups/resources
1569 * @vport: vport with qgrps to allocate
1570 *
1571 * Returns 0 on success, negative on failure
1572 */
1573static int idpf_vport_queue_grp_alloc_all(struct idpf_vport *vport)
1574{
1575	u16 num_txq, num_rxq;
1576	int err;
1577
1578	idpf_vport_calc_numq_per_grp(vport, &num_txq, &num_rxq);
1579
1580	err = idpf_txq_group_alloc(vport, num_txq);
1581	if (err)
1582		goto err_out;
1583
1584	err = idpf_rxq_group_alloc(vport, num_rxq);
1585	if (err)
1586		goto err_out;
1587
1588	return 0;
1589
1590err_out:
1591	idpf_vport_queue_grp_rel_all(vport);
1592
1593	return err;
1594}
1595
1596/**
1597 * idpf_vport_queues_alloc - Allocate memory for all queues
1598 * @vport: virtual port
1599 *
1600 * Allocate memory for queues associated with a vport.  Returns 0 on success,
1601 * negative on failure.
1602 */
1603int idpf_vport_queues_alloc(struct idpf_vport *vport)
1604{
1605	int err;
1606
1607	err = idpf_vport_queue_grp_alloc_all(vport);
1608	if (err)
1609		goto err_out;
1610
1611	err = idpf_tx_desc_alloc_all(vport);
1612	if (err)
1613		goto err_out;
1614
1615	err = idpf_rx_desc_alloc_all(vport);
1616	if (err)
1617		goto err_out;
1618
1619	err = idpf_vport_init_fast_path_txqs(vport);
1620	if (err)
1621		goto err_out;
1622
1623	return 0;
1624
1625err_out:
1626	idpf_vport_queues_rel(vport);
1627
1628	return err;
1629}
1630
1631/**
1632 * idpf_tx_handle_sw_marker - Handle queue marker packet
1633 * @tx_q: tx queue to handle software marker
1634 */
1635static void idpf_tx_handle_sw_marker(struct idpf_tx_queue *tx_q)
1636{
1637	struct idpf_netdev_priv *priv = netdev_priv(tx_q->netdev);
1638	struct idpf_vport *vport = priv->vport;
1639	int i;
1640
1641	idpf_queue_clear(SW_MARKER, tx_q);
1642	/* Hardware must write marker packets to all queues associated with
1643	 * completion queues. So check if all queues received marker packets
1644	 */
1645	for (i = 0; i < vport->num_txq; i++)
1646		/* If we're still waiting on any other TXQ marker completions,
1647		 * just return now since we cannot wake up the marker_wq yet.
1648		 */
1649		if (idpf_queue_has(SW_MARKER, vport->txqs[i]))
1650			return;
1651
1652	/* Drain complete */
1653	set_bit(IDPF_VPORT_SW_MARKER, vport->flags);
1654	wake_up(&vport->sw_marker_wq);
1655}
1656
1657/**
1658 * idpf_tx_clean_stashed_bufs - clean bufs that were stored for
1659 * out of order completions
1660 * @txq: queue to clean
1661 * @compl_tag: completion tag of packet to clean (from completion descriptor)
1662 * @cleaned: pointer to stats struct to track cleaned packets/bytes
1663 * @budget: Used to determine if we are in netpoll
1664 */
1665static void idpf_tx_clean_stashed_bufs(struct idpf_tx_queue *txq,
1666				       u16 compl_tag,
1667				       struct libeth_sq_napi_stats *cleaned,
1668				       int budget)
1669{
1670	struct idpf_tx_stash *stash;
1671	struct hlist_node *tmp_buf;
1672	struct libeth_cq_pp cp = {
1673		.dev	= txq->dev,
1674		.ss	= cleaned,
1675		.napi	= budget,
1676	};
1677
1678	/* Buffer completion */
1679	hash_for_each_possible_safe(txq->stash->sched_buf_hash, stash, tmp_buf,
1680				    hlist, compl_tag) {
1681		if (unlikely(idpf_tx_buf_compl_tag(&stash->buf) != compl_tag))
1682			continue;
1683
1684		hash_del(&stash->hlist);
1685		libeth_tx_complete(&stash->buf, &cp);
1686
1687		/* Push shadow buf back onto stack */
1688		idpf_buf_lifo_push(&txq->stash->buf_stack, stash);
1689	}
1690}
1691
1692/**
1693 * idpf_stash_flow_sch_buffers - store buffer parameter info to be freed at a
1694 * later time (only relevant for flow scheduling mode)
1695 * @txq: Tx queue to clean
1696 * @tx_buf: buffer to store
1697 */
1698static int idpf_stash_flow_sch_buffers(struct idpf_tx_queue *txq,
1699				       struct idpf_tx_buf *tx_buf)
1700{
1701	struct idpf_tx_stash *stash;
1702
1703	if (unlikely(tx_buf->type <= LIBETH_SQE_CTX))
1704		return 0;
1705
1706	stash = idpf_buf_lifo_pop(&txq->stash->buf_stack);
1707	if (unlikely(!stash)) {
1708		net_err_ratelimited("%s: No out-of-order TX buffers left!\n",
1709				    netdev_name(txq->netdev));
1710
1711		return -ENOMEM;
1712	}
1713
1714	/* Store buffer params in shadow buffer */
1715	stash->buf.skb = tx_buf->skb;
1716	stash->buf.bytes = tx_buf->bytes;
1717	stash->buf.packets = tx_buf->packets;
1718	stash->buf.type = tx_buf->type;
1719	stash->buf.nr_frags = tx_buf->nr_frags;
1720	dma_unmap_addr_set(&stash->buf, dma, dma_unmap_addr(tx_buf, dma));
1721	dma_unmap_len_set(&stash->buf, len, dma_unmap_len(tx_buf, len));
1722	idpf_tx_buf_compl_tag(&stash->buf) = idpf_tx_buf_compl_tag(tx_buf);
1723
1724	/* Add buffer to buf_hash table to be freed later */
1725	hash_add(txq->stash->sched_buf_hash, &stash->hlist,
1726		 idpf_tx_buf_compl_tag(&stash->buf));
1727
1728	tx_buf->type = LIBETH_SQE_EMPTY;
1729
1730	return 0;
1731}
1732
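/*
 * Advance the descriptor and buffer ring pointers in lockstep, wrapping
 * both back to the start of the ring once the end is reached.
 */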
1733#define idpf_tx_splitq_clean_bump_ntc(txq, ntc, desc, buf)	\
1734do {								\
1735	if (unlikely(++(ntc) == (txq)->desc_count)) {		\
1736		ntc = 0;					\
1737		buf = (txq)->tx_buf;				\
1738		desc = &(txq)->flex_tx[0];			\
1739	} else {						\
1740		(buf)++;					\
1741		(desc)++;					\
1742	}							\
1743} while (0)
1744
1745/**
1746 * idpf_tx_splitq_clean - Reclaim resources from buffer queue
1747 * @tx_q: Tx queue to clean
1748 * @end: queue index until which it should be cleaned
1749 * @napi_budget: Used to determine if we are in netpoll
1750 * @cleaned: pointer to stats struct to track cleaned packets/bytes
1751 * @descs_only: true if queue is using flow-based scheduling and should
1752 * not clean buffers at this time
1753 *
1754 * Cleans the queue descriptor ring. If the queue is using queue-based
1755 * scheduling, the buffers will be cleaned as well. If the queue is using
1756 * flow-based scheduling, only the descriptors are cleaned at this time.
1757 * Separate packet completion events will be reported on the completion queue,
1758 * and the buffers will be cleaned separately. The stats are not updated from
1759 * this function when using flow-based scheduling.
1760 *
1761 * Furthermore, in flow scheduling mode, check to make sure there are enough
1762 * reserve buffers to stash the packet. If there are not, return early, which
1763 * will leave next_to_clean pointing to the packet that failed to be stashed.
1764 *
1765 * Return: false in the scenario above, true otherwise.
1766 */
1767static bool idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
1768				 int napi_budget,
1769				 struct libeth_sq_napi_stats *cleaned,
1770				 bool descs_only)
1771{
1772	union idpf_tx_flex_desc *next_pending_desc = NULL;
1773	union idpf_tx_flex_desc *tx_desc;
1774	u32 ntc = tx_q->next_to_clean;
1775	struct libeth_cq_pp cp = {
1776		.dev	= tx_q->dev,
1777		.ss	= cleaned,
1778		.napi	= napi_budget,
1779	};
1780	struct idpf_tx_buf *tx_buf;
1781	bool clean_complete = true;
1782
1783	tx_desc = &tx_q->flex_tx[ntc];
1784	next_pending_desc = &tx_q->flex_tx[end];
1785	tx_buf = &tx_q->tx_buf[ntc];
1786
1787	while (tx_desc != next_pending_desc) {
1788		u32 eop_idx;
1789
1790		/* If this entry in the ring was used as a context descriptor,
1791		 * its corresponding entry in the buffer ring is reserved. We
1792		 * can skip this descriptor since there is no buffer to clean.
1793		 */
1794		if (tx_buf->type <= LIBETH_SQE_CTX)
1795			goto fetch_next_txq_desc;
1796
1797		if (unlikely(tx_buf->type != LIBETH_SQE_SKB))
1798			break;
1799
1800		eop_idx = tx_buf->rs_idx;
1801
1802		if (descs_only) {
1803			if (IDPF_TX_BUF_RSV_UNUSED(tx_q) < tx_buf->nr_frags) {
1804				clean_complete = false;
1805				goto tx_splitq_clean_out;
1806			}
1807
1808			idpf_stash_flow_sch_buffers(tx_q, tx_buf);
1809
1810			while (ntc != eop_idx) {
1811				idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
1812							      tx_desc, tx_buf);
1813				idpf_stash_flow_sch_buffers(tx_q, tx_buf);
1814			}
1815		} else {
1816			libeth_tx_complete(tx_buf, &cp);
1817
1818			/* unmap remaining buffers */
1819			while (ntc != eop_idx) {
1820				idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
1821							      tx_desc, tx_buf);
1822
1823				/* unmap any remaining paged data */
1824				libeth_tx_complete(tx_buf, &cp);
1825			}
1826		}
1827
1828fetch_next_txq_desc:
1829		idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, tx_desc, tx_buf);
1830	}
1831
1832tx_splitq_clean_out:
1833	tx_q->next_to_clean = ntc;
1834
1835	return clean_complete;
1836}
1837
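/*
 * Advance the Tx buffer pointer and ring index together, wrapping back to
 * the start of the buffer ring when the end of the ring is reached.
 */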
1838#define idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, buf)	\
1839do {							\
1840	(buf)++;					\
1841	(ntc)++;					\
1842	if (unlikely((ntc) == (txq)->desc_count)) {	\
1843		buf = (txq)->tx_buf;			\
1844		ntc = 0;				\
1845	}						\
1846} while (0)
1847
1848/**
1849 * idpf_tx_clean_buf_ring - clean flow scheduling TX queue buffers
1850 * @txq: queue to clean
1851 * @compl_tag: completion tag of packet to clean (from completion descriptor)
1852 * @cleaned: pointer to stats struct to track cleaned packets/bytes
1853 * @budget: Used to determine if we are in netpoll
1854 *
1855 * Cleans all buffers associated with the input completion tag either from the
1856 * TX buffer ring or from the hash table if the buffers were previously
1857 * stashed. Returns the byte/segment count for the cleaned packet associated
1858 * with this completion tag.
1859 */
1860static bool idpf_tx_clean_buf_ring(struct idpf_tx_queue *txq, u16 compl_tag,
1861				   struct libeth_sq_napi_stats *cleaned,
1862				   int budget)
1863{
1864	u16 idx = compl_tag & txq->compl_tag_bufid_m;
1865	struct idpf_tx_buf *tx_buf = NULL;
1866	struct libeth_cq_pp cp = {
1867		.dev	= txq->dev,
1868		.ss	= cleaned,
1869		.napi	= budget,
1870	};
1871	u16 ntc, orig_idx = idx;
1872
1873	tx_buf = &txq->tx_buf[idx];
1874
1875	if (unlikely(tx_buf->type <= LIBETH_SQE_CTX ||
1876		     idpf_tx_buf_compl_tag(tx_buf) != compl_tag))
1877		return false;
1878
1879	if (tx_buf->type == LIBETH_SQE_SKB)
1880		libeth_tx_complete(tx_buf, &cp);
1881
1882	idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf);
1883
1884	while (idpf_tx_buf_compl_tag(tx_buf) == compl_tag) {
1885		libeth_tx_complete(tx_buf, &cp);
1886		idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf);
1887	}
1888
1889	/*
1890	 * It's possible the packet we just cleaned was an out of order
1891	 * completion, which means we can stash the buffers starting from
1892	 * the original next_to_clean and reuse the descriptors. We need
1893	 * to compare the descriptor ring next_to_clean packet's "first" buffer
1894	 * to the "first" buffer of the packet we just cleaned to determine if
1895	 * this is the case. However, next_to_clean can point to either a
1896	 * reserved buffer that corresponds to a context descriptor used for the
1897	 * next_to_clean packet (TSO packet) or the "first" buffer (single
1898	 * packet). The orig_idx from the packet we just cleaned will always
1899	 * point to the "first" buffer. If next_to_clean points to a reserved
1900	 * buffer, let's bump ntc once and start the comparison from there.
1901	 */
1902	ntc = txq->next_to_clean;
1903	tx_buf = &txq->tx_buf[ntc];
1904
1905	if (tx_buf->type == LIBETH_SQE_CTX)
1906		idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, tx_buf);
1907
1908	/*
1909	 * If ntc still points to a different "first" buffer, clean the
1910	 * descriptor ring and stash all of the buffers for later cleaning. If
1911	 * we cannot stash all of the buffers, next_to_clean will point to the
1912	 * "first" buffer of the packet that could not be stashed and cleaning
1913	 * will start there next time.
1914	 */
1915	if (unlikely(tx_buf != &txq->tx_buf[orig_idx] &&
1916		     !idpf_tx_splitq_clean(txq, orig_idx, budget, cleaned,
1917					   true)))
1918		return true;
1919
1920	/*
1921	 * Otherwise, update next_to_clean to reflect the cleaning that was
1922	 * done above.
1923	 */
1924	txq->next_to_clean = idx;
1925
1926	return true;
1927}
1928
1929/**
1930 * idpf_tx_handle_rs_completion - clean a single packet and all of its buffers
1931 * whether on the buffer ring or in the hash table
1932 * @txq: Tx ring to clean
1933 * @desc: pointer to completion queue descriptor to extract completion
1934 * information from
1935 * @cleaned: pointer to stats struct to track cleaned packets/bytes
1936 * @budget: Used to determine if we are in netpoll
1937 *
1938 * Cleaned byte/packet counts are returned via @cleaned.
1939 */
1940static void idpf_tx_handle_rs_completion(struct idpf_tx_queue *txq,
1941					 struct idpf_splitq_tx_compl_desc *desc,
1942					 struct libeth_sq_napi_stats *cleaned,
1943					 int budget)
1944{
1945	u16 compl_tag;
1946
1947	if (!idpf_queue_has(FLOW_SCH_EN, txq)) {
1948		u16 head = le16_to_cpu(desc->q_head_compl_tag.q_head);
1949
1950		idpf_tx_splitq_clean(txq, head, budget, cleaned, false);
1951		return;
1952	}
1953
1954	compl_tag = le16_to_cpu(desc->q_head_compl_tag.compl_tag);
1955
1956	/* If we didn't clean anything on the ring, this packet must be
1957	 * in the hash table. Go clean it there.
1958	 */
1959	if (!idpf_tx_clean_buf_ring(txq, compl_tag, cleaned, budget))
1960		idpf_tx_clean_stashed_bufs(txq, compl_tag, cleaned, budget);
1961}
1962
1963/**
1964 * idpf_tx_clean_complq - Reclaim resources on completion queue
1965 * @complq: Tx ring to clean
1966 * @budget: Used to determine if we are in netpoll
1967 * @cleaned: returns number of packets cleaned
1968 *
1969 * Returns true if there's any budget left (i.e. the clean is finished)
1970 */
1971static bool idpf_tx_clean_complq(struct idpf_compl_queue *complq, int budget,
1972				 int *cleaned)
1973{
1974	struct idpf_splitq_tx_compl_desc *tx_desc;
1975	s16 ntc = complq->next_to_clean;
1976	struct idpf_netdev_priv *np;
1977	unsigned int complq_budget;
1978	bool complq_ok = true;
1979	int i;
1980
1981	complq_budget = complq->clean_budget;
1982	tx_desc = &complq->comp[ntc];
1983	ntc -= complq->desc_count;
1984
1985	do {
1986		struct libeth_sq_napi_stats cleaned_stats = { };
1987		struct idpf_tx_queue *tx_q;
1988		int rel_tx_qid;
1989		u16 hw_head;
1990		u8 ctype;	/* completion type */
1991		u16 gen;
1992
1993		/* if the descriptor isn't done, no work yet to do */
1994		gen = le16_get_bits(tx_desc->qid_comptype_gen,
1995				    IDPF_TXD_COMPLQ_GEN_M);
1996		if (idpf_queue_has(GEN_CHK, complq) != gen)
1997			break;
1998
1999		/* Find necessary info of TX queue to clean buffers */
2000		rel_tx_qid = le16_get_bits(tx_desc->qid_comptype_gen,
2001					   IDPF_TXD_COMPLQ_QID_M);
2002		if (rel_tx_qid >= complq->txq_grp->num_txq ||
2003		    !complq->txq_grp->txqs[rel_tx_qid]) {
2004			netdev_err(complq->netdev, "TxQ not found\n");
2005			goto fetch_next_desc;
2006		}
2007		tx_q = complq->txq_grp->txqs[rel_tx_qid];
2008
2009		/* Determine completion type */
2010		ctype = le16_get_bits(tx_desc->qid_comptype_gen,
2011				      IDPF_TXD_COMPLQ_COMPL_TYPE_M);
2012		switch (ctype) {
2013		case IDPF_TXD_COMPLT_RE:
2014			hw_head = le16_to_cpu(tx_desc->q_head_compl_tag.q_head);
2015
2016			idpf_tx_splitq_clean(tx_q, hw_head, budget,
2017					     &cleaned_stats, true);
2018			break;
2019		case IDPF_TXD_COMPLT_RS:
2020			idpf_tx_handle_rs_completion(tx_q, tx_desc,
2021						     &cleaned_stats, budget);
2022			break;
2023		case IDPF_TXD_COMPLT_SW_MARKER:
2024			idpf_tx_handle_sw_marker(tx_q);
2025			break;
2026		default:
2027			netdev_err(tx_q->netdev,
2028				   "Unknown TX completion type: %d\n", ctype);
2029			goto fetch_next_desc;
2030		}
2031
2032		u64_stats_update_begin(&tx_q->stats_sync);
2033		u64_stats_add(&tx_q->q_stats.packets, cleaned_stats.packets);
2034		u64_stats_add(&tx_q->q_stats.bytes, cleaned_stats.bytes);
2035		tx_q->cleaned_pkts += cleaned_stats.packets;
2036		tx_q->cleaned_bytes += cleaned_stats.bytes;
2037		complq->num_completions++;
2038		u64_stats_update_end(&tx_q->stats_sync);
2039
2040fetch_next_desc:
2041		tx_desc++;
2042		ntc++;
2043		if (unlikely(!ntc)) {
2044			ntc -= complq->desc_count;
2045			tx_desc = &complq->comp[0];
2046			idpf_queue_change(GEN_CHK, complq);
2047		}
2048
2049		prefetch(tx_desc);
2050
2051		/* update budget accounting */
2052		complq_budget--;
2053	} while (likely(complq_budget));
2054
2055	/* Store the state of the complq to be used later in deciding if a
2056	 * TXQ can be started again
2057	 */
2058	if (unlikely(IDPF_TX_COMPLQ_PENDING(complq->txq_grp) >
2059		     IDPF_TX_COMPLQ_OVERFLOW_THRESH(complq)))
2060		complq_ok = false;
2061
2062	np = netdev_priv(complq->netdev);
2063	for (i = 0; i < complq->txq_grp->num_txq; ++i) {
2064		struct idpf_tx_queue *tx_q = complq->txq_grp->txqs[i];
2065		struct netdev_queue *nq;
2066		bool dont_wake;
2067
2068		/* We didn't clean anything on this queue, move along */
2069		if (!tx_q->cleaned_bytes)
2070			continue;
2071
2072		*cleaned += tx_q->cleaned_pkts;
2073
2074		/* Update BQL */
2075		nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
2076
2077		dont_wake = !complq_ok || IDPF_TX_BUF_RSV_LOW(tx_q) ||
2078			    np->state != __IDPF_VPORT_UP ||
2079			    !netif_carrier_ok(tx_q->netdev);
2080		/* Check if the TXQ needs to and can be restarted */
2081		__netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes,
2082					   IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH,
2083					   dont_wake);
2084
2085		/* Reset cleaned stats for the next time this queue is
2086		 * cleaned
2087		 */
2088		tx_q->cleaned_bytes = 0;
2089		tx_q->cleaned_pkts = 0;
2090	}
2091
2092	ntc += complq->desc_count;
2093	complq->next_to_clean = ntc;
2094
2095	return !!complq_budget;
2096}
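
/*
 * Worked example (hypothetical values) of the biased next_to_clean scheme
 * used above, assuming a 64-entry completion ring. ntc is kept biased by
 * -desc_count so a single "!ntc" test detects the wrap:
 *
 *	start:  next_to_clean = 63  ->  ntc = 63 - 64 = -1, tx_desc = &comp[63]
 *	bump:   ntc++ == 0          ->  wrap: ntc = -64, tx_desc = &comp[0],
 *	                                GEN_CHK flips
 *	finish: next_to_clean = ntc + 64
 *
 * A descriptor is only treated as done while its GEN bit matches the ring's
 * GEN_CHK flag, so descriptors written during the previous pass around the
 * ring are ignored.
 */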
2097
2098/**
2099 * idpf_tx_splitq_build_ctb - populate command tag and size for queue
2100 * based scheduling descriptors
2101 * @desc: descriptor to populate
2102 * @params: pointer to tx params struct
2103 * @td_cmd: command to be filled in desc
2104 * @size: size of buffer
2105 */
2106void idpf_tx_splitq_build_ctb(union idpf_tx_flex_desc *desc,
2107			      struct idpf_tx_splitq_params *params,
2108			      u16 td_cmd, u16 size)
2109{
2110	desc->q.qw1.cmd_dtype =
2111		le16_encode_bits(params->dtype, IDPF_FLEX_TXD_QW1_DTYPE_M);
2112	desc->q.qw1.cmd_dtype |=
2113		le16_encode_bits(td_cmd, IDPF_FLEX_TXD_QW1_CMD_M);
2114	desc->q.qw1.buf_size = cpu_to_le16(size);
2115	desc->q.qw1.l2tags.l2tag1 = cpu_to_le16(params->td_tag);
2116}
2117
2118/**
2119 * idpf_tx_splitq_build_flow_desc - populate command tag and size for flow
2120 * scheduling descriptors
2121 * @desc: descriptor to populate
2122 * @params: pointer to tx params struct
2123 * @td_cmd: command to be filled in desc
2124 * @size: size of buffer
2125 */
2126void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
2127				    struct idpf_tx_splitq_params *params,
2128				    u16 td_cmd, u16 size)
2129{
2130	desc->flow.qw1.cmd_dtype = (u16)params->dtype | td_cmd;
2131	desc->flow.qw1.rxr_bufsize = cpu_to_le16((u16)size);
2132	desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag);
2133}
2134
2135/**
2136 * idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions
2137 * @tx_q: the queue to be checked
2138 * @descs_needed: number of descriptors required for this packet
2139 *
2140 * Returns 0 if stop is not needed
2141 */
2142static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q,
2143				     unsigned int descs_needed)
2144{
2145	if (idpf_tx_maybe_stop_common(tx_q, descs_needed))
2146		goto out;
2147
2148	/* If there are too many outstanding completions expected on the
2149	 * completion queue, stop the TX queue to give the device some time to
2150	 * catch up
2151	 */
2152	if (unlikely(IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
2153		     IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq)))
2154		goto splitq_stop;
2155
2156	/* Also check for available bookkeeping buffers; if we are low, stop
2157	 * the queue to wait for more completions
2158	 */
2159	if (unlikely(IDPF_TX_BUF_RSV_LOW(tx_q)))
2160		goto splitq_stop;
2161
2162	return 0;
2163
2164splitq_stop:
2165	netif_stop_subqueue(tx_q->netdev, tx_q->idx);
2166
2167out:
2168	u64_stats_update_begin(&tx_q->stats_sync);
2169	u64_stats_inc(&tx_q->q_stats.q_busy);
2170	u64_stats_update_end(&tx_q->stats_sync);
2171
2172	return -EBUSY;
2173}
2174
2175/**
2176 * idpf_tx_buf_hw_update - Store the new tail value
2177 * @tx_q: queue to bump
2178 * @val: new tail index
2179 * @xmit_more: more skb's pending
2180 *
2181 * The naming here is special in that 'hw' signals that this function is about
2182 * to do a register write to update our queue status. We know this can only
2183 * mean tail here as HW should be owning head for TX.
2184 */
2185void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
2186			   bool xmit_more)
2187{
2188	struct netdev_queue *nq;
2189
2190	nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
2191	tx_q->next_to_use = val;
2192
2193	if (idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED)) {
2194		u64_stats_update_begin(&tx_q->stats_sync);
2195		u64_stats_inc(&tx_q->q_stats.q_busy);
2196		u64_stats_update_end(&tx_q->stats_sync);
2197	}
2198
2199	/* Force memory writes to complete before letting h/w
2200	 * know there are new descriptors to fetch.  (Only
2201	 * applicable for weak-ordered memory model archs,
2202	 * such as IA-64).
2203	 */
2204	wmb();
2205
2206	/* notify HW of packet */
2207	if (netif_xmit_stopped(nq) || !xmit_more)
2208		writel(val, tx_q->tail);
2209}
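
/*
 * Note on the xmit_more handling above: for a burst of frames queued
 * back-to-back by the stack, only the last frame (or the first one that finds
 * the netdev queue stopped) pays for the MMIO tail write; the descriptors
 * queued earlier in the burst become visible to HW through that single
 * doorbell.
 */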
2210
2211/**
2212 * idpf_tx_desc_count_required - calculate number of Tx descriptors needed
2213 * @txq: queue to send buffer on
2214 * @skb: send buffer
2215 *
2216 * Returns number of data descriptors needed for this skb.
2217 */
2218unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
2219					 struct sk_buff *skb)
2220{
2221	const struct skb_shared_info *shinfo;
2222	unsigned int count = 0, i;
2223
2224	count += !!skb_headlen(skb);
2225
2226	if (!skb_is_nonlinear(skb))
2227		return count;
2228
2229	shinfo = skb_shinfo(skb);
2230	for (i = 0; i < shinfo->nr_frags; i++) {
2231		unsigned int size;
2232
2233		size = skb_frag_size(&shinfo->frags[i]);
2234
2235		/* We only need to use the idpf_size_to_txd_count check if the
2236		 * fragment is going to span multiple descriptors,
2237		 * i.e. size >= 16K.
2238		 */
2239		if (size >= SZ_16K)
2240			count += idpf_size_to_txd_count(size);
2241		else
2242			count++;
2243	}
2244
2245	if (idpf_chk_linearize(skb, txq->tx_max_bufs, count)) {
2246		if (__skb_linearize(skb))
2247			return 0;
2248
2249		count = idpf_size_to_txd_count(skb->len);
2250		u64_stats_update_begin(&txq->stats_sync);
2251		u64_stats_inc(&txq->q_stats.linearize);
2252		u64_stats_update_end(&txq->stats_sync);
2253	}
2254
2255	return count;
2256}
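
/*
 * Worked example (hypothetical skb): a linear head plus page frags of 4K,
 * 32K and 2K. Assuming idpf_size_to_txd_count() rounds the size up to 12K
 * aligned chunks, the loop above charges 1 + 1 + 3 + 1 = 6 data descriptors;
 * only the 32K frag exceeds the 16K-1 per-descriptor limit and costs
 * DIV_ROUND_UP(32768, 12288) = 3 descriptors.
 */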
2257
2258/**
2259 * idpf_tx_dma_map_error - handle TX DMA map errors
2260 * @txq: queue to send buffer on
2261 * @skb: send buffer
2262 * @first: original first buffer info buffer for packet
2263 * @idx: starting point on ring to unwind
2264 */
2265void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
2266			   struct idpf_tx_buf *first, u16 idx)
2267{
2268	struct libeth_sq_napi_stats ss = { };
2269	struct libeth_cq_pp cp = {
2270		.dev	= txq->dev,
2271		.ss	= &ss,
2272	};
2273
2274	u64_stats_update_begin(&txq->stats_sync);
2275	u64_stats_inc(&txq->q_stats.dma_map_errs);
2276	u64_stats_update_end(&txq->stats_sync);
2277
2278	/* clear dma mappings for failed tx_buf map */
2279	for (;;) {
2280		struct idpf_tx_buf *tx_buf;
2281
2282		tx_buf = &txq->tx_buf[idx];
2283		libeth_tx_complete(tx_buf, &cp);
2284		if (tx_buf == first)
2285			break;
2286		if (idx == 0)
2287			idx = txq->desc_count;
2288		idx--;
2289	}
2290
2291	if (skb_is_gso(skb)) {
2292		union idpf_tx_flex_desc *tx_desc;
2293
2294		/* If we failed a DMA mapping for a TSO packet, we will have
2295		 * used one additional descriptor for a context
2296		 * descriptor. Reset that here.
2297		 */
2298		tx_desc = &txq->flex_tx[idx];
2299		memset(tx_desc, 0, sizeof(struct idpf_flex_tx_ctx_desc));
2300		if (idx == 0)
2301			idx = txq->desc_count;
2302		idx--;
2303	}
2304
2305	/* Update tail in case netdev_xmit_more was previously true */
2306	idpf_tx_buf_hw_update(txq, idx, false);
2307}
2308
2309/**
2310 * idpf_tx_splitq_bump_ntu - adjust NTU and generation
2311 * @txq: the tx ring to wrap
2312 * @ntu: ring index to bump
2313 */
2314static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_tx_queue *txq, u16 ntu)
2315{
2316	ntu++;
2317
2318	if (ntu == txq->desc_count) {
2319		ntu = 0;
2320		txq->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(txq);
2321	}
2322
2323	return ntu;
2324}
2325
2326/**
2327 * idpf_tx_splitq_map - Build the Tx flex descriptor
2328 * @tx_q: queue to send buffer on
2329 * @params: pointer to splitq params struct
2330 * @first: first buffer info buffer to use
2331 *
2332 * This function loops over the skb data pointed to by *first
2333 * and gets a physical address for each memory location and programs
2334 * it and the length into the transmit flex descriptor.
2335 */
2336static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
2337			       struct idpf_tx_splitq_params *params,
2338			       struct idpf_tx_buf *first)
2339{
2340	union idpf_tx_flex_desc *tx_desc;
2341	unsigned int data_len, size;
2342	struct idpf_tx_buf *tx_buf;
2343	u16 i = tx_q->next_to_use;
2344	struct netdev_queue *nq;
2345	struct sk_buff *skb;
2346	skb_frag_t *frag;
2347	u16 td_cmd = 0;
2348	dma_addr_t dma;
2349
2350	skb = first->skb;
2351
2352	td_cmd = params->offload.td_cmd;
2353
2354	data_len = skb->data_len;
2355	size = skb_headlen(skb);
2356
2357	tx_desc = &tx_q->flex_tx[i];
2358
2359	dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE);
2360
2361	tx_buf = first;
2362	first->nr_frags = 0;
2363
2364	params->compl_tag =
2365		(tx_q->compl_tag_cur_gen << tx_q->compl_tag_gen_s) | i;
2366
2367	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
2368		unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
2369
2370		if (dma_mapping_error(tx_q->dev, dma))
2371			return idpf_tx_dma_map_error(tx_q, skb, first, i);
2372
2373		first->nr_frags++;
2374		idpf_tx_buf_compl_tag(tx_buf) = params->compl_tag;
2375		tx_buf->type = LIBETH_SQE_FRAG;
2376
2377		/* record length, and DMA address */
2378		dma_unmap_len_set(tx_buf, len, size);
2379		dma_unmap_addr_set(tx_buf, dma, dma);
2380
2381		/* buf_addr is in same location for both desc types */
2382		tx_desc->q.buf_addr = cpu_to_le64(dma);
2383
2384		/* The stack can send us fragments that are too large for a
2385		 * single descriptor i.e. frag size > 16K-1. We will need to
2386		 * split the fragment across multiple descriptors in this case.
2387		 * To adhere to HW alignment restrictions, the fragment needs
2388		 * to be split such that the first chunk ends on a 4K boundary
2389		 * and all subsequent chunks start on a 4K boundary. We still
2390		 * want to send as much data as possible though, so our
2391		 * intermediate descriptor chunk size will be 12K.
2392		 *
2393		 * For example, consider a 32K fragment mapped to DMA addr 2600.
2394		 * ------------------------------------------------------------
2395		 * |                    frag_size = 32K                       |
2396		 * ------------------------------------------------------------
2397		 * |2600		  |16384	    |28672
2398		 *
2399		 * 3 descriptors will be used for this fragment. The HW expects
2400		 * the descriptors to contain the following:
2401		 * ------------------------------------------------------------
2402		 * | size = 13784         | size = 12K      | size = 6696     |
2403		 * | dma = 2600           | dma = 16384     | dma = 28672     |
2404		 * ------------------------------------------------------------
2405		 *
2406		 * We need to first adjust the max_data for the first chunk so
2407		 * that it ends on a 4K boundary. By negating the value of the
2408		 * DMA address and taking only the low order bits, we're
2409		 * effectively calculating
2410		 *	4K - (DMA addr lower order bits) =
2411		 *				bytes to next boundary.
2412		 *
2413		 * Add that to our base aligned max_data (12K) and we have
2414		 * our first chunk size. In the example above,
2415		 *	13784 = 12K + (4096-2600)
2416		 *
2417		 * After guaranteeing the first chunk ends on a 4K boundary, we
2418		 * will give the intermediate descriptors 12K chunks and
2419		 * whatever is left to the final descriptor. This ensures that
2420		 * all descriptors used for the remaining chunks of the
2421		 * fragment start on a 4K boundary and we use as few
2422		 * descriptors as possible.
2423		 */
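		/*
		 * Checking the numbers from the example above:
		 *	-2600 & (4096 - 1) = 1496, first chunk = 12288 + 1496 = 13784
		 *	13784 + 12288 = 26072 used, 32768 - 26072 = 6696 left
		 *	2600 + 13784 = 16384, 16384 + 12288 = 28672
		 * matching the sizes and DMA addresses in the table.
		 */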
2424		max_data += -dma & (IDPF_TX_MAX_READ_REQ_SIZE - 1);
2425		while (unlikely(size > IDPF_TX_MAX_DESC_DATA)) {
2426			idpf_tx_splitq_build_desc(tx_desc, params, td_cmd,
2427						  max_data);
2428
2429			if (unlikely(++i == tx_q->desc_count)) {
2430				tx_buf = tx_q->tx_buf;
2431				tx_desc = &tx_q->flex_tx[0];
2432				i = 0;
2433				tx_q->compl_tag_cur_gen =
2434					IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
2435			} else {
2436				tx_buf++;
2437				tx_desc++;
2438			}
2439
2440			/* Since this packet has a buffer that is going to span
2441			 * multiple descriptors, it's going to leave holes in
2442			 * the TX buffer ring. To ensure these holes do not
2443			 * cause issues in the cleaning routines, we will clear
2444			 * them of any stale data and assign them the same
2445			 * completion tag as the current packet. Then when the
2446			 * packet is being cleaned, the cleaning routines will
2447			 * simply pass over these holes and finish cleaning the
2448			 * rest of the packet.
2449			 */
2450			tx_buf->type = LIBETH_SQE_EMPTY;
2451			idpf_tx_buf_compl_tag(tx_buf) = params->compl_tag;
2452
2453			/* Adjust the DMA offset and the remaining size of the
2454			 * fragment.  On the first iteration of this loop,
2455			 * max_data will be >= 12K and <= 16K-1.  On any
2456			 * subsequent iteration of this loop, max_data will
2457			 * always be 12K.
2458			 */
2459			dma += max_data;
2460			size -= max_data;
2461
2462			/* Reset max_data since remaining chunks will be 12K
2463			 * at most
2464			 */
2465			max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
2466
2467			/* buf_addr is in same location for both desc types */
2468			tx_desc->q.buf_addr = cpu_to_le64(dma);
2469		}
2470
2471		if (!data_len)
2472			break;
2473
2474		idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
2475
2476		if (unlikely(++i == tx_q->desc_count)) {
2477			tx_buf = tx_q->tx_buf;
2478			tx_desc = &tx_q->flex_tx[0];
2479			i = 0;
2480			tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
2481		} else {
2482			tx_buf++;
2483			tx_desc++;
2484		}
2485
2486		size = skb_frag_size(frag);
2487		data_len -= size;
2488
2489		dma = skb_frag_dma_map(tx_q->dev, frag, 0, size,
2490				       DMA_TO_DEVICE);
2491	}
2492
2493	/* record SW timestamp if HW timestamp is not available */
2494	skb_tx_timestamp(skb);
2495
2496	first->type = LIBETH_SQE_SKB;
2497
2498	/* write last descriptor with RS and EOP bits */
2499	first->rs_idx = i;
2500	td_cmd |= params->eop_cmd;
2501	idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
2502	i = idpf_tx_splitq_bump_ntu(tx_q, i);
2503
2504	tx_q->txq_grp->num_completions_pending++;
2505
2506	/* record bytecount for BQL */
2507	nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
2508	netdev_tx_sent_queue(nq, first->bytes);
2509
2510	idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
2511}
2512
2513/**
2514 * idpf_tso - computes mss and TSO length to prepare for TSO
2515 * @skb: pointer to skb
2516 * @off: pointer to struct that holds offload parameters
2517 *
2518 * Returns error (negative) if TSO was requested but cannot be applied to the
2519 * given skb, 0 if TSO does not apply to the given skb, or 1 otherwise.
2520 */
2521int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off)
2522{
2523	const struct skb_shared_info *shinfo;
2524	union {
2525		struct iphdr *v4;
2526		struct ipv6hdr *v6;
2527		unsigned char *hdr;
2528	} ip;
2529	union {
2530		struct tcphdr *tcp;
2531		struct udphdr *udp;
2532		unsigned char *hdr;
2533	} l4;
2534	u32 paylen, l4_start;
2535	int err;
2536
2537	if (!skb_is_gso(skb))
2538		return 0;
2539
2540	err = skb_cow_head(skb, 0);
2541	if (err < 0)
2542		return err;
2543
2544	shinfo = skb_shinfo(skb);
2545
2546	ip.hdr = skb_network_header(skb);
2547	l4.hdr = skb_transport_header(skb);
2548
2549	/* initialize outer IP header fields */
2550	if (ip.v4->version == 4) {
2551		ip.v4->tot_len = 0;
2552		ip.v4->check = 0;
2553	} else if (ip.v6->version == 6) {
2554		ip.v6->payload_len = 0;
2555	}
2556
2557	l4_start = skb_transport_offset(skb);
2558
2559	/* remove payload length from checksum */
2560	paylen = skb->len - l4_start;
2561
2562	switch (shinfo->gso_type & ~SKB_GSO_DODGY) {
2563	case SKB_GSO_TCPV4:
2564	case SKB_GSO_TCPV6:
2565		csum_replace_by_diff(&l4.tcp->check,
2566				     (__force __wsum)htonl(paylen));
2567		off->tso_hdr_len = __tcp_hdrlen(l4.tcp) + l4_start;
2568		break;
2569	case SKB_GSO_UDP_L4:
2570		csum_replace_by_diff(&l4.udp->check,
2571				     (__force __wsum)htonl(paylen));
2572		/* compute length of segmentation header */
2573		off->tso_hdr_len = sizeof(struct udphdr) + l4_start;
2574		l4.udp->len = htons(shinfo->gso_size + sizeof(struct udphdr));
2575		break;
2576	default:
2577		return -EINVAL;
2578	}
2579
2580	off->tso_len = skb->len - off->tso_hdr_len;
2581	off->mss = shinfo->gso_size;
2582	off->tso_segs = shinfo->gso_segs;
2583
2584	off->tx_flags |= IDPF_TX_FLAGS_TSO;
2585
2586	return 1;
2587}
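
/*
 * Worked example (hypothetical frame): a TCP/IPv4 skb of 29014 bytes carrying
 * 28960 payload bytes behind a 54-byte Ethernet+IPv4+TCP header, with
 * gso_size = 1448:
 *
 *	l4_start    = 14 + 20      = 34
 *	paylen      = 29014 - 34   = 28980  (removed from the TCP checksum)
 *	tso_hdr_len = 34 + 20      = 54
 *	tso_len     = 29014 - 54   = 28960
 *	tso_segs    = 28960 / 1448 = 20
 *
 * idpf_tx_splitq_frame() then accounts first->bytes as 29014 + 19 * 54 =
 * 30040, i.e. the on-wire bytes once the header is replicated per segment.
 */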
2588
2589/**
2590 * __idpf_chk_linearize - Check skb is not using too many buffers
2591 * @skb: send buffer
2592 * @max_bufs: maximum number of buffers
2593 *
2594 * For TSO we need to count the TSO header and segment payload separately.  As
2595 * such we need to check cases where we have max_bufs-1 fragments or more as we
2596 * can potentially require max_bufs+1 DMA transactions, 1 for the TSO header, 1
2597 * for the segment payload in the first descriptor, and another max_buf-1 for
2598 * for the segment payload in the first descriptor, and another max_bufs-1 for
2599 */
2600static bool __idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs)
2601{
2602	const struct skb_shared_info *shinfo = skb_shinfo(skb);
2603	const skb_frag_t *frag, *stale;
2604	int nr_frags, sum;
2605
2606	/* no need to check if number of frags is less than max_bufs - 1 */
2607	nr_frags = shinfo->nr_frags;
2608	if (nr_frags < (max_bufs - 1))
2609		return false;
2610
2611	/* We need to walk through the list and validate that each group
2612	 * of max_bufs-2 fragments totals at least gso_size.
2613	 */
2614	nr_frags -= max_bufs - 2;
2615	frag = &shinfo->frags[0];
2616
2617	/* Initialize sum to 1 - gso_size, the negative of (gso_size - 1).  We use
2618	 * this as the worst case scenario in which the frag ahead of us only
2619	 * provides one byte which is why we are limited to max_bufs-2
2620	 * descriptors for a single transmit as the header and previous
2621	 * fragment are already consuming 2 descriptors.
2622	 */
2623	sum = 1 - shinfo->gso_size;
2624
2625	/* Add size of frags 0 through 4 to create our initial sum */
2626	sum += skb_frag_size(frag++);
2627	sum += skb_frag_size(frag++);
2628	sum += skb_frag_size(frag++);
2629	sum += skb_frag_size(frag++);
2630	sum += skb_frag_size(frag++);
2631
2632	/* Walk through fragments adding latest fragment, testing it, and
2633	 * then removing stale fragments from the sum.
2634	 */
2635	for (stale = &shinfo->frags[0];; stale++) {
2636		int stale_size = skb_frag_size(stale);
2637
2638		sum += skb_frag_size(frag++);
2639
2640		/* The stale fragment may present us with a smaller
2641		 * descriptor than the actual fragment size. To account
2642		 * for that we need to remove all the data on the front and
2643		 * figure out what the remainder would be in the last
2644		 * descriptor associated with the fragment.
2645		 */
2646		if (stale_size > IDPF_TX_MAX_DESC_DATA) {
2647			int align_pad = -(skb_frag_off(stale)) &
2648					(IDPF_TX_MAX_READ_REQ_SIZE - 1);
2649
2650			sum -= align_pad;
2651			stale_size -= align_pad;
2652
2653			do {
2654				sum -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
2655				stale_size -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
2656			} while (stale_size > IDPF_TX_MAX_DESC_DATA);
2657		}
2658
2659		/* if sum is negative we failed to make sufficient progress */
2660		if (sum < 0)
2661			return true;
2662
2663		if (!nr_frags--)
2664			break;
2665
2666		sum -= stale_size;
2667	}
2668
2669	return false;
2670}
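
/*
 * The check above is equivalent to sliding a window of max_bufs - 2
 * consecutive frags over the skb and requiring each window to cover at least
 * gso_size bytes (ignoring the extra adjustment for frags larger than 16K-1).
 * A simplified model with hypothetical names, illustrative only and not built
 * with the driver:
 */
#if 0
static bool example_needs_linearize(const unsigned int *frag_sizes,
				    unsigned int nr_frags,
				    unsigned int max_bufs,
				    unsigned int gso_size)
{
	unsigned int win = max_bufs - 2;
	unsigned int sum = 0, i;

	if (nr_frags < max_bufs - 1)
		return false;

	for (i = 0; i < nr_frags; i++) {
		sum += frag_sizes[i];			/* add the newest frag */
		if (i >= win)
			sum -= frag_sizes[i - win];	/* drop the stale frag */
		if (i >= win - 1 && sum < gso_size)
			return true;			/* window too small: linearize */
	}

	return false;
}
#endif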
2671
2672/**
2673 * idpf_chk_linearize - Check if skb exceeds max descriptors per packet
2674 * @skb: send buffer
2675 * @max_bufs: maximum scatter gather buffers for single packet
2676 * @count: number of buffers this packet needs
2677 *
2678 * Make sure we don't exceed maximum scatter gather buffers for a single
2679 * packet. We have to do some special checking around the boundary (max_bufs-1)
2680 * if TSO is on since we need to count the TSO header and payload separately.
2681 * E.g.: a packet with 7 fragments can require 9 DMA transactions; 1 for TSO
2682 * header, 1 for segment payload, and then 7 for the fragments.
2683 */
2684static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
2685			       unsigned int count)
2686{
2687	if (likely(count < max_bufs))
2688		return false;
2689	if (skb_is_gso(skb))
2690		return __idpf_chk_linearize(skb, max_bufs);
2691
2692	return count > max_bufs;
2693}
2694
2695/**
2696 * idpf_tx_splitq_get_ctx_desc - grab next desc and update buffer ring
2697 * @txq: queue to put context descriptor on
2698 *
2699 * Since the TX buffer ring mimics the descriptor ring, update the tx buffer
2700 * ring entry to reflect that this index is a context descriptor
2701 */
2702static struct idpf_flex_tx_ctx_desc *
2703idpf_tx_splitq_get_ctx_desc(struct idpf_tx_queue *txq)
2704{
2705	struct idpf_flex_tx_ctx_desc *desc;
2706	int i = txq->next_to_use;
2707
2708	txq->tx_buf[i].type = LIBETH_SQE_CTX;
2709
2710	/* grab the next descriptor */
2711	desc = &txq->flex_ctx[i];
2712	txq->next_to_use = idpf_tx_splitq_bump_ntu(txq, i);
2713
2714	return desc;
2715}
2716
2717/**
2718 * idpf_tx_drop_skb - free the SKB and bump tail if necessary
2719 * @tx_q: queue to send buffer on
2720 * @skb: pointer to skb
2721 */
2722netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb)
2723{
2724	u64_stats_update_begin(&tx_q->stats_sync);
2725	u64_stats_inc(&tx_q->q_stats.skb_drops);
2726	u64_stats_update_end(&tx_q->stats_sync);
2727
2728	idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
2729
2730	dev_kfree_skb(skb);
2731
2732	return NETDEV_TX_OK;
2733}
2734
2735/**
2736 * idpf_tx_splitq_frame - Sends buffer on Tx ring using flex descriptors
2737 * @skb: send buffer
2738 * @tx_q: queue to send buffer on
2739 *
2740 * Returns NETDEV_TX_OK if sent, else an error code
2741 */
2742static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
2743					struct idpf_tx_queue *tx_q)
2744{
2745	struct idpf_tx_splitq_params tx_params = { };
2746	struct idpf_tx_buf *first;
2747	unsigned int count;
2748	int tso;
2749
2750	count = idpf_tx_desc_count_required(tx_q, skb);
2751	if (unlikely(!count))
2752		return idpf_tx_drop_skb(tx_q, skb);
2753
2754	tso = idpf_tso(skb, &tx_params.offload);
2755	if (unlikely(tso < 0))
2756		return idpf_tx_drop_skb(tx_q, skb);
2757
2758	/* Check for splitq specific TX resources */
2759	count += (IDPF_TX_DESCS_PER_CACHE_LINE + tso);
2760	if (idpf_tx_maybe_stop_splitq(tx_q, count)) {
2761		idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
2762
2763		return NETDEV_TX_BUSY;
2764	}
2765
2766	if (tso) {
2767		/* If tso is needed, set up context desc */
2768		struct idpf_flex_tx_ctx_desc *ctx_desc =
2769			idpf_tx_splitq_get_ctx_desc(tx_q);
2770
2771		ctx_desc->tso.qw1.cmd_dtype =
2772				cpu_to_le16(IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX |
2773					    IDPF_TX_FLEX_CTX_DESC_CMD_TSO);
2774		ctx_desc->tso.qw0.flex_tlen =
2775				cpu_to_le32(tx_params.offload.tso_len &
2776					    IDPF_TXD_FLEX_CTX_TLEN_M);
2777		ctx_desc->tso.qw0.mss_rt =
2778				cpu_to_le16(tx_params.offload.mss &
2779					    IDPF_TXD_FLEX_CTX_MSS_RT_M);
2780		ctx_desc->tso.qw0.hdr_len = tx_params.offload.tso_hdr_len;
2781
2782		u64_stats_update_begin(&tx_q->stats_sync);
2783		u64_stats_inc(&tx_q->q_stats.lso_pkts);
2784		u64_stats_update_end(&tx_q->stats_sync);
2785	}
2786
2787	/* record the location of the first descriptor for this packet */
2788	first = &tx_q->tx_buf[tx_q->next_to_use];
2789	first->skb = skb;
2790
2791	if (tso) {
2792		first->packets = tx_params.offload.tso_segs;
2793		first->bytes = skb->len +
2794			((first->packets - 1) * tx_params.offload.tso_hdr_len);
2795	} else {
2796		first->packets = 1;
2797		first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
2798	}
2799
2800	if (idpf_queue_has(FLOW_SCH_EN, tx_q)) {
2801		tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE;
2802		tx_params.eop_cmd = IDPF_TXD_FLEX_FLOW_CMD_EOP;
2803		/* Set the RE bit to catch any packets that may have not been
2804		 * stashed during RS completion cleaning. MIN_GAP is set to
2805		 * MIN_RING size to ensure it will be set at least once each
2806		 * time around the ring.
2807		 */
2808		if (!(tx_q->next_to_use % IDPF_TX_SPLITQ_RE_MIN_GAP)) {
2809			tx_params.eop_cmd |= IDPF_TXD_FLEX_FLOW_CMD_RE;
2810			tx_q->txq_grp->num_completions_pending++;
2811		}
2812
2813		if (skb->ip_summed == CHECKSUM_PARTIAL)
2814			tx_params.offload.td_cmd |= IDPF_TXD_FLEX_FLOW_CMD_CS_EN;
2815
2816	} else {
2817		tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2;
2818		tx_params.eop_cmd = IDPF_TXD_LAST_DESC_CMD;
2819
2820		if (skb->ip_summed == CHECKSUM_PARTIAL)
2821			tx_params.offload.td_cmd |= IDPF_TX_FLEX_DESC_CMD_CS_EN;
2822	}
2823
2824	idpf_tx_splitq_map(tx_q, &tx_params, first);
2825
2826	return NETDEV_TX_OK;
2827}
2828
2829/**
2830 * idpf_tx_start - Selects the right Tx queue to send buffer
2831 * @skb: send buffer
2832 * @netdev: network interface device structure
2833 *
2834 * Returns NETDEV_TX_OK if sent, else an error code
2835 */
2836netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev)
2837{
2838	struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
2839	struct idpf_tx_queue *tx_q;
2840
2841	if (unlikely(skb_get_queue_mapping(skb) >= vport->num_txq)) {
2842		dev_kfree_skb_any(skb);
2843
2844		return NETDEV_TX_OK;
2845	}
2846
2847	tx_q = vport->txqs[skb_get_queue_mapping(skb)];
2848
2849	/* hardware can't handle really short frames, hardware padding works
2850	 * beyond this point
2851	 */
2852	if (skb_put_padto(skb, tx_q->tx_min_pkt_len)) {
2853		idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
2854
2855		return NETDEV_TX_OK;
2856	}
2857
2858	if (idpf_is_queue_model_split(vport->txq_model))
2859		return idpf_tx_splitq_frame(skb, tx_q);
2860	else
2861		return idpf_tx_singleq_frame(skb, tx_q);
2862}
2863
2864/**
2865 * idpf_rx_hash - set the hash value in the skb
2866 * @rxq: Rx descriptor ring packet is being transacted on
2867 * @skb: pointer to current skb being populated
2868 * @rx_desc: Receive descriptor
2869 * @decoded: Decoded Rx packet type related fields
2870 */
2871static void
2872idpf_rx_hash(const struct idpf_rx_queue *rxq, struct sk_buff *skb,
2873	     const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
2874	     struct libeth_rx_pt decoded)
2875{
2876	u32 hash;
2877
2878	if (!libeth_rx_pt_has_hash(rxq->netdev, decoded))
2879		return;
2880
2881	hash = le16_to_cpu(rx_desc->hash1) |
2882	       (rx_desc->ff2_mirrid_hash2.hash2 << 16) |
2883	       (rx_desc->hash3 << 24);
2884
2885	libeth_rx_pt_set_hash(skb, hash, decoded);
2886}
2887
2888/**
2889 * idpf_rx_csum - Indicate in skb if checksum is good
2890 * @rxq: Rx descriptor ring packet is being transacted on
2891 * @skb: pointer to current skb being populated
2892 * @csum_bits: checksum fields extracted from the descriptor
2893 * @decoded: Decoded Rx packet type related fields
2894 *
2895 * skb->protocol must be set before this function is called
2896 */
2897static void idpf_rx_csum(struct idpf_rx_queue *rxq, struct sk_buff *skb,
2898			 struct idpf_rx_csum_decoded csum_bits,
2899			 struct libeth_rx_pt decoded)
2900{
2901	bool ipv4, ipv6;
2902
2903	/* check if Rx checksum is enabled */
2904	if (!libeth_rx_pt_has_checksum(rxq->netdev, decoded))
2905		return;
2906
2907	/* check if HW has decoded the packet and checksum */
2908	if (unlikely(!csum_bits.l3l4p))
2909		return;
2910
2911	ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
2912	ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
2913
2914	if (unlikely(ipv4 && (csum_bits.ipe || csum_bits.eipe)))
2915		goto checksum_fail;
2916
2917	if (unlikely(ipv6 && csum_bits.ipv6exadd))
2918		return;
2919
2920	/* check for L4 errors and handle packets that were not able to be
2921	 * checksummed
2922	 */
2923	if (unlikely(csum_bits.l4e))
2924		goto checksum_fail;
2925
2926	if (csum_bits.raw_csum_inv ||
2927	    decoded.inner_prot == LIBETH_RX_PT_INNER_SCTP) {
2928		skb->ip_summed = CHECKSUM_UNNECESSARY;
2929		return;
2930	}
2931
2932	skb->csum = csum_unfold((__force __sum16)~swab16(csum_bits.raw_csum));
2933	skb->ip_summed = CHECKSUM_COMPLETE;
2934
2935	return;
2936
2937checksum_fail:
2938	u64_stats_update_begin(&rxq->stats_sync);
2939	u64_stats_inc(&rxq->q_stats.hw_csum_err);
2940	u64_stats_update_end(&rxq->stats_sync);
2941}
2942
2943/**
2944 * idpf_rx_splitq_extract_csum_bits - Extract checksum bits from descriptor
2945 * @rx_desc: receive descriptor
2946 *
2947 * Return: parsed checksum status.
2948 **/
2949static struct idpf_rx_csum_decoded
2950idpf_rx_splitq_extract_csum_bits(const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
2951{
2952	struct idpf_rx_csum_decoded csum = { };
2953	u8 qword0, qword1;
2954
2955	qword0 = rx_desc->status_err0_qw0;
2956	qword1 = rx_desc->status_err0_qw1;
2957
2958	csum.ipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_M,
2959			     qword1);
2960	csum.eipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_M,
2961			      qword1);
2962	csum.l4e = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_M,
2963			     qword1);
2964	csum.l3l4p = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_M,
2965			       qword1);
2966	csum.ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M,
2967				   qword0);
2968	csum.raw_csum_inv =
2969		le16_get_bits(rx_desc->ptype_err_fflags0,
2970			      VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M);
2971	csum.raw_csum = le16_to_cpu(rx_desc->misc.raw_cs);
2972
2973	return csum;
2974}
2975
2976/**
2977 * idpf_rx_rsc - Set the RSC fields in the skb
2978 * @rxq : Rx descriptor ring packet is being transacted on
2979 * @skb : pointer to current skb being populated
2980 * @rx_desc: Receive descriptor
2981 * @decoded: Decoded Rx packet type related fields
2982 *
2983 * Return 0 on success and error code on failure
2984 *
2985 * Populate the skb fields with the total number of RSC segments, RSC payload
2986 * length and packet type.
2987 */
2988static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
2989		       const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
2990		       struct libeth_rx_pt decoded)
2991{
2992	u16 rsc_segments, rsc_seg_len;
2993	bool ipv4, ipv6;
2994	int len;
2995
2996	if (unlikely(libeth_rx_pt_get_ip_ver(decoded) ==
2997		     LIBETH_RX_PT_OUTER_L2))
2998		return -EINVAL;
2999
3000	rsc_seg_len = le16_to_cpu(rx_desc->misc.rscseglen);
3001	if (unlikely(!rsc_seg_len))
3002		return -EINVAL;
3003
3004	ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
3005	ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
3006
3007	if (unlikely(!(ipv4 ^ ipv6)))
3008		return -EINVAL;
3009
3010	rsc_segments = DIV_ROUND_UP(skb->data_len, rsc_seg_len);
3011
3012	NAPI_GRO_CB(skb)->count = rsc_segments;
3013	skb_shinfo(skb)->gso_size = rsc_seg_len;
3014
3015	skb_reset_network_header(skb);
3016
3017	if (ipv4) {
3018		struct iphdr *ipv4h = ip_hdr(skb);
3019
3020		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
3021
3022		/* Reset and set transport header offset in skb */
3023		skb_set_transport_header(skb, sizeof(struct iphdr));
3024		len = skb->len - skb_transport_offset(skb);
3025
3026		/* Compute the TCP pseudo header checksum*/
3027		tcp_hdr(skb)->check =
3028			~tcp_v4_check(len, ipv4h->saddr, ipv4h->daddr, 0);
3029	} else {
3030		struct ipv6hdr *ipv6h = ipv6_hdr(skb);
3031
3032		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
3033		skb_set_transport_header(skb, sizeof(struct ipv6hdr));
3034		len = skb->len - skb_transport_offset(skb);
3035		tcp_hdr(skb)->check =
3036			~tcp_v6_check(len, &ipv6h->saddr, &ipv6h->daddr, 0);
3037	}
3038
3039	tcp_gro_complete(skb);
3040
3041	u64_stats_update_begin(&rxq->stats_sync);
3042	u64_stats_inc(&rxq->q_stats.rsc_pkts);
3043	u64_stats_update_end(&rxq->stats_sync);
3044
3045	return 0;
3046}
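
/*
 * Worked example (hypothetical values): a coalesced TCP/IPv4 frame with
 * skb->data_len = 7240 and rscseglen = 1448 yields
 *
 *	rsc_segments = DIV_ROUND_UP(7240, 1448) = 5
 *
 * so NAPI_GRO_CB(skb)->count = 5, gso_size = 1448 and gso_type =
 * SKB_GSO_TCPV4, and the TCP pseudo-header checksum is recomputed before the
 * skb is handed to tcp_gro_complete().
 */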
3047
3048/**
3049 * idpf_rx_process_skb_fields - Populate skb header fields from Rx descriptor
3050 * @rxq: Rx descriptor ring packet is being transacted on
3051 * @skb: pointer to current skb being populated
3052 * @rx_desc: Receive descriptor
3053 *
3054 * This function checks the ring, descriptor, and packet information in
3055 * order to populate the hash, checksum, protocol, and
3056 * other fields within the skb.
3057 */
3058static int
3059idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
3060			   const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
3061{
3062	struct idpf_rx_csum_decoded csum_bits;
3063	struct libeth_rx_pt decoded;
3064	u16 rx_ptype;
3065
3066	rx_ptype = le16_get_bits(rx_desc->ptype_err_fflags0,
3067				 VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M);
3068	decoded = rxq->rx_ptype_lkup[rx_ptype];
3069
3070	/* process RSS/hash */
3071	idpf_rx_hash(rxq, skb, rx_desc, decoded);
3072
3073	skb->protocol = eth_type_trans(skb, rxq->netdev);
3074	skb_record_rx_queue(skb, rxq->idx);
3075
3076	if (le16_get_bits(rx_desc->hdrlen_flags,
3077			  VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M))
3078		return idpf_rx_rsc(rxq, skb, rx_desc, decoded);
3079
3080	csum_bits = idpf_rx_splitq_extract_csum_bits(rx_desc);
3081	idpf_rx_csum(rxq, skb, csum_bits, decoded);
3082
3083	return 0;
3084}
3085
3086/**
3087 * idpf_rx_add_frag - Add contents of Rx buffer to sk_buff as a frag
3088 * @rx_buf: buffer containing page to add
3089 * @skb: sk_buff to place the data into
3090 * @size: packet length from rx_desc
3091 *
3092 * This function will add the data contained in rx_buf->page to the skb.
3093 * It will just attach the page as a frag to the skb.
3094 * The function will then update the page offset.
3095 */
3096void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb,
3097		      unsigned int size)
3098{
3099	u32 hr = rx_buf->page->pp->p.offset;
3100
3101	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
3102			rx_buf->offset + hr, size, rx_buf->truesize);
3103}
3104
3105/**
3106 * idpf_rx_hsplit_wa - handle header buffer overflows and split errors
3107 * @hdr: Rx buffer for the headers
3108 * @buf: Rx buffer for the payload
3109 * @data_len: number of bytes received to the payload buffer
3110 *
3111 * When a header buffer overflow occurs or the HW was unable to parse the
3112 * packet type to perform header split, the whole frame gets placed to the
3113 * payload buffer. We can't build a valid skb around a payload buffer when
3114 * the header split is active since it doesn't reserve any head- or tailroom.
3115 * In that case, copy either the whole frame when it's short or just the
3116 * Ethernet header to the header buffer to be able to build an skb and adjust
3117 * the data offset in the payload buffer, IOW emulate the header split.
3118 *
3119 * Return: number of bytes copied to the header buffer.
3120 */
3121static u32 idpf_rx_hsplit_wa(const struct libeth_fqe *hdr,
3122			     struct libeth_fqe *buf, u32 data_len)
3123{
3124	u32 copy = data_len <= L1_CACHE_BYTES ? data_len : ETH_HLEN;
3125	const void *src;
3126	void *dst;
3127
3128	if (!libeth_rx_sync_for_cpu(buf, copy))
3129		return 0;
3130
3131	dst = page_address(hdr->page) + hdr->offset + hdr->page->pp->p.offset;
3132	src = page_address(buf->page) + buf->offset + buf->page->pp->p.offset;
3133	memcpy(dst, src, LARGEST_ALIGN(copy));
3134
3135	buf->offset += copy;
3136
3137	return copy;
3138}
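
/*
 * Illustrative values for the workaround above (assuming a 64-byte
 * L1_CACHE_BYTES):
 *
 *	60-byte frame   -> copy = 60: the whole frame moves to the header
 *	                   buffer and the payload buffer contributes nothing.
 *	1500-byte frame -> copy = ETH_HLEN = 14: only the Ethernet header is
 *	                   copied, buf->offset advances by 14, and the payload
 *	                   buffer is later attached as a frag starting at the
 *	                   IP header.
 */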
3139
3140/**
3141 * idpf_rx_build_skb - Allocate skb and populate it from header buffer
3142 * @buf: Rx buffer to pull data from
3143 * @size: the length of the packet
3144 *
3145 * This function allocates an skb. It then populates it with the page data from
3146 * the current receive descriptor, taking care to set up the skb correctly.
3147 */
3148struct sk_buff *idpf_rx_build_skb(const struct libeth_fqe *buf, u32 size)
3149{
3150	u32 hr = buf->page->pp->p.offset;
3151	struct sk_buff *skb;
3152	void *va;
3153
3154	va = page_address(buf->page) + buf->offset;
3155	prefetch(va + hr);
3156
3157	skb = napi_build_skb(va, buf->truesize);
3158	if (unlikely(!skb))
3159		return NULL;
3160
3161	skb_mark_for_recycle(skb);
3162
3163	skb_reserve(skb, hr);
3164	__skb_put(skb, size);
3165
3166	return skb;
3167}
3168
3169/**
3170 * idpf_rx_splitq_test_staterr - tests bits in Rx descriptor
3171 * status and error fields
3172 * @stat_err_field: field from descriptor to test bits in
3173 * @stat_err_bits: value to mask
3174 *
3175 */
3176static bool idpf_rx_splitq_test_staterr(const u8 stat_err_field,
3177					const u8 stat_err_bits)
3178{
3179	return !!(stat_err_field & stat_err_bits);
3180}
3181
3182/**
3183 * idpf_rx_splitq_is_eop - process handling of EOP buffers
3184 * @rx_desc: Rx descriptor for current buffer
3185 *
3186 * If the buffer is an EOP buffer, this function exits returning true,
3187 * otherwise return false indicating that this is in fact a non-EOP buffer.
3188 */
3189static bool idpf_rx_splitq_is_eop(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
3190{
3191	/* if we are the last buffer then there is nothing else to do */
3192	return likely(idpf_rx_splitq_test_staterr(rx_desc->status_err0_qw1,
3193						  IDPF_RXD_EOF_SPLITQ));
3194}
3195
3196/**
3197 * idpf_rx_splitq_clean - Clean completed descriptors from Rx queue
3198 * @rxq: Rx descriptor queue to retrieve receive buffer queue
3199 * @budget: Total limit on number of packets to process
3200 *
3201 * This function provides a "bounce buffer" approach to Rx interrupt
3202 * processing. The advantage of this is that on systems that have
3203 * expensive overhead for IOMMU access this provides a means of avoiding
3204 * it by maintaining the mapping of the page to the system.
3205 *
3206 * Returns amount of work completed
3207 */
3208static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget)
3209{
3210	int total_rx_bytes = 0, total_rx_pkts = 0;
3211	struct idpf_buf_queue *rx_bufq = NULL;
3212	struct sk_buff *skb = rxq->skb;
3213	u16 ntc = rxq->next_to_clean;
3214
3215	/* Process Rx packets bounded by budget */
3216	while (likely(total_rx_pkts < budget)) {
3217		struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc;
3218		struct libeth_fqe *hdr, *rx_buf = NULL;
3219		struct idpf_sw_queue *refillq = NULL;
3220		struct idpf_rxq_set *rxq_set = NULL;
3221		unsigned int pkt_len = 0;
3222		unsigned int hdr_len = 0;
3223		u16 gen_id, buf_id = 0;
3224		int bufq_id;
3225		u8 rxdid;
3226
3227		/* get the Rx desc from Rx queue based on 'next_to_clean' */
3228		rx_desc = &rxq->rx[ntc].flex_adv_nic_3_wb;
3229
3230		/* This memory barrier is needed to keep us from reading
3231		 * any other fields out of the rx_desc
3232		 */
3233		dma_rmb();
3234
3235		/* if the descriptor isn't done, no work yet to do */
3236		gen_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
3237				       VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M);
3238
3239		if (idpf_queue_has(GEN_CHK, rxq) != gen_id)
3240			break;
3241
3242		rxdid = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_M,
3243				  rx_desc->rxdid_ucast);
3244		if (rxdid != VIRTCHNL2_RXDID_2_FLEX_SPLITQ) {
3245			IDPF_RX_BUMP_NTC(rxq, ntc);
3246			u64_stats_update_begin(&rxq->stats_sync);
3247			u64_stats_inc(&rxq->q_stats.bad_descs);
3248			u64_stats_update_end(&rxq->stats_sync);
3249			continue;
3250		}
3251
3252		pkt_len = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
3253					VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M);
3254
3255		bufq_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
3256					VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M);
3257
3258		rxq_set = container_of(rxq, struct idpf_rxq_set, rxq);
3259		refillq = rxq_set->refillq[bufq_id];
3260
3261		/* retrieve buffer from the rxq */
3262		rx_bufq = &rxq->bufq_sets[bufq_id].bufq;
3263
3264		buf_id = le16_to_cpu(rx_desc->buf_id);
3265
3266		rx_buf = &rx_bufq->buf[buf_id];
3267
3268		if (!rx_bufq->hdr_pp)
3269			goto payload;
3270
3271#define __HBO_BIT	VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_M
3272#define __HDR_LEN_MASK	VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M
3273		if (likely(!(rx_desc->status_err0_qw1 & __HBO_BIT)))
3274			/* If a header buffer overflow occurs, i.e. the header is
3275			 * too large to fit in the header split buffer, HW will
3276			 * put the entire packet, including headers, in the
3277			 * data/payload buffer.
3278			 */
3279			hdr_len = le16_get_bits(rx_desc->hdrlen_flags,
3280						__HDR_LEN_MASK);
3281#undef __HDR_LEN_MASK
3282#undef __HBO_BIT
3283
3284		hdr = &rx_bufq->hdr_buf[buf_id];
3285
3286		if (unlikely(!hdr_len && !skb)) {
3287			hdr_len = idpf_rx_hsplit_wa(hdr, rx_buf, pkt_len);
3288			pkt_len -= hdr_len;
3289
3290			u64_stats_update_begin(&rxq->stats_sync);
3291			u64_stats_inc(&rxq->q_stats.hsplit_buf_ovf);
3292			u64_stats_update_end(&rxq->stats_sync);
3293		}
3294
3295		if (libeth_rx_sync_for_cpu(hdr, hdr_len)) {
3296			skb = idpf_rx_build_skb(hdr, hdr_len);
3297			if (!skb)
3298				break;
3299
3300			u64_stats_update_begin(&rxq->stats_sync);
3301			u64_stats_inc(&rxq->q_stats.hsplit_pkts);
3302			u64_stats_update_end(&rxq->stats_sync);
3303		}
3304
3305		hdr->page = NULL;
3306
3307payload:
3308		if (!libeth_rx_sync_for_cpu(rx_buf, pkt_len))
3309			goto skip_data;
3310
3311		if (skb)
3312			idpf_rx_add_frag(rx_buf, skb, pkt_len);
3313		else
3314			skb = idpf_rx_build_skb(rx_buf, pkt_len);
3315
3316		/* exit if we failed to retrieve a buffer */
3317		if (!skb)
3318			break;
3319
3320skip_data:
3321		rx_buf->page = NULL;
3322
3323		idpf_rx_post_buf_refill(refillq, buf_id);
3324		IDPF_RX_BUMP_NTC(rxq, ntc);
3325
3326		/* skip if it is non EOP desc */
3327		if (!idpf_rx_splitq_is_eop(rx_desc) || unlikely(!skb))
3328			continue;
3329
3330		/* pad skb if needed (to make valid ethernet frame) */
3331		if (eth_skb_pad(skb)) {
3332			skb = NULL;
3333			continue;
3334		}
3335
3336		/* probably a little skewed due to removing CRC */
3337		total_rx_bytes += skb->len;
3338
3339		/* protocol */
3340		if (unlikely(idpf_rx_process_skb_fields(rxq, skb, rx_desc))) {
3341			dev_kfree_skb_any(skb);
3342			skb = NULL;
3343			continue;
3344		}
3345
3346		/* send completed skb up the stack */
3347		napi_gro_receive(rxq->napi, skb);
3348		skb = NULL;
3349
3350		/* update budget accounting */
3351		total_rx_pkts++;
3352	}
3353
3354	rxq->next_to_clean = ntc;
3355
3356	rxq->skb = skb;
3357	u64_stats_update_begin(&rxq->stats_sync);
3358	u64_stats_add(&rxq->q_stats.packets, total_rx_pkts);
3359	u64_stats_add(&rxq->q_stats.bytes, total_rx_bytes);
3360	u64_stats_update_end(&rxq->stats_sync);
3361
3362	/* guarantee a trip back through this routine if there was a failure */
3363	return total_rx_pkts;
3364}
3365
3366/**
3367 * idpf_rx_update_bufq_desc - Update buffer queue descriptor
3368 * @bufq: Pointer to the buffer queue
3369 * @buf_id: buffer ID
3370 * @buf_desc: Buffer queue descriptor
3371 *
3372 * Return 0 on success and negative on failure.
3373 */
3374static int idpf_rx_update_bufq_desc(struct idpf_buf_queue *bufq, u32 buf_id,
3375				    struct virtchnl2_splitq_rx_buf_desc *buf_desc)
3376{
3377	struct libeth_fq_fp fq = {
3378		.pp		= bufq->pp,
3379		.fqes		= bufq->buf,
3380		.truesize	= bufq->truesize,
3381		.count		= bufq->desc_count,
3382	};
3383	dma_addr_t addr;
3384
3385	addr = libeth_rx_alloc(&fq, buf_id);
3386	if (addr == DMA_MAPPING_ERROR)
3387		return -ENOMEM;
3388
3389	buf_desc->pkt_addr = cpu_to_le64(addr);
3390	buf_desc->qword0.buf_id = cpu_to_le16(buf_id);
3391
3392	if (!idpf_queue_has(HSPLIT_EN, bufq))
3393		return 0;
3394
3395	fq.pp = bufq->hdr_pp;
3396	fq.fqes = bufq->hdr_buf;
3397	fq.truesize = bufq->hdr_truesize;
3398
3399	addr = libeth_rx_alloc(&fq, buf_id);
3400	if (addr == DMA_MAPPING_ERROR)
3401		return -ENOMEM;
3402
3403	buf_desc->hdr_addr = cpu_to_le64(addr);
3404
3405	return 0;
3406}
3407
3408/**
3409 * idpf_rx_clean_refillq - Clean refill queue buffers
3410 * @bufq: buffer queue to post buffers back to
3411 * @refillq: refill queue to clean
3412 *
3413 * This function takes care of the buffer refill management
3414 */
3415static void idpf_rx_clean_refillq(struct idpf_buf_queue *bufq,
3416				  struct idpf_sw_queue *refillq)
3417{
3418	struct virtchnl2_splitq_rx_buf_desc *buf_desc;
3419	u16 bufq_nta = bufq->next_to_alloc;
3420	u16 ntc = refillq->next_to_clean;
3421	int cleaned = 0;
3422
3423	buf_desc = &bufq->split_buf[bufq_nta];
3424
3425	/* make sure we stop at ring wrap in the unlikely case ring is full */
3426	while (likely(cleaned < refillq->desc_count)) {
3427		u32 buf_id, refill_desc = refillq->ring[ntc];
3428		bool failure;
3429
3430		if (idpf_queue_has(RFL_GEN_CHK, refillq) !=
3431		    !!(refill_desc & IDPF_RX_BI_GEN_M))
3432			break;
3433
3434		buf_id = FIELD_GET(IDPF_RX_BI_BUFID_M, refill_desc);
3435		failure = idpf_rx_update_bufq_desc(bufq, buf_id, buf_desc);
3436		if (failure)
3437			break;
3438
3439		if (unlikely(++ntc == refillq->desc_count)) {
3440			idpf_queue_change(RFL_GEN_CHK, refillq);
3441			ntc = 0;
3442		}
3443
3444		if (unlikely(++bufq_nta == bufq->desc_count)) {
3445			buf_desc = &bufq->split_buf[0];
3446			bufq_nta = 0;
3447		} else {
3448			buf_desc++;
3449		}
3450
3451		cleaned++;
3452	}
3453
3454	if (!cleaned)
3455		return;
3456
3457	/* We want to limit how many transactions on the bus we trigger with
3458	 * tail writes so we only do it in strides. It's also important we
3459	 * align the write to a multiple of 8 as required by HW.
3460	 */
3461	if (((bufq->next_to_use <= bufq_nta ? 0 : bufq->desc_count) +
3462	    bufq_nta - bufq->next_to_use) >= IDPF_RX_BUF_POST_STRIDE)
3463		idpf_rx_buf_hw_update(bufq, ALIGN_DOWN(bufq_nta,
3464						       IDPF_RX_BUF_POST_STRIDE));
3465
3466	/* update next to alloc since we have filled the ring */
3467	refillq->next_to_clean = ntc;
3468	bufq->next_to_alloc = bufq_nta;
3469}
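
/*
 * Worked example of the tail-write stride check above (hypothetical state,
 * assuming a 128-entry buffer queue and a post stride of 16):
 *
 *	next_to_use = 120, bufq_nta = 10 after refilling
 *	pending     = 128 + 10 - 120 = 18 >= 16  ->  write tail
 *	tail value  = ALIGN_DOWN(10, 16) = 0
 *
 * HW is only told about buffers in stride-sized multiples; the remaining 10
 * descriptors are advertised on a later pass.
 */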
3470
3471/**
3472 * idpf_rx_clean_refillq_all - Clean all refill queues
3473 * @bufq: buffer queue with refill queues
3474 * @nid: ID of the closest NUMA node with memory
3475 *
3476 * Iterates through all refill queues assigned to the buffer queue assigned to
3477 * this vector, posting freshly allocated buffers back to HW. @nid keeps the
3478 * page pools on the closest NUMA node with memory.
3479 */
3480static void idpf_rx_clean_refillq_all(struct idpf_buf_queue *bufq, int nid)
3481{
3482	struct idpf_bufq_set *bufq_set;
3483	int i;
3484
3485	page_pool_nid_changed(bufq->pp, nid);
3486	if (bufq->hdr_pp)
3487		page_pool_nid_changed(bufq->hdr_pp, nid);
3488
3489	bufq_set = container_of(bufq, struct idpf_bufq_set, bufq);
3490	for (i = 0; i < bufq_set->num_refillqs; i++)
3491		idpf_rx_clean_refillq(bufq, &bufq_set->refillqs[i]);
3492}
3493
3494/**
3495 * idpf_vport_intr_clean_queues - MSIX mode Interrupt Handler
3496 * @irq: interrupt number
3497 * @data: pointer to a q_vector
3498 *
3499 */
3500static irqreturn_t idpf_vport_intr_clean_queues(int __always_unused irq,
3501						void *data)
3502{
3503	struct idpf_q_vector *q_vector = (struct idpf_q_vector *)data;
3504
3505	q_vector->total_events++;
3506	napi_schedule(&q_vector->napi);
3507
3508	return IRQ_HANDLED;
3509}
3510
3511/**
3512 * idpf_vport_intr_napi_del_all - Unregister napi for all q_vectors in vport
3513 * @vport: virtual port structure
3514 *
3515 */
3516static void idpf_vport_intr_napi_del_all(struct idpf_vport *vport)
3517{
3518	u16 v_idx;
3519
3520	for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++)
3521		netif_napi_del(&vport->q_vectors[v_idx].napi);
3522}
3523
3524/**
3525 * idpf_vport_intr_napi_dis_all - Disable NAPI for all q_vectors in the vport
3526 * @vport: main vport structure
3527 */
3528static void idpf_vport_intr_napi_dis_all(struct idpf_vport *vport)
3529{
3530	int v_idx;
3531
3532	for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++)
3533		napi_disable(&vport->q_vectors[v_idx].napi);
3534}
3535
3536/**
3537 * idpf_vport_intr_rel - Free memory allocated for interrupt vectors
3538 * @vport: virtual port
3539 *
3540 * Free the memory allocated for interrupt vectors associated with a vport
3541 */
3542void idpf_vport_intr_rel(struct idpf_vport *vport)
3543{
3544	for (u32 v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
3545		struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
3546
3547		kfree(q_vector->complq);
3548		q_vector->complq = NULL;
3549		kfree(q_vector->bufq);
3550		q_vector->bufq = NULL;
3551		kfree(q_vector->tx);
3552		q_vector->tx = NULL;
3553		kfree(q_vector->rx);
3554		q_vector->rx = NULL;
3555
3556		free_cpumask_var(q_vector->affinity_mask);
3557	}
3558
3559	kfree(vport->q_vectors);
3560	vport->q_vectors = NULL;
3561}
3562
3563/**
3564 * idpf_vport_intr_rel_irq - Free the IRQ association with the OS
3565 * @vport: main vport structure
3566 */
3567static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
3568{
3569	struct idpf_adapter *adapter = vport->adapter;
3570	int vector;
3571
3572	for (vector = 0; vector < vport->num_q_vectors; vector++) {
3573		struct idpf_q_vector *q_vector = &vport->q_vectors[vector];
3574		int irq_num, vidx;
3575
3576		/* free only the irqs that were actually requested */
3577		if (!q_vector)
3578			continue;
3579
3580		vidx = vport->q_vector_idxs[vector];
3581		irq_num = adapter->msix_entries[vidx].vector;
3582
3583		/* clear the affinity_mask in the IRQ descriptor */
3584		irq_set_affinity_hint(irq_num, NULL);
3585		kfree(free_irq(irq_num, q_vector));
3586	}
3587}
3588
3589/**
3590 * idpf_vport_intr_dis_irq_all - Disable all interrupts
3591 * @vport: main vport structure
3592 */
3593static void idpf_vport_intr_dis_irq_all(struct idpf_vport *vport)
3594{
3595	struct idpf_q_vector *q_vector = vport->q_vectors;
3596	int q_idx;
3597
3598	for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++)
3599		writel(0, q_vector[q_idx].intr_reg.dyn_ctl);
3600}
3601
3602/**
3603 * idpf_vport_intr_buildreg_itr - Enable default interrupt generation settings
3604 * @q_vector: pointer to q_vector
3605 */
3606static u32 idpf_vport_intr_buildreg_itr(struct idpf_q_vector *q_vector)
3607{
3608	u32 itr_val = q_vector->intr_reg.dyn_ctl_intena_m;
3609	int type = IDPF_NO_ITR_UPDATE_IDX;
3610	u16 itr = 0;
3611
3612	if (q_vector->wb_on_itr) {
3613		/*
3614		 * Trigger a software interrupt when exiting wb_on_itr, to make
3615		 * sure we catch any pending write backs that might have been
3616		 * missed due to interrupt state transition.
3617		 */
3618		itr_val |= q_vector->intr_reg.dyn_ctl_swint_trig_m |
3619			   q_vector->intr_reg.dyn_ctl_sw_itridx_ena_m;
3620		type = IDPF_SW_ITR_UPDATE_IDX;
3621		itr = IDPF_ITR_20K;
3622	}
3623
3624	itr &= IDPF_ITR_MASK;
3625	/* Don't clear PBA because that can cause lost interrupts that
3626	 * came in while we were cleaning/polling
3627	 */
3628	itr_val |= (type << q_vector->intr_reg.dyn_ctl_itridx_s) |
3629		   (itr << (q_vector->intr_reg.dyn_ctl_intrvl_s - 1));
3630
3631	return itr_val;
3632}
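
/*
 * The value built above is a single DYN_CTL write that re-enables the vector
 * (dyn_ctl_intena_m) and selects which ITR index, if any, to update. In the
 * common case the index stays at IDPF_NO_ITR_UPDATE_IDX, so the write only
 * re-arms the interrupt; when leaving wb_on_itr a software interrupt is also
 * triggered with a 20K ITR so completions written back while interrupts were
 * masked are picked up. The interval field appears to take 2-usec units while
 * the ITR value is kept in usec, which is what the "- 1" on the shift
 * accounts for.
 */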
3633
3634/**
3635 * idpf_update_dim_sample - Update dim sample with packets and bytes
3636 * @q_vector: the vector associated with the interrupt
3637 * @dim_sample: dim sample to update
3638 * @dim: dim instance structure
3639 * @packets: total packets
3640 * @bytes: total bytes
3641 *
3642 * Update the dim sample with the packets and bytes which are passed to this
3643 * function. Set the dim state appropriately if the dim settings get stale.
3644 */
3645static void idpf_update_dim_sample(struct idpf_q_vector *q_vector,
3646				   struct dim_sample *dim_sample,
3647				   struct dim *dim, u64 packets, u64 bytes)
3648{
3649	dim_update_sample(q_vector->total_events, packets, bytes, dim_sample);
3650	dim_sample->comp_ctr = 0;
3651
3652	/* if dim settings get stale, like when not updated for 1 second or
3653	 * longer, force it to start again. This addresses the frequent case
3654	 * of an idle queue being switched to by the scheduler.
3655	 */
3656	if (ktime_ms_delta(dim_sample->time, dim->start_sample.time) >= HZ)
3657		dim->state = DIM_START_MEASURE;
3658}
3659
3660/**
3661 * idpf_net_dim - Update net DIM algorithm
3662 * @q_vector: the vector associated with the interrupt
3663 *
3664 * Create a DIM sample and notify net_dim() so that it can possibly decide
3665 * a new ITR value based on incoming packets, bytes, and interrupts.
3666 *
3667 * This function is a no-op if the queue is not configured to dynamic ITR.
3668 */
3669static void idpf_net_dim(struct idpf_q_vector *q_vector)
3670{
3671	struct dim_sample dim_sample = { };
3672	u64 packets, bytes;
3673	u32 i;
3674
3675	if (!IDPF_ITR_IS_DYNAMIC(q_vector->tx_intr_mode))
3676		goto check_rx_itr;
3677
3678	for (i = 0, packets = 0, bytes = 0; i < q_vector->num_txq; i++) {
3679		struct idpf_tx_queue *txq = q_vector->tx[i];
3680		unsigned int start;
3681
3682		do {
3683			start = u64_stats_fetch_begin(&txq->stats_sync);
3684			packets += u64_stats_read(&txq->q_stats.packets);
3685			bytes += u64_stats_read(&txq->q_stats.bytes);
3686		} while (u64_stats_fetch_retry(&txq->stats_sync, start));
3687	}
3688
3689	idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->tx_dim,
3690			       packets, bytes);
3691	net_dim(&q_vector->tx_dim, &dim_sample);
3692
3693check_rx_itr:
3694	if (!IDPF_ITR_IS_DYNAMIC(q_vector->rx_intr_mode))
3695		return;
3696
3697	for (i = 0, packets = 0, bytes = 0; i < q_vector->num_rxq; i++) {
3698		struct idpf_rx_queue *rxq = q_vector->rx[i];
3699		unsigned int start;
3700
3701		do {
3702			start = u64_stats_fetch_begin(&rxq->stats_sync);
3703			packets += u64_stats_read(&rxq->q_stats.packets);
3704			bytes += u64_stats_read(&rxq->q_stats.bytes);
3705		} while (u64_stats_fetch_retry(&rxq->stats_sync, start));
3706	}
3707
3708	idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->rx_dim,
3709			       packets, bytes);
3710	net_dim(&q_vector->rx_dim, &dim_sample);
3711}
3712
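/*
 * Sketch of the lockless stats snapshot pattern used above
 * (<linux/u64_stats_sync.h>); the helper name is illustrative only.  The
 * loop re-reads the counter if the writer updated it mid-sample, so the
 * DIM input is never a torn 64-bit value on 32-bit machines.
 */
static inline u64 example_read_packets(struct u64_stats_sync *sync,
				       const u64_stats_t *packets)
{
	unsigned int start;
	u64 val;

	do {
		start = u64_stats_fetch_begin(sync);
		val = u64_stats_read(packets);
	} while (u64_stats_fetch_retry(sync, start));

	return val;
}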
3713/**
3714 * idpf_vport_intr_update_itr_ena_irq - Update itr and re-enable MSIX interrupt
3715 * @q_vector: q_vector for which itr is being updated and interrupt enabled
3716 *
3717 * Update the net_dim() algorithm and re-enable the interrupt associated with
3718 * this vector.
3719 */
3720void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector)
3721{
3722	u32 intval;
3723
3724	/* net_dim() updates ITR out-of-band using a work item */
3725	idpf_net_dim(q_vector);
3726
3727	intval = idpf_vport_intr_buildreg_itr(q_vector);
3728	q_vector->wb_on_itr = false;
3729
3730	writel(intval, q_vector->intr_reg.dyn_ctl);
3731}
3732
3733/**
3735 * idpf_vport_intr_req_irq - Request IRQs from the OS for the vport's MSI-X vectors
3735 * @vport: main vport structure
3736 */
3737static int idpf_vport_intr_req_irq(struct idpf_vport *vport)
3738{
3739	struct idpf_adapter *adapter = vport->adapter;
3740	const char *drv_name, *if_name, *vec_name;
3741	int vector, err, irq_num, vidx;
3742
3743	drv_name = dev_driver_string(&adapter->pdev->dev);
3744	if_name = netdev_name(vport->netdev);
3745
3746	for (vector = 0; vector < vport->num_q_vectors; vector++) {
3747		struct idpf_q_vector *q_vector = &vport->q_vectors[vector];
3748		char *name;
3749
3750		vidx = vport->q_vector_idxs[vector];
3751		irq_num = adapter->msix_entries[vidx].vector;
3752
3753		if (q_vector->num_rxq && q_vector->num_txq)
3754			vec_name = "TxRx";
3755		else if (q_vector->num_rxq)
3756			vec_name = "Rx";
3757		else if (q_vector->num_txq)
3758			vec_name = "Tx";
3759		else
3760			continue;
3761
3762		name = kasprintf(GFP_KERNEL, "%s-%s-%s-%d", drv_name, if_name,
3763				 vec_name, vidx);
3764
3765		err = request_irq(irq_num, idpf_vport_intr_clean_queues, 0,
3766				  name, q_vector);
3767		if (err) {
3768			netdev_err(vport->netdev,
3769				   "Request_irq failed, error: %d\n", err);
3770			goto free_q_irqs;
3771		}
3772		/* assign the mask for this irq */
3773		irq_set_affinity_hint(irq_num, q_vector->affinity_mask);
3774	}
3775
3776	return 0;
3777
3778free_q_irqs:
3779	while (--vector >= 0) {
3780		vidx = vport->q_vector_idxs[vector];
3781		irq_num = adapter->msix_entries[vidx].vector;
3782		kfree(free_irq(irq_num, &vport->q_vectors[vector]));
3783	}
3784
3785	return err;
3786}
3787
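/*
 * Note (illustrative, not from the driver): with the kasprintf() format
 * above, a vector that services both ring types on an interface named
 * "eth0" would show up in /proc/interrupts as something like
 * "idpf-eth0-TxRx-3", where 3 is the vector's index into the adapter's
 * MSI-X table.  The interface name here is an example value.
 */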
3788/**
3789 * idpf_vport_intr_write_itr - Write ITR value to the ITR register
3790 * @q_vector: q_vector structure
3791 * @itr: Interrupt throttling rate
3792 * @tx: Tx or Rx ITR
3793 */
3794void idpf_vport_intr_write_itr(struct idpf_q_vector *q_vector, u16 itr, bool tx)
3795{
3796	struct idpf_intr_reg *intr_reg;
3797
3798	if (tx && !q_vector->tx)
3799		return;
3800	else if (!tx && !q_vector->rx)
3801		return;
3802
3803	intr_reg = &q_vector->intr_reg;
3804	writel(ITR_REG_ALIGN(itr) >> IDPF_ITR_GRAN_S,
3805	       tx ? intr_reg->tx_itr : intr_reg->rx_itr);
3806}
3807
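/*
 * Worked example (assumes the 2 usec register granularity implied by
 * IDPF_ITR_GRAN_S == 1): a requested ITR of 50 usecs is aligned to the
 * register granularity by ITR_REG_ALIGN() and written as 50 >> 1 == 25
 * register units, i.e. at most one interrupt roughly every 50 usecs for
 * that ring.
 */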
3808/**
3809 * idpf_vport_intr_ena_irq_all - Enable IRQ for the given vport
3810 * @vport: main vport structure
3811 */
3812static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport)
3813{
3814	bool dynamic;
3815	int q_idx;
3816	u16 itr;
3817
3818	for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) {
3819		struct idpf_q_vector *qv = &vport->q_vectors[q_idx];
3820
3821		/* Set the initial ITR values */
3822		if (qv->num_txq) {
3823			dynamic = IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode);
3824			itr = vport->tx_itr_profile[qv->tx_dim.profile_ix];
3825			idpf_vport_intr_write_itr(qv, dynamic ?
3826						  itr : qv->tx_itr_value,
3827						  true);
3828		}
3829
3830		if (qv->num_rxq) {
3831			dynamic = IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode);
3832			itr = vport->rx_itr_profile[qv->rx_dim.profile_ix];
3833			idpf_vport_intr_write_itr(qv, dynamic ?
3834						  itr : qv->rx_itr_value,
3835						  false);
3836		}
3837
3838		if (qv->num_txq || qv->num_rxq)
3839			idpf_vport_intr_update_itr_ena_irq(qv);
3840	}
3841}
3842
3843/**
3844 * idpf_vport_intr_deinit - Release all vector associations for the vport
3845 * @vport: main vport structure
3846 */
3847void idpf_vport_intr_deinit(struct idpf_vport *vport)
3848{
3849	idpf_vport_intr_dis_irq_all(vport);
3850	idpf_vport_intr_napi_dis_all(vport);
3851	idpf_vport_intr_napi_del_all(vport);
3852	idpf_vport_intr_rel_irq(vport);
3853}
3854
3855/**
3856 * idpf_tx_dim_work - Call back from the stack
3857 * @work: work queue structure
3858 */
3859static void idpf_tx_dim_work(struct work_struct *work)
3860{
3861	struct idpf_q_vector *q_vector;
3862	struct idpf_vport *vport;
3863	struct dim *dim;
3864	u16 itr;
3865
3866	dim = container_of(work, struct dim, work);
3867	q_vector = container_of(dim, struct idpf_q_vector, tx_dim);
3868	vport = q_vector->vport;
3869
3870	if (dim->profile_ix >= ARRAY_SIZE(vport->tx_itr_profile))
3871		dim->profile_ix = ARRAY_SIZE(vport->tx_itr_profile) - 1;
3872
3873	/* look up the values in our local table */
3874	itr = vport->tx_itr_profile[dim->profile_ix];
3875
3876	idpf_vport_intr_write_itr(q_vector, itr, true);
3877
3878	dim->state = DIM_START_MEASURE;
3879}
3880
3881/**
3882 * idpf_rx_dim_work - Call back from the stack
3883 * @work: work queue structure
3884 */
3885static void idpf_rx_dim_work(struct work_struct *work)
3886{
3887	struct idpf_q_vector *q_vector;
3888	struct idpf_vport *vport;
3889	struct dim *dim;
3890	u16 itr;
3891
3892	dim = container_of(work, struct dim, work);
3893	q_vector = container_of(dim, struct idpf_q_vector, rx_dim);
3894	vport = q_vector->vport;
3895
3896	if (dim->profile_ix >= ARRAY_SIZE(vport->rx_itr_profile))
3897		dim->profile_ix = ARRAY_SIZE(vport->rx_itr_profile) - 1;
3898
3899	/* look up the values in our local table */
3900	itr = vport->rx_itr_profile[dim->profile_ix];
3901
3902	idpf_vport_intr_write_itr(q_vector, itr, false);
3903
3904	dim->state = DIM_START_MEASURE;
3905}
3906
3907/**
3908 * idpf_init_dim - Set up dynamic interrupt moderation
3909 * @qv: q_vector structure
3910 */
3911static void idpf_init_dim(struct idpf_q_vector *qv)
3912{
3913	INIT_WORK(&qv->tx_dim.work, idpf_tx_dim_work);
3914	qv->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
3915	qv->tx_dim.profile_ix = IDPF_DIM_DEFAULT_PROFILE_IX;
3916
3917	INIT_WORK(&qv->rx_dim.work, idpf_rx_dim_work);
3918	qv->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
3919	qv->rx_dim.profile_ix = IDPF_DIM_DEFAULT_PROFILE_IX;
3920}
3921
3922/**
3923 * idpf_vport_intr_napi_ena_all - Enable NAPI for all q_vectors in the vport
3924 * @vport: main vport structure
3925 */
3926static void idpf_vport_intr_napi_ena_all(struct idpf_vport *vport)
3927{
3928	int q_idx;
3929
3930	for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) {
3931		struct idpf_q_vector *q_vector = &vport->q_vectors[q_idx];
3932
3933		idpf_init_dim(q_vector);
3934		napi_enable(&q_vector->napi);
3935	}
3936}
3937
3938/**
3939 * idpf_tx_splitq_clean_all - Clean all completion queues
3940 * @q_vec: queue vector
3941 * @budget: Used to determine if we are in netpoll
3942 * @cleaned: returns number of packets cleaned
3943 *
3944 * Returns false if clean is not complete else returns true
3945 */
3946static bool idpf_tx_splitq_clean_all(struct idpf_q_vector *q_vec,
3947				     int budget, int *cleaned)
3948{
3949	u16 num_complq = q_vec->num_complq;
3950	bool clean_complete = true;
3951	int i, budget_per_q;
3952
3953	if (unlikely(!num_complq))
3954		return true;
3955
3956	budget_per_q = DIV_ROUND_UP(budget, num_complq);
3957
3958	for (i = 0; i < num_complq; i++)
3959		clean_complete &= idpf_tx_clean_complq(q_vec->complq[i],
3960						       budget_per_q, cleaned);
3961
3962	return clean_complete;
3963}
3964
3965/**
3966 * idpf_rx_splitq_clean_all - Clean all Rx queues on the vector
3967 * @q_vec: queue vector
3968 * @budget: Used to determine if we are in netpoll
3969 * @cleaned: returns number of packets cleaned
3970 *
3971 * Returns false if clean is not complete else returns true
3972 */
3973static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget,
3974				     int *cleaned)
3975{
3976	u16 num_rxq = q_vec->num_rxq;
3977	bool clean_complete = true;
3978	int pkts_cleaned = 0;
3979	int i, budget_per_q;
3980	int nid;
3981
3982	/* We attempt to distribute budget to each Rx queue fairly, but don't
3983	 * allow the budget to go below 1 because that would exit polling early.
3984	 */
3985	budget_per_q = num_rxq ? max(budget / num_rxq, 1) : 0;
3986	for (i = 0; i < num_rxq; i++) {
3987		struct idpf_rx_queue *rxq = q_vec->rx[i];
3988		int pkts_cleaned_per_q;
3989
3990		pkts_cleaned_per_q = idpf_rx_splitq_clean(rxq, budget_per_q);
3991		/* if we clean as many as budgeted, we must not be done */
3992		if (pkts_cleaned_per_q >= budget_per_q)
3993			clean_complete = false;
3994		pkts_cleaned += pkts_cleaned_per_q;
3995	}
3996	*cleaned = pkts_cleaned;
3997
3998	nid = numa_mem_id();
3999
4000	for (i = 0; i < q_vec->num_bufq; i++)
4001		idpf_rx_clean_refillq_all(q_vec->bufq[i], nid);
4002
4003	return clean_complete;
4004}
4005
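/*
 * Worked example for the budget split above (illustrative numbers): with a
 * NAPI budget of 64 and three Rx queues on the vector, each queue may clean
 * up to max(64 / 3, 1) == 21 packets; any queue that consumes its full
 * share clears clean_complete, so the vector stays in polling mode for
 * another round.
 */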
4006/**
4007 * idpf_vport_splitq_napi_poll - NAPI handler
4008 * @napi: struct from which you get q_vector
4009 * @budget: budget provided by stack
4010 */
4011static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
4012{
4013	struct idpf_q_vector *q_vector =
4014				container_of(napi, struct idpf_q_vector, napi);
4015	bool clean_complete;
4016	int work_done = 0;
4017
4018	/* Handle case where we are called by netpoll with a budget of 0 */
4019	if (unlikely(!budget)) {
4020		idpf_tx_splitq_clean_all(q_vector, budget, &work_done);
4021
4022		return 0;
4023	}
4024
4025	clean_complete = idpf_rx_splitq_clean_all(q_vector, budget, &work_done);
4026	clean_complete &= idpf_tx_splitq_clean_all(q_vector, budget, &work_done);
4027
4028	/* If work not completed, return budget and polling will return */
4029	if (!clean_complete) {
4030		idpf_vport_intr_set_wb_on_itr(q_vector);
4031		return budget;
4032	}
4033
4034	work_done = min_t(int, work_done, budget - 1);
4035
4036	/* Exit the polling mode, but don't re-enable interrupts if stack might
4037	 * poll us due to busy-polling
4038	 */
4039	if (likely(napi_complete_done(napi, work_done)))
4040		idpf_vport_intr_update_itr_ena_irq(q_vector);
4041	else
4042		idpf_vport_intr_set_wb_on_itr(q_vector);
4043
4044	/* Switch to poll mode in the tear-down path after sending disable
4045	 * queues virtchnl message, as the interrupts will be disabled after
4046	 * that
4047	 */
4048	if (unlikely(q_vector->num_txq && idpf_queue_has(POLL_MODE,
4049							 q_vector->tx[0])))
4050		return budget;
4051	else
4052		return work_done;
4053}
4054
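/*
 * Note on the work_done clamp above: by NAPI convention a poll cycle that
 * calls napi_complete_done() must report strictly less than the full
 * budget, so work_done is capped at budget - 1 before completing.
 */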
4055/**
4056 * idpf_vport_intr_map_vector_to_qs - Map vectors to queues
4057 * @vport: virtual port
4058 *
4059 * Map the vport's interrupt vectors to its Rx and Tx queues in a round-robin fashion
4060 */
4061static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
4062{
4063	bool split = idpf_is_queue_model_split(vport->rxq_model);
4064	u16 num_txq_grp = vport->num_txq_grp;
4065	struct idpf_rxq_group *rx_qgrp;
4066	struct idpf_txq_group *tx_qgrp;
4067	u32 i, qv_idx, q_index;
4068
4069	for (i = 0, qv_idx = 0; i < vport->num_rxq_grp; i++) {
4070		u16 num_rxq;
4071
4072		if (qv_idx >= vport->num_q_vectors)
4073			qv_idx = 0;
4074
4075		rx_qgrp = &vport->rxq_grps[i];
4076		if (split)
4077			num_rxq = rx_qgrp->splitq.num_rxq_sets;
4078		else
4079			num_rxq = rx_qgrp->singleq.num_rxq;
4080
4081		for (u32 j = 0; j < num_rxq; j++) {
4082			struct idpf_rx_queue *q;
4083
4084			if (split)
4085				q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
4086			else
4087				q = rx_qgrp->singleq.rxqs[j];
4088			q->q_vector = &vport->q_vectors[qv_idx];
4089			q_index = q->q_vector->num_rxq;
4090			q->q_vector->rx[q_index] = q;
4091			q->q_vector->num_rxq++;
4092
4093			if (split)
4094				q->napi = &q->q_vector->napi;
4095		}
4096
4097		if (split) {
4098			for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) {
4099				struct idpf_buf_queue *bufq;
4100
4101				bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
4102				bufq->q_vector = &vport->q_vectors[qv_idx];
4103				q_index = bufq->q_vector->num_bufq;
4104				bufq->q_vector->bufq[q_index] = bufq;
4105				bufq->q_vector->num_bufq++;
4106			}
4107		}
4108
4109		qv_idx++;
4110	}
4111
4112	split = idpf_is_queue_model_split(vport->txq_model);
4113
4114	for (i = 0, qv_idx = 0; i < num_txq_grp; i++) {
4115		u16 num_txq;
4116
4117		if (qv_idx >= vport->num_q_vectors)
4118			qv_idx = 0;
4119
4120		tx_qgrp = &vport->txq_grps[i];
4121		num_txq = tx_qgrp->num_txq;
4122
4123		for (u32 j = 0; j < num_txq; j++) {
4124			struct idpf_tx_queue *q;
4125
4126			q = tx_qgrp->txqs[j];
4127			q->q_vector = &vport->q_vectors[qv_idx];
4128			q->q_vector->tx[q->q_vector->num_txq++] = q;
4129		}
4130
4131		if (split) {
4132			struct idpf_compl_queue *q = tx_qgrp->complq;
4133
4134			q->q_vector = &vport->q_vectors[qv_idx];
4135			q->q_vector->complq[q->q_vector->num_complq++] = q;
4136		}
4137
4138		qv_idx++;
4139	}
4140}
4141
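/*
 * Worked example of the round-robin mapping above (illustrative numbers):
 * with 8 Rx queue groups and 4 q_vectors, qv_idx wraps after vector 3, so
 * vector 0 services groups 0 and 4, vector 1 groups 1 and 5, and so on.
 * The second loop spreads the Tx queue groups (and, in splitq mode, their
 * completion queues) across the vectors the same way.
 */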
4142/**
4143 * idpf_vport_intr_init_vec_idx - Initialize the vector indexes
4144 * @vport: virtual port
4145 *
4146 * Initialize vector indexes with values returned over mailbox
4147 */
4148static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
4149{
4150	struct idpf_adapter *adapter = vport->adapter;
4151	struct virtchnl2_alloc_vectors *ac;
4152	u16 *vecids, total_vecs;
4153	int i;
4154
4155	ac = adapter->req_vec_chunks;
4156	if (!ac) {
4157		for (i = 0; i < vport->num_q_vectors; i++)
4158			vport->q_vectors[i].v_idx = vport->q_vector_idxs[i];
4159
4160		return 0;
4161	}
4162
4163	total_vecs = idpf_get_reserved_vecs(adapter);
4164	vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL);
4165	if (!vecids)
4166		return -ENOMEM;
4167
4168	idpf_get_vec_ids(adapter, vecids, total_vecs, &ac->vchunks);
4169
4170	for (i = 0; i < vport->num_q_vectors; i++)
4171		vport->q_vectors[i].v_idx = vecids[vport->q_vector_idxs[i]];
4172
4173	kfree(vecids);
4174
4175	return 0;
4176}
4177
4178/**
4179 * idpf_vport_intr_napi_add_all - Register NAPI handler for all q_vectors
4180 * @vport: virtual port structure
4181 */
4182static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
4183{
4184	int (*napi_poll)(struct napi_struct *napi, int budget);
4185	u16 v_idx;
4186
4187	if (idpf_is_queue_model_split(vport->txq_model))
4188		napi_poll = idpf_vport_splitq_napi_poll;
4189	else
4190		napi_poll = idpf_vport_singleq_napi_poll;
4191
4192	for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
4193		struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
4194
4195		netif_napi_add(vport->netdev, &q_vector->napi, napi_poll);
4196
4197		/* only set affinity_mask if the CPU is online */
4198		if (cpu_online(v_idx))
4199			cpumask_set_cpu(v_idx, q_vector->affinity_mask);
4200	}
4201}
4202
4203/**
4204 * idpf_vport_intr_alloc - Allocate memory for interrupt vectors
4205 * @vport: virtual port
4206 *
4207 * We allocate one q_vector per queue interrupt. If allocation fails we
4208 * return -ENOMEM.
4209 */
4210int idpf_vport_intr_alloc(struct idpf_vport *vport)
4211{
4212	u16 txqs_per_vector, rxqs_per_vector, bufqs_per_vector;
4213	struct idpf_q_vector *q_vector;
4214	u32 complqs_per_vector, v_idx;
4215
4216	vport->q_vectors = kcalloc(vport->num_q_vectors,
4217				   sizeof(struct idpf_q_vector), GFP_KERNEL);
4218	if (!vport->q_vectors)
4219		return -ENOMEM;
4220
4221	txqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp,
4222				       vport->num_q_vectors);
4223	rxqs_per_vector = DIV_ROUND_UP(vport->num_rxq_grp,
4224				       vport->num_q_vectors);
4225	bufqs_per_vector = vport->num_bufqs_per_qgrp *
4226			   DIV_ROUND_UP(vport->num_rxq_grp,
4227					vport->num_q_vectors);
4228	complqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp,
4229					  vport->num_q_vectors);
4230
4231	for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
4232		q_vector = &vport->q_vectors[v_idx];
4233		q_vector->vport = vport;
4234
4235		q_vector->tx_itr_value = IDPF_ITR_TX_DEF;
4236		q_vector->tx_intr_mode = IDPF_ITR_DYNAMIC;
4237		q_vector->tx_itr_idx = VIRTCHNL2_ITR_IDX_1;
4238
4239		q_vector->rx_itr_value = IDPF_ITR_RX_DEF;
4240		q_vector->rx_intr_mode = IDPF_ITR_DYNAMIC;
4241		q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0;
4242
4243		if (!zalloc_cpumask_var(&q_vector->affinity_mask, GFP_KERNEL))
4244			goto error;
4245
4246		q_vector->tx = kcalloc(txqs_per_vector, sizeof(*q_vector->tx),
4247				       GFP_KERNEL);
4248		if (!q_vector->tx)
4249			goto error;
4250
4251		q_vector->rx = kcalloc(rxqs_per_vector, sizeof(*q_vector->rx),
4252				       GFP_KERNEL);
4253		if (!q_vector->rx)
4254			goto error;
4255
4256		if (!idpf_is_queue_model_split(vport->rxq_model))
4257			continue;
4258
4259		q_vector->bufq = kcalloc(bufqs_per_vector,
4260					 sizeof(*q_vector->bufq),
4261					 GFP_KERNEL);
4262		if (!q_vector->bufq)
4263			goto error;
4264
4265		q_vector->complq = kcalloc(complqs_per_vector,
4266					   sizeof(*q_vector->complq),
4267					   GFP_KERNEL);
4268		if (!q_vector->complq)
4269			goto error;
4270	}
4271
4272	return 0;
4273
4274error:
4275	idpf_vport_intr_rel(vport);
4276
4277	return -ENOMEM;
4278}
4279
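/*
 * Sizing note for the per-vector arrays above (illustrative numbers): with
 * 8 Tx and 8 Rx queue groups, 4 q_vectors and 2 buffer queues per group,
 * DIV_ROUND_UP(8, 4) == 2, so each vector gets room for 2 txq, 2 rxq and
 * 2 complq pointers plus 2 * 2 == 4 bufq pointers.
 */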
4280/**
4281 * idpf_vport_intr_init - Setup all vectors for the given vport
4282 * @vport: virtual port
4283 *
4284 * Returns 0 on success or negative on failure
4285 */
4286int idpf_vport_intr_init(struct idpf_vport *vport)
4287{
4288	int err;
4289
4290	err = idpf_vport_intr_init_vec_idx(vport);
4291	if (err)
4292		return err;
4293
4294	idpf_vport_intr_map_vector_to_qs(vport);
4295	idpf_vport_intr_napi_add_all(vport);
4296
4297	err = vport->adapter->dev_ops.reg_ops.intr_reg_init(vport);
4298	if (err)
4299		goto unroll_vectors_alloc;
4300
4301	err = idpf_vport_intr_req_irq(vport);
4302	if (err)
4303		goto unroll_vectors_alloc;
4304
4305	return 0;
4306
4307unroll_vectors_alloc:
4308	idpf_vport_intr_napi_del_all(vport);
4309
4310	return err;
4311}
4312
4313void idpf_vport_intr_ena(struct idpf_vport *vport)
4314{
4315	idpf_vport_intr_napi_ena_all(vport);
4316	idpf_vport_intr_ena_irq_all(vport);
4317}
4318
4319/**
4320 * idpf_config_rss - Send virtchnl messages to configure RSS
4321 * @vport: virtual port
4322 *
4323 * Return 0 on success, negative on failure
4324 */
4325int idpf_config_rss(struct idpf_vport *vport)
4326{
4327	int err;
4328
4329	err = idpf_send_get_set_rss_key_msg(vport, false);
4330	if (err)
4331		return err;
4332
4333	return idpf_send_get_set_rss_lut_msg(vport, false);
4334}
4335
4336/**
4337 * idpf_fill_dflt_rss_lut - Fill the indirection table with the default values
4338 * @vport: virtual port structure
4339 */
4340static void idpf_fill_dflt_rss_lut(struct idpf_vport *vport)
4341{
4342	struct idpf_adapter *adapter = vport->adapter;
4343	u16 num_active_rxq = vport->num_rxq;
4344	struct idpf_rss_data *rss_data;
4345	int i;
4346
4347	rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
4348
4349	for (i = 0; i < rss_data->rss_lut_size; i++) {
4350		rss_data->rss_lut[i] = i % num_active_rxq;
4351		rss_data->cached_lut[i] = rss_data->rss_lut[i];
4352	}
4353}
4354
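/*
 * Sketch of the default LUT fill above (illustrative helper, not part of
 * the driver): with a 256-entry LUT and 16 active Rx queues the table
 * becomes 0, 1, ..., 15, 0, 1, ... so hashed flows are spread evenly
 * across the queues.
 */
static inline void example_fill_lut(u32 *lut, u32 lut_size, u16 num_active_rxq)
{
	for (u32 i = 0; i < lut_size; i++)
		lut[i] = i % num_active_rxq;	/* round-robin spread */
}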
4355/**
4356 * idpf_init_rss - Allocate and initialize RSS resources
4357 * @vport: virtual port
4358 *
4359 * Return 0 on success, negative on failure
4360 */
4361int idpf_init_rss(struct idpf_vport *vport)
4362{
4363	struct idpf_adapter *adapter = vport->adapter;
4364	struct idpf_rss_data *rss_data;
4365	u32 lut_size;
4366
4367	rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
4368
4369	lut_size = rss_data->rss_lut_size * sizeof(u32);
4370	rss_data->rss_lut = kzalloc(lut_size, GFP_KERNEL);
4371	if (!rss_data->rss_lut)
4372		return -ENOMEM;
4373
4374	rss_data->cached_lut = kzalloc(lut_size, GFP_KERNEL);
4375	if (!rss_data->cached_lut) {
4376		kfree(rss_data->rss_lut);
4377		rss_data->rss_lut = NULL;
4378
4379		return -ENOMEM;
4380	}
4381
4382	/* Fill the default RSS lut values */
4383	idpf_fill_dflt_rss_lut(vport);
4384
4385	return idpf_config_rss(vport);
4386}
4387
4388/**
4389 * idpf_deinit_rss - Release RSS resources
4390 * @vport: virtual port
4391 */
4392void idpf_deinit_rss(struct idpf_vport *vport)
4393{
4394	struct idpf_adapter *adapter = vport->adapter;
4395	struct idpf_rss_data *rss_data;
4396
4397	rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
4398	kfree(rss_data->cached_lut);
4399	rss_data->cached_lut = NULL;
4400	kfree(rss_data->rss_lut);
4401	rss_data->rss_lut = NULL;
4402}
v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright (C) 2023 Intel Corporation */
   3
   4#include "idpf.h"
   5
   6/**
   7 * idpf_buf_lifo_push - push a buffer pointer onto stack
   8 * @stack: pointer to stack struct
   9 * @buf: pointer to buf to push
  10 *
  11 * Returns 0 on success, negative on failure
  12 **/
  13static int idpf_buf_lifo_push(struct idpf_buf_lifo *stack,
  14			      struct idpf_tx_stash *buf)
  15{
  16	if (unlikely(stack->top == stack->size))
  17		return -ENOSPC;
  18
  19	stack->bufs[stack->top++] = buf;
  20
  21	return 0;
  22}
  23
  24/**
  25 * idpf_buf_lifo_pop - pop a buffer pointer from stack
  26 * @stack: pointer to stack struct
  27 **/
  28static struct idpf_tx_stash *idpf_buf_lifo_pop(struct idpf_buf_lifo *stack)
  29{
  30	if (unlikely(!stack->top))
  31		return NULL;
  32
  33	return stack->bufs[--stack->top];
  34}
  35
  36/**
  37 * idpf_tx_timeout - Respond to a Tx Hang
  38 * @netdev: network interface device structure
  39 * @txqueue: TX queue
  40 */
  41void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
  42{
  43	struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev);
  44
  45	adapter->tx_timeout_count++;
  46
  47	netdev_err(netdev, "Detected Tx timeout: Count %d, Queue %d\n",
  48		   adapter->tx_timeout_count, txqueue);
  49	if (!idpf_is_reset_in_prog(adapter)) {
  50		set_bit(IDPF_HR_FUNC_RESET, adapter->flags);
  51		queue_delayed_work(adapter->vc_event_wq,
  52				   &adapter->vc_event_task,
  53				   msecs_to_jiffies(10));
  54	}
  55}
  56
  57/**
  58 * idpf_tx_buf_rel - Release a Tx buffer
  59 * @tx_q: the queue that owns the buffer
  60 * @tx_buf: the buffer to free
  61 */
  62static void idpf_tx_buf_rel(struct idpf_queue *tx_q, struct idpf_tx_buf *tx_buf)
  63{
  64	if (tx_buf->skb) {
  65		if (dma_unmap_len(tx_buf, len))
  66			dma_unmap_single(tx_q->dev,
  67					 dma_unmap_addr(tx_buf, dma),
  68					 dma_unmap_len(tx_buf, len),
  69					 DMA_TO_DEVICE);
  70		dev_kfree_skb_any(tx_buf->skb);
  71	} else if (dma_unmap_len(tx_buf, len)) {
  72		dma_unmap_page(tx_q->dev,
  73			       dma_unmap_addr(tx_buf, dma),
  74			       dma_unmap_len(tx_buf, len),
  75			       DMA_TO_DEVICE);
  76	}
  77
  78	tx_buf->next_to_watch = NULL;
  79	tx_buf->skb = NULL;
  80	tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
  81	dma_unmap_len_set(tx_buf, len, 0);
  82}
  83
  84/**
  85 * idpf_tx_buf_rel_all - Free any empty Tx buffers
  86 * @txq: queue to be cleaned
  87 */
  88static void idpf_tx_buf_rel_all(struct idpf_queue *txq)
  89{
  90	u16 i;
  91
  92	/* Buffers already cleared, nothing to do */
  93	if (!txq->tx_buf)
  94		return;
  95
  96	/* Free all the Tx buffer sk_buffs */
  97	for (i = 0; i < txq->desc_count; i++)
  98		idpf_tx_buf_rel(txq, &txq->tx_buf[i]);
  99
 100	kfree(txq->tx_buf);
 101	txq->tx_buf = NULL;
 102
 103	if (!txq->buf_stack.bufs)
 104		return;
 105
 106	for (i = 0; i < txq->buf_stack.size; i++)
 107		kfree(txq->buf_stack.bufs[i]);
 108
 109	kfree(txq->buf_stack.bufs);
 110	txq->buf_stack.bufs = NULL;
 111}
 112
 113/**
 114 * idpf_tx_desc_rel - Free Tx resources per queue
 115 * @txq: Tx descriptor ring for a specific queue
 116 * @bufq: buffer q or completion q
 117 *
 118 * Free all transmit software resources
 119 */
 120static void idpf_tx_desc_rel(struct idpf_queue *txq, bool bufq)
 121{
 122	if (bufq)
 123		idpf_tx_buf_rel_all(txq);
 124
 125	if (!txq->desc_ring)
 126		return;
 127
 128	dmam_free_coherent(txq->dev, txq->size, txq->desc_ring, txq->dma);
 129	txq->desc_ring = NULL;
 130	txq->next_to_alloc = 0;
 131	txq->next_to_use = 0;
 132	txq->next_to_clean = 0;
 133}
 134
 135/**
 136 * idpf_tx_desc_rel_all - Free Tx Resources for All Queues
 137 * @vport: virtual port structure
 138 *
 139 * Free all transmit software resources
 140 */
 141static void idpf_tx_desc_rel_all(struct idpf_vport *vport)
 142{
 143	int i, j;
 144
 145	if (!vport->txq_grps)
 146		return;
 147
 148	for (i = 0; i < vport->num_txq_grp; i++) {
 149		struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
 150
 151		for (j = 0; j < txq_grp->num_txq; j++)
 152			idpf_tx_desc_rel(txq_grp->txqs[j], true);
 153
 154		if (idpf_is_queue_model_split(vport->txq_model))
 155			idpf_tx_desc_rel(txq_grp->complq, false);
 156	}
 157}
 158
 159/**
 160 * idpf_tx_buf_alloc_all - Allocate memory for all buffer resources
 161 * @tx_q: queue for which the buffers are allocated
 162 *
 163 * Returns 0 on success, negative on failure
 164 */
 165static int idpf_tx_buf_alloc_all(struct idpf_queue *tx_q)
 166{
 167	int buf_size;
 168	int i;
 169
 170	/* Allocate book keeping buffers only. Buffers to be supplied to HW
 171	 * are allocated by kernel network stack and received as part of skb
 172	 */
 173	buf_size = sizeof(struct idpf_tx_buf) * tx_q->desc_count;
 174	tx_q->tx_buf = kzalloc(buf_size, GFP_KERNEL);
 175	if (!tx_q->tx_buf)
 176		return -ENOMEM;
 177
 178	/* Initialize tx_bufs with invalid completion tags */
 179	for (i = 0; i < tx_q->desc_count; i++)
 180		tx_q->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
 181
 182	/* Initialize tx buf stack for out-of-order completions if
 183	 * flow scheduling offload is enabled
 184	 */
 185	tx_q->buf_stack.bufs =
 186		kcalloc(tx_q->desc_count, sizeof(struct idpf_tx_stash *),
 187			GFP_KERNEL);
 188	if (!tx_q->buf_stack.bufs)
 189		return -ENOMEM;
 190
 191	tx_q->buf_stack.size = tx_q->desc_count;
 192	tx_q->buf_stack.top = tx_q->desc_count;
 193
 194	for (i = 0; i < tx_q->desc_count; i++) {
 195		tx_q->buf_stack.bufs[i] = kzalloc(sizeof(*tx_q->buf_stack.bufs[i]),
 196						  GFP_KERNEL);
 197		if (!tx_q->buf_stack.bufs[i])
 198			return -ENOMEM;
 199	}
 200
 201	return 0;
 202}
 203
 204/**
 205 * idpf_tx_desc_alloc - Allocate the Tx descriptors
 206 * @tx_q: the tx ring to set up
 207 * @bufq: buffer or completion queue
 208 *
 209 * Returns 0 on success, negative on failure
 210 */
 211static int idpf_tx_desc_alloc(struct idpf_queue *tx_q, bool bufq)
 212{
 213	struct device *dev = tx_q->dev;
 214	u32 desc_sz;
 215	int err;
 216
 217	if (bufq) {
 218		err = idpf_tx_buf_alloc_all(tx_q);
 219		if (err)
 220			goto err_alloc;
 221
 222		desc_sz = sizeof(struct idpf_base_tx_desc);
 223	} else {
 224		desc_sz = sizeof(struct idpf_splitq_tx_compl_desc);
 225	}
 226
 227	tx_q->size = tx_q->desc_count * desc_sz;
 228
 229	/* Allocate descriptors and also round up to nearest 4K */
 230	tx_q->size = ALIGN(tx_q->size, 4096);
 231	tx_q->desc_ring = dmam_alloc_coherent(dev, tx_q->size, &tx_q->dma,
 232					      GFP_KERNEL);
 233	if (!tx_q->desc_ring) {
 234		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
 235			tx_q->size);
 236		err = -ENOMEM;
 237		goto err_alloc;
 238	}
 239
 240	tx_q->next_to_alloc = 0;
 241	tx_q->next_to_use = 0;
 242	tx_q->next_to_clean = 0;
 243	set_bit(__IDPF_Q_GEN_CHK, tx_q->flags);
 244
 245	return 0;
 246
 247err_alloc:
 248	idpf_tx_desc_rel(tx_q, bufq);
 249
 250	return err;
 251}
 252
 253/**
 254 * idpf_tx_desc_alloc_all - allocate all queues Tx resources
 255 * @vport: virtual port private structure
 256 *
 257 * Returns 0 on success, negative on failure
 258 */
 259static int idpf_tx_desc_alloc_all(struct idpf_vport *vport)
 260{
 261	struct device *dev = &vport->adapter->pdev->dev;
 262	int err = 0;
 263	int i, j;
 264
 265	/* Setup buffer queues. In single queue model buffer queues and
 266	 * completion queues will be the same
 267	 */
 268	for (i = 0; i < vport->num_txq_grp; i++) {
 269		for (j = 0; j < vport->txq_grps[i].num_txq; j++) {
 270			struct idpf_queue *txq = vport->txq_grps[i].txqs[j];
 271			u8 gen_bits = 0;
 272			u16 bufidx_mask;
 273
 274			err = idpf_tx_desc_alloc(txq, true);
 275			if (err) {
 276				dev_err(dev, "Allocation for Tx Queue %u failed\n",
 277					i);
 278				goto err_out;
 279			}
 280
 281			if (!idpf_is_queue_model_split(vport->txq_model))
 282				continue;
 283
 284			txq->compl_tag_cur_gen = 0;
 285
 286			/* Determine the number of bits in the bufid
 287			 * mask and add one to get the start of the
 288			 * generation bits
 289			 */
 290			bufidx_mask = txq->desc_count - 1;
 291			while (bufidx_mask >> 1) {
 292				txq->compl_tag_gen_s++;
 293				bufidx_mask = bufidx_mask >> 1;
 294			}
 295			txq->compl_tag_gen_s++;
 296
 297			gen_bits = IDPF_TX_SPLITQ_COMPL_TAG_WIDTH -
 298							txq->compl_tag_gen_s;
 299			txq->compl_tag_gen_max = GETMAXVAL(gen_bits);
 300
 301			/* Set bufid mask based on location of first
 302			 * gen bit; it cannot simply be the descriptor
 303			 * ring size-1 since we can have size values
 304			 * where not all of those bits are set.
 305			 */
 306			txq->compl_tag_bufid_m =
 307				GETMAXVAL(txq->compl_tag_gen_s);
 308		}
 309
 310		if (!idpf_is_queue_model_split(vport->txq_model))
 311			continue;
 312
 313		/* Setup completion queues */
 314		err = idpf_tx_desc_alloc(vport->txq_grps[i].complq, false);
 315		if (err) {
 316			dev_err(dev, "Allocation for Tx Completion Queue %u failed\n",
 317				i);
 318			goto err_out;
 319		}
 320	}
 321
 322err_out:
 323	if (err)
 324		idpf_tx_desc_rel_all(vport);
 325
 326	return err;
 327}
 328
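/*
 * Worked example for the completion tag split above (assumes a 16 bit tag,
 * i.e. IDPF_TX_SPLITQ_COMPL_TAG_WIDTH == 16): with 512 descriptors,
 * bufidx_mask starts at 511 and the loop shifts it right 8 times, so
 * compl_tag_gen_s == 9.  That leaves 16 - 9 == 7 generation bits
 * (compl_tag_gen_max == 127) and a buffer id mask of GETMAXVAL(9) == 511.
 */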
 329/**
 330 * idpf_rx_page_rel - Release an rx buffer page
 331 * @rxq: the queue that owns the buffer
 332 * @rx_buf: the buffer to free
 333 */
 334static void idpf_rx_page_rel(struct idpf_queue *rxq, struct idpf_rx_buf *rx_buf)
 335{
 336	if (unlikely(!rx_buf->page))
 337		return;
 338
 339	page_pool_put_full_page(rxq->pp, rx_buf->page, false);
 340
 341	rx_buf->page = NULL;
 342	rx_buf->page_offset = 0;
 343}
 344
 345/**
 346 * idpf_rx_hdr_buf_rel_all - Release header buffer memory
 347 * @rxq: queue to use
 348 */
 349static void idpf_rx_hdr_buf_rel_all(struct idpf_queue *rxq)
 350{
 351	struct idpf_adapter *adapter = rxq->vport->adapter;
 352
 353	dma_free_coherent(&adapter->pdev->dev,
 354			  rxq->desc_count * IDPF_HDR_BUF_SIZE,
 355			  rxq->rx_buf.hdr_buf_va,
 356			  rxq->rx_buf.hdr_buf_pa);
 357	rxq->rx_buf.hdr_buf_va = NULL;
 358}
 359
 360/**
 361 * idpf_rx_buf_rel_all - Free all Rx buffer resources for a queue
 362 * @rxq: queue to be cleaned
 363 */
 364static void idpf_rx_buf_rel_all(struct idpf_queue *rxq)
 365{
 366	u16 i;
 367
 368	/* queue already cleared, nothing to do */
 369	if (!rxq->rx_buf.buf)
 370		return;
 371
 372	/* Free all the bufs allocated and given to hw on Rx queue */
 373	for (i = 0; i < rxq->desc_count; i++)
 374		idpf_rx_page_rel(rxq, &rxq->rx_buf.buf[i]);
 375
 376	if (rxq->rx_hsplit_en)
 377		idpf_rx_hdr_buf_rel_all(rxq);
 378
 379	page_pool_destroy(rxq->pp);
 380	rxq->pp = NULL;
 381
 382	kfree(rxq->rx_buf.buf);
 383	rxq->rx_buf.buf = NULL;
 384}
 385
 386/**
 387 * idpf_rx_desc_rel - Free a specific Rx q resources
 388 * @rxq: queue to clean the resources from
 389 * @bufq: buffer q or completion q
 390 * @q_model: single or split q model
 391 *
 392 * Free a specific rx queue resources
 393 */
 394static void idpf_rx_desc_rel(struct idpf_queue *rxq, bool bufq, s32 q_model)
 395{
 396	if (!rxq)
 397		return;
 398
 399	if (rxq->skb) {
 400		dev_kfree_skb_any(rxq->skb);
 401		rxq->skb = NULL;
 402	}
 403
 404	if (bufq || !idpf_is_queue_model_split(q_model))
 405		idpf_rx_buf_rel_all(rxq);
 406
 407	rxq->next_to_alloc = 0;
 408	rxq->next_to_clean = 0;
 409	rxq->next_to_use = 0;
 410	if (!rxq->desc_ring)
 411		return;
 412
 413	dmam_free_coherent(rxq->dev, rxq->size, rxq->desc_ring, rxq->dma);
 414	rxq->desc_ring = NULL;
 415}
 416
 417/**
 418 * idpf_rx_desc_rel_all - Free Rx Resources for All Queues
 419 * @vport: virtual port structure
 420 *
 421 * Free all rx queues resources
 422 */
 423static void idpf_rx_desc_rel_all(struct idpf_vport *vport)
 424{
 425	struct idpf_rxq_group *rx_qgrp;
 426	u16 num_rxq;
 427	int i, j;
 428
 429	if (!vport->rxq_grps)
 430		return;
 431
 432	for (i = 0; i < vport->num_rxq_grp; i++) {
 433		rx_qgrp = &vport->rxq_grps[i];
 434
 435		if (!idpf_is_queue_model_split(vport->rxq_model)) {
 436			for (j = 0; j < rx_qgrp->singleq.num_rxq; j++)
 437				idpf_rx_desc_rel(rx_qgrp->singleq.rxqs[j],
 438						 false, vport->rxq_model);
 439			continue;
 440		}
 441
 442		num_rxq = rx_qgrp->splitq.num_rxq_sets;
 443		for (j = 0; j < num_rxq; j++)
 444			idpf_rx_desc_rel(&rx_qgrp->splitq.rxq_sets[j]->rxq,
 445					 false, vport->rxq_model);
 446
 447		if (!rx_qgrp->splitq.bufq_sets)
 448			continue;
 449
 450		for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
 451			struct idpf_bufq_set *bufq_set =
 452				&rx_qgrp->splitq.bufq_sets[j];
 453
 454			idpf_rx_desc_rel(&bufq_set->bufq, true,
 455					 vport->rxq_model);
 456		}
 457	}
 458}
 459
 460/**
 461 * idpf_rx_buf_hw_update - Store the new tail and head values
 462 * @rxq: queue to bump
 463 * @val: new head index
 464 */
 465void idpf_rx_buf_hw_update(struct idpf_queue *rxq, u32 val)
 466{
 467	rxq->next_to_use = val;
 468
 469	if (unlikely(!rxq->tail))
 470		return;
 471
 472	/* writel has an implicit memory barrier */
 473	writel(val, rxq->tail);
 474}
 475
 476/**
 477 * idpf_rx_hdr_buf_alloc_all - Allocate memory for header buffers
 478 * @rxq: ring to use
 479 *
 480 * Returns 0 on success, negative on failure.
 481 */
 482static int idpf_rx_hdr_buf_alloc_all(struct idpf_queue *rxq)
 483{
 484	struct idpf_adapter *adapter = rxq->vport->adapter;
 485
 486	rxq->rx_buf.hdr_buf_va =
 487		dma_alloc_coherent(&adapter->pdev->dev,
 488				   IDPF_HDR_BUF_SIZE * rxq->desc_count,
 489				   &rxq->rx_buf.hdr_buf_pa,
 490				   GFP_KERNEL);
 491	if (!rxq->rx_buf.hdr_buf_va)
 492		return -ENOMEM;
 493
 494	return 0;
 495}
 496
 497/**
 498 * idpf_rx_post_buf_refill - Post buffer id to refill queue
 499 * @refillq: refill queue to post to
 500 * @buf_id: buffer id to post
 501 */
 502static void idpf_rx_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id)
 503{
 504	u16 nta = refillq->next_to_alloc;
 505
 506	/* store the buffer ID and the SW maintained GEN bit to the refillq */
 507	refillq->ring[nta] =
 508		FIELD_PREP(IDPF_RX_BI_BUFID_M, buf_id) |
 509		FIELD_PREP(IDPF_RX_BI_GEN_M,
 510			   test_bit(__IDPF_Q_GEN_CHK, refillq->flags));
 511
 512	if (unlikely(++nta == refillq->desc_count)) {
 513		nta = 0;
 514		change_bit(__IDPF_Q_GEN_CHK, refillq->flags);
 515	}
 516	refillq->next_to_alloc = nta;
 517}
 518
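/*
 * Note: each refill queue entry above packs the buffer id together with the
 * software-maintained generation bit, so the consumer can tell entries it
 * has already processed from freshly posted ones once the ring wraps;
 * flipping __IDPF_Q_GEN_CHK on wrap-around is what keeps producer and
 * consumer in sync without an explicit count.
 */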
 519/**
 520 * idpf_rx_post_buf_desc - Post buffer to bufq descriptor ring
 521 * @bufq: buffer queue to post to
 522 * @buf_id: buffer id to post
 523 *
 524 * Returns false if buffer could not be allocated, true otherwise.
 525 */
 526static bool idpf_rx_post_buf_desc(struct idpf_queue *bufq, u16 buf_id)
 527{
 528	struct virtchnl2_splitq_rx_buf_desc *splitq_rx_desc = NULL;
 529	u16 nta = bufq->next_to_alloc;
 530	struct idpf_rx_buf *buf;
 531	dma_addr_t addr;
 532
 533	splitq_rx_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, nta);
 534	buf = &bufq->rx_buf.buf[buf_id];
 535
 536	if (bufq->rx_hsplit_en) {
 537		splitq_rx_desc->hdr_addr =
 538			cpu_to_le64(bufq->rx_buf.hdr_buf_pa +
 539				    (u32)buf_id * IDPF_HDR_BUF_SIZE);
 540	}
 541
 542	addr = idpf_alloc_page(bufq->pp, buf, bufq->rx_buf_size);
 543	if (unlikely(addr == DMA_MAPPING_ERROR))
 544		return false;
 545
 546	splitq_rx_desc->pkt_addr = cpu_to_le64(addr);
 547	splitq_rx_desc->qword0.buf_id = cpu_to_le16(buf_id);
 548
 549	nta++;
 550	if (unlikely(nta == bufq->desc_count))
 551		nta = 0;
 552	bufq->next_to_alloc = nta;
 553
 554	return true;
 555}
 556
 557/**
 558 * idpf_rx_post_init_bufs - Post initial buffers to bufq
 559 * @bufq: buffer queue to post working set to
 560 * @working_set: number of buffers to put in working set
 561 *
 562 * Returns true if @working_set bufs were posted successfully, false otherwise.
 563 */
 564static bool idpf_rx_post_init_bufs(struct idpf_queue *bufq, u16 working_set)
 565{
 566	int i;
 567
 568	for (i = 0; i < working_set; i++) {
 569		if (!idpf_rx_post_buf_desc(bufq, i))
 570			return false;
 571	}
 572
 573	idpf_rx_buf_hw_update(bufq,
 574			      bufq->next_to_alloc & ~(bufq->rx_buf_stride - 1));
 575
 576	return true;
 577}
 578
 579/**
 580 * idpf_rx_create_page_pool - Create a page pool
 581 * @rxbufq: RX queue to create page pool for
 582 *
 583 * Returns &page_pool on success, casted -errno on failure
 584 */
 585static struct page_pool *idpf_rx_create_page_pool(struct idpf_queue *rxbufq)
 586{
 587	struct page_pool_params pp = {
 588		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 589		.order		= 0,
 590		.pool_size	= rxbufq->desc_count,
 591		.nid		= NUMA_NO_NODE,
 592		.dev		= rxbufq->vport->netdev->dev.parent,
 593		.max_len	= PAGE_SIZE,
 594		.dma_dir	= DMA_FROM_DEVICE,
 595		.offset		= 0,
 596	};
 597
 598	return page_pool_create(&pp);
 599}
 600
 601/**
 602 * idpf_rx_buf_alloc_all - Allocate memory for all buffer resources
 603 * @rxbufq: queue for which the buffers are allocated; equivalent to
 604 * rxq when operating in singleq mode
 605 *
 606 * Returns 0 on success, negative on failure
 607 */
 608static int idpf_rx_buf_alloc_all(struct idpf_queue *rxbufq)
 609{
 610	int err = 0;
 611
 612	/* Allocate book keeping buffers */
 613	rxbufq->rx_buf.buf = kcalloc(rxbufq->desc_count,
 614				     sizeof(struct idpf_rx_buf), GFP_KERNEL);
 615	if (!rxbufq->rx_buf.buf) {
 616		err = -ENOMEM;
 617		goto rx_buf_alloc_all_out;
 618	}
 619
 620	if (rxbufq->rx_hsplit_en) {
 621		err = idpf_rx_hdr_buf_alloc_all(rxbufq);
 622		if (err)
 623			goto rx_buf_alloc_all_out;
 624	}
 625
 626	/* Allocate buffers to be given to HW.	 */
 627	if (idpf_is_queue_model_split(rxbufq->vport->rxq_model)) {
 628		int working_set = IDPF_RX_BUFQ_WORKING_SET(rxbufq);
 629
 630		if (!idpf_rx_post_init_bufs(rxbufq, working_set))
 631			err = -ENOMEM;
 632	} else {
 633		if (idpf_rx_singleq_buf_hw_alloc_all(rxbufq,
 634						     rxbufq->desc_count - 1))
 635			err = -ENOMEM;
 636	}
 637
 638rx_buf_alloc_all_out:
 639	if (err)
 640		idpf_rx_buf_rel_all(rxbufq);
 641
 642	return err;
 643}
 644
 645/**
 646 * idpf_rx_bufs_init - Initialize page pool, allocate rx bufs, and post to HW
 647 * @rxbufq: RX queue to create page pool for
 648 *
 649 * Returns 0 on success, negative on failure
 650 */
 651static int idpf_rx_bufs_init(struct idpf_queue *rxbufq)
 652{
 653	struct page_pool *pool;
 654
 655	pool = idpf_rx_create_page_pool(rxbufq);
 656	if (IS_ERR(pool))
 657		return PTR_ERR(pool);
 658
 659	rxbufq->pp = pool;
 660
 661	return idpf_rx_buf_alloc_all(rxbufq);
 662}
 663
 664/**
 665 * idpf_rx_bufs_init_all - Initialize all RX bufs
 666 * @vport: virtual port struct
 667 *
 668 * Returns 0 on success, negative on failure
 669 */
 670int idpf_rx_bufs_init_all(struct idpf_vport *vport)
 671{
 672	struct idpf_rxq_group *rx_qgrp;
 673	struct idpf_queue *q;
 674	int i, j, err;
 675
 676	for (i = 0; i < vport->num_rxq_grp; i++) {
 677		rx_qgrp = &vport->rxq_grps[i];
 678
 679		/* Allocate bufs for the rxq itself in singleq */
 680		if (!idpf_is_queue_model_split(vport->rxq_model)) {
 681			int num_rxq = rx_qgrp->singleq.num_rxq;
 682
 683			for (j = 0; j < num_rxq; j++) {
 684				q = rx_qgrp->singleq.rxqs[j];
 685				err = idpf_rx_bufs_init(q);
 686				if (err)
 687					return err;
 688			}
 689
 690			continue;
 691		}
 692
 693		/* Otherwise, allocate bufs for the buffer queues */
 694		for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
 695			q = &rx_qgrp->splitq.bufq_sets[j].bufq;
 696			err = idpf_rx_bufs_init(q);
 697			if (err)
 698				return err;
 699		}
 700	}
 701
 702	return 0;
 703}
 704
 705/**
 706 * idpf_rx_desc_alloc - Allocate queue Rx resources
 707 * @rxq: Rx queue for which the resources are setup
 708 * @bufq: buffer or completion queue
 709 * @q_model: single or split queue model
 710 *
 711 * Returns 0 on success, negative on failure
 712 */
 713static int idpf_rx_desc_alloc(struct idpf_queue *rxq, bool bufq, s32 q_model)
 714{
 715	struct device *dev = rxq->dev;
 716
 717	if (bufq)
 718		rxq->size = rxq->desc_count *
 719			sizeof(struct virtchnl2_splitq_rx_buf_desc);
 720	else
 721		rxq->size = rxq->desc_count *
 722			sizeof(union virtchnl2_rx_desc);
 723
 724	/* Allocate descriptors and also round up to nearest 4K */
 725	rxq->size = ALIGN(rxq->size, 4096);
 726	rxq->desc_ring = dmam_alloc_coherent(dev, rxq->size,
 727					     &rxq->dma, GFP_KERNEL);
 728	if (!rxq->desc_ring) {
 729		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
 730			rxq->size);
 731		return -ENOMEM;
 732	}
 733
 734	rxq->next_to_alloc = 0;
 735	rxq->next_to_clean = 0;
 736	rxq->next_to_use = 0;
 737	set_bit(__IDPF_Q_GEN_CHK, rxq->flags);
 738
 739	return 0;
 740}
 741
 742/**
 743 * idpf_rx_desc_alloc_all - allocate all RX queues resources
 744 * @vport: virtual port structure
 745 *
 746 * Returns 0 on success, negative on failure
 747 */
 748static int idpf_rx_desc_alloc_all(struct idpf_vport *vport)
 749{
 750	struct device *dev = &vport->adapter->pdev->dev;
 751	struct idpf_rxq_group *rx_qgrp;
 752	struct idpf_queue *q;
 753	int i, j, err;
 754	u16 num_rxq;
 755
 756	for (i = 0; i < vport->num_rxq_grp; i++) {
 757		rx_qgrp = &vport->rxq_grps[i];
 758		if (idpf_is_queue_model_split(vport->rxq_model))
 759			num_rxq = rx_qgrp->splitq.num_rxq_sets;
 760		else
 761			num_rxq = rx_qgrp->singleq.num_rxq;
 762
 763		for (j = 0; j < num_rxq; j++) {
 764			if (idpf_is_queue_model_split(vport->rxq_model))
 765				q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
 766			else
 767				q = rx_qgrp->singleq.rxqs[j];
 768			err = idpf_rx_desc_alloc(q, false, vport->rxq_model);
 769			if (err) {
 770				dev_err(dev, "Memory allocation for Rx Queue %u failed\n",
 771					i);
 772				goto err_out;
 773			}
 774		}
 775
 776		if (!idpf_is_queue_model_split(vport->rxq_model))
 777			continue;
 778
 779		for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
 780			q = &rx_qgrp->splitq.bufq_sets[j].bufq;
 781			err = idpf_rx_desc_alloc(q, true, vport->rxq_model);
 782			if (err) {
 783				dev_err(dev, "Memory allocation for Rx Buffer Queue %u failed\n",
 784					i);
 785				goto err_out;
 786			}
 787		}
 788	}
 789
 790	return 0;
 791
 792err_out:
 793	idpf_rx_desc_rel_all(vport);
 794
 795	return err;
 796}
 797
 798/**
 799 * idpf_txq_group_rel - Release all resources for txq groups
 800 * @vport: vport to release txq groups on
 801 */
 802static void idpf_txq_group_rel(struct idpf_vport *vport)
 803{
 804	int i, j;
 805
 806	if (!vport->txq_grps)
 807		return;
 808
 809	for (i = 0; i < vport->num_txq_grp; i++) {
 810		struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
 811
 812		for (j = 0; j < txq_grp->num_txq; j++) {
 813			kfree(txq_grp->txqs[j]);
 814			txq_grp->txqs[j] = NULL;
 815		}
 816		kfree(txq_grp->complq);
 817		txq_grp->complq = NULL;
 818	}
 819	kfree(vport->txq_grps);
 820	vport->txq_grps = NULL;
 821}
 822
 823/**
 824 * idpf_rxq_sw_queue_rel - Release software queue resources
 825 * @rx_qgrp: rx queue group with software queues
 826 */
 827static void idpf_rxq_sw_queue_rel(struct idpf_rxq_group *rx_qgrp)
 828{
 829	int i, j;
 830
 831	for (i = 0; i < rx_qgrp->vport->num_bufqs_per_qgrp; i++) {
 832		struct idpf_bufq_set *bufq_set = &rx_qgrp->splitq.bufq_sets[i];
 833
 834		for (j = 0; j < bufq_set->num_refillqs; j++) {
 835			kfree(bufq_set->refillqs[j].ring);
 836			bufq_set->refillqs[j].ring = NULL;
 837		}
 838		kfree(bufq_set->refillqs);
 839		bufq_set->refillqs = NULL;
 840	}
 841}
 842
 843/**
 844 * idpf_rxq_group_rel - Release all resources for rxq groups
 845 * @vport: vport to release rxq groups on
 846 */
 847static void idpf_rxq_group_rel(struct idpf_vport *vport)
 848{
 849	int i;
 850
 851	if (!vport->rxq_grps)
 852		return;
 853
 854	for (i = 0; i < vport->num_rxq_grp; i++) {
 855		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
 856		u16 num_rxq;
 857		int j;
 858
 859		if (idpf_is_queue_model_split(vport->rxq_model)) {
 860			num_rxq = rx_qgrp->splitq.num_rxq_sets;
 861			for (j = 0; j < num_rxq; j++) {
 862				kfree(rx_qgrp->splitq.rxq_sets[j]);
 863				rx_qgrp->splitq.rxq_sets[j] = NULL;
 864			}
 865
 866			idpf_rxq_sw_queue_rel(rx_qgrp);
 867			kfree(rx_qgrp->splitq.bufq_sets);
 868			rx_qgrp->splitq.bufq_sets = NULL;
 869		} else {
 870			num_rxq = rx_qgrp->singleq.num_rxq;
 871			for (j = 0; j < num_rxq; j++) {
 872				kfree(rx_qgrp->singleq.rxqs[j]);
 873				rx_qgrp->singleq.rxqs[j] = NULL;
 874			}
 875		}
 876	}
 877	kfree(vport->rxq_grps);
 878	vport->rxq_grps = NULL;
 879}
 880
 881/**
 882 * idpf_vport_queue_grp_rel_all - Release all queue groups
 883 * @vport: vport to release queue groups for
 884 */
 885static void idpf_vport_queue_grp_rel_all(struct idpf_vport *vport)
 886{
 887	idpf_txq_group_rel(vport);
 888	idpf_rxq_group_rel(vport);
 889}
 890
 891/**
 892 * idpf_vport_queues_rel - Free memory for all queues
 893 * @vport: virtual port
 894 *
 895 * Free the memory allocated for queues associated to a vport
 896 */
 897void idpf_vport_queues_rel(struct idpf_vport *vport)
 898{
 899	idpf_tx_desc_rel_all(vport);
 900	idpf_rx_desc_rel_all(vport);
 901	idpf_vport_queue_grp_rel_all(vport);
 902
 903	kfree(vport->txqs);
 904	vport->txqs = NULL;
 905}
 906
 907/**
 908 * idpf_vport_init_fast_path_txqs - Initialize fast path txq array
 909 * @vport: vport to init txqs on
 910 *
 911 * We get a queue index from skb->queue_mapping and we need a fast way to
 912 * dereference the queue from queue groups.  This allows us to quickly pull a
 913 * txq based on a queue index.
 914 *
 915 * Returns 0 on success, negative on failure
 916 */
 917static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport)
 918{
 919	int i, j, k = 0;
 920
 921	vport->txqs = kcalloc(vport->num_txq, sizeof(struct idpf_queue *),
 922			      GFP_KERNEL);
 923
 924	if (!vport->txqs)
 925		return -ENOMEM;
 926
 927	for (i = 0; i < vport->num_txq_grp; i++) {
 928		struct idpf_txq_group *tx_grp = &vport->txq_grps[i];
 929
 930		for (j = 0; j < tx_grp->num_txq; j++, k++) {
 931			vport->txqs[k] = tx_grp->txqs[j];
 932			vport->txqs[k]->idx = k;
 933		}
 934	}
 935
 936	return 0;
 937}
 938
 939/**
 940 * idpf_vport_init_num_qs - Initialize number of queues
 941 * @vport: vport to initialize queues
 942 * @vport_msg: data to be filled into vport
 943 */
 944void idpf_vport_init_num_qs(struct idpf_vport *vport,
 945			    struct virtchnl2_create_vport *vport_msg)
 946{
 947	struct idpf_vport_user_config_data *config_data;
 948	u16 idx = vport->idx;
 949
 950	config_data = &vport->adapter->vport_config[idx]->user_config;
 951	vport->num_txq = le16_to_cpu(vport_msg->num_tx_q);
 952	vport->num_rxq = le16_to_cpu(vport_msg->num_rx_q);
 953	/* number of txqs and rxqs in config data will be zeros only in the
 954	 * driver load path and we don't update them thereafter
 955	 */
 956	if (!config_data->num_req_tx_qs && !config_data->num_req_rx_qs) {
 957		config_data->num_req_tx_qs = le16_to_cpu(vport_msg->num_tx_q);
 958		config_data->num_req_rx_qs = le16_to_cpu(vport_msg->num_rx_q);
 959	}
 960
 961	if (idpf_is_queue_model_split(vport->txq_model))
 962		vport->num_complq = le16_to_cpu(vport_msg->num_tx_complq);
 963	if (idpf_is_queue_model_split(vport->rxq_model))
 964		vport->num_bufq = le16_to_cpu(vport_msg->num_rx_bufq);
 965
 966	/* Adjust number of buffer queues per Rx queue group. */
 967	if (!idpf_is_queue_model_split(vport->rxq_model)) {
 968		vport->num_bufqs_per_qgrp = 0;
 969		vport->bufq_size[0] = IDPF_RX_BUF_2048;
 970
 971		return;
 972	}
 973
 974	vport->num_bufqs_per_qgrp = IDPF_MAX_BUFQS_PER_RXQ_GRP;
 975	/* Bufq[0] default buffer size is 4K
 976	 * Bufq[1] default buffer size is 2K
 977	 */
 978	vport->bufq_size[0] = IDPF_RX_BUF_4096;
 979	vport->bufq_size[1] = IDPF_RX_BUF_2048;
 980}
 981
 982/**
 983 * idpf_vport_calc_num_q_desc - Calculate number of queue descriptors
 984 * @vport: vport to calculate q groups for
 985 */
 986void idpf_vport_calc_num_q_desc(struct idpf_vport *vport)
 987{
 988	struct idpf_vport_user_config_data *config_data;
 989	int num_bufqs = vport->num_bufqs_per_qgrp;
 990	u32 num_req_txq_desc, num_req_rxq_desc;
 991	u16 idx = vport->idx;
 992	int i;
 993
 994	config_data =  &vport->adapter->vport_config[idx]->user_config;
 995	num_req_txq_desc = config_data->num_req_txq_desc;
 996	num_req_rxq_desc = config_data->num_req_rxq_desc;
 997
 998	vport->complq_desc_count = 0;
 999	if (num_req_txq_desc) {
1000		vport->txq_desc_count = num_req_txq_desc;
1001		if (idpf_is_queue_model_split(vport->txq_model)) {
1002			vport->complq_desc_count = num_req_txq_desc;
1003			if (vport->complq_desc_count < IDPF_MIN_TXQ_COMPLQ_DESC)
1004				vport->complq_desc_count =
1005					IDPF_MIN_TXQ_COMPLQ_DESC;
1006		}
1007	} else {
1008		vport->txq_desc_count =	IDPF_DFLT_TX_Q_DESC_COUNT;
1009		if (idpf_is_queue_model_split(vport->txq_model))
1010			vport->complq_desc_count =
1011				IDPF_DFLT_TX_COMPLQ_DESC_COUNT;
1012	}
1013
1014	if (num_req_rxq_desc)
1015		vport->rxq_desc_count = num_req_rxq_desc;
1016	else
1017		vport->rxq_desc_count = IDPF_DFLT_RX_Q_DESC_COUNT;
1018
1019	for (i = 0; i < num_bufqs; i++) {
1020		if (!vport->bufq_desc_count[i])
1021			vport->bufq_desc_count[i] =
1022				IDPF_RX_BUFQ_DESC_COUNT(vport->rxq_desc_count,
1023							num_bufqs);
1024	}
1025}
1026
1027/**
1028 * idpf_vport_calc_total_qs - Calculate total number of queues
1029 * @adapter: private data struct
1030 * @vport_idx: vport idx to retrieve vport pointer
1031 * @vport_msg: message to fill with data
1032 * @max_q: vport max queue info
1033 *
1034 * Return 0 on success, error value on failure.
1035 */
1036int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_idx,
1037			     struct virtchnl2_create_vport *vport_msg,
1038			     struct idpf_vport_max_q *max_q)
1039{
1040	int dflt_splitq_txq_grps = 0, dflt_singleq_txqs = 0;
1041	int dflt_splitq_rxq_grps = 0, dflt_singleq_rxqs = 0;
1042	u16 num_req_tx_qs = 0, num_req_rx_qs = 0;
1043	struct idpf_vport_config *vport_config;
1044	u16 num_txq_grps, num_rxq_grps;
1045	u32 num_qs;
1046
1047	vport_config = adapter->vport_config[vport_idx];
1048	if (vport_config) {
1049		num_req_tx_qs = vport_config->user_config.num_req_tx_qs;
1050		num_req_rx_qs = vport_config->user_config.num_req_rx_qs;
1051	} else {
1052		int num_cpus;
1053
1054		/* Restrict num of queues to cpus online as a default
1055		 * configuration to give best performance. User can always
1056		 * override to a max number of queues via ethtool.
1057		 */
1058		num_cpus = num_online_cpus();
1059
1060		dflt_splitq_txq_grps = min_t(int, max_q->max_txq, num_cpus);
1061		dflt_singleq_txqs = min_t(int, max_q->max_txq, num_cpus);
1062		dflt_splitq_rxq_grps = min_t(int, max_q->max_rxq, num_cpus);
1063		dflt_singleq_rxqs = min_t(int, max_q->max_rxq, num_cpus);
1064	}
1065
1066	if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->txq_model))) {
1067		num_txq_grps = num_req_tx_qs ? num_req_tx_qs : dflt_splitq_txq_grps;
1068		vport_msg->num_tx_complq = cpu_to_le16(num_txq_grps *
1069						       IDPF_COMPLQ_PER_GROUP);
1070		vport_msg->num_tx_q = cpu_to_le16(num_txq_grps *
1071						  IDPF_DFLT_SPLITQ_TXQ_PER_GROUP);
1072	} else {
1073		num_txq_grps = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS;
1074		num_qs = num_txq_grps * (num_req_tx_qs ? num_req_tx_qs :
1075					 dflt_singleq_txqs);
1076		vport_msg->num_tx_q = cpu_to_le16(num_qs);
1077		vport_msg->num_tx_complq = 0;
1078	}
1079	if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->rxq_model))) {
1080		num_rxq_grps = num_req_rx_qs ? num_req_rx_qs : dflt_splitq_rxq_grps;
1081		vport_msg->num_rx_bufq = cpu_to_le16(num_rxq_grps *
1082						     IDPF_MAX_BUFQS_PER_RXQ_GRP);
1083		vport_msg->num_rx_q = cpu_to_le16(num_rxq_grps *
1084						  IDPF_DFLT_SPLITQ_RXQ_PER_GROUP);
1085	} else {
1086		num_rxq_grps = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS;
1087		num_qs = num_rxq_grps * (num_req_rx_qs ? num_req_rx_qs :
1088					 dflt_singleq_rxqs);
1089		vport_msg->num_rx_q = cpu_to_le16(num_qs);
1090		vport_msg->num_rx_bufq = 0;
1091	}
1092
1093	return 0;
1094}
1095
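/*
 * Worked example (illustrative numbers, assuming one txq and one completion
 * queue per group): on a host with 16 online CPUs and no user override, the
 * split queue model requests 16 Tx queue groups -> 16 txqs plus 16
 * completion queues, and 16 Rx queue groups -> 16 rxqs plus
 * IDPF_MAX_BUFQS_PER_RXQ_GRP buffer queues per group.
 */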
1096/**
1097 * idpf_vport_calc_num_q_groups - Calculate number of queue groups
1098 * @vport: vport to calculate q groups for
1099 */
1100void idpf_vport_calc_num_q_groups(struct idpf_vport *vport)
1101{
1102	if (idpf_is_queue_model_split(vport->txq_model))
1103		vport->num_txq_grp = vport->num_txq;
1104	else
1105		vport->num_txq_grp = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS;
1106
1107	if (idpf_is_queue_model_split(vport->rxq_model))
1108		vport->num_rxq_grp = vport->num_rxq;
1109	else
1110		vport->num_rxq_grp = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS;
1111}
1112
1113/**
1114 * idpf_vport_calc_numq_per_grp - Calculate number of queues per group
1115 * @vport: vport to calculate queues for
1116 * @num_txq: return parameter for number of TX queues
1117 * @num_rxq: return parameter for number of RX queues
1118 */
1119static void idpf_vport_calc_numq_per_grp(struct idpf_vport *vport,
1120					 u16 *num_txq, u16 *num_rxq)
1121{
1122	if (idpf_is_queue_model_split(vport->txq_model))
1123		*num_txq = IDPF_DFLT_SPLITQ_TXQ_PER_GROUP;
1124	else
1125		*num_txq = vport->num_txq;
1126
1127	if (idpf_is_queue_model_split(vport->rxq_model))
1128		*num_rxq = IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
1129	else
1130		*num_rxq = vport->num_rxq;
1131}
1132
1133/**
1134 * idpf_rxq_set_descids - set the descids supported by this queue
1135 * @vport: virtual port data structure
1136 * @q: rx queue for which descids are set
1137 *
1138 */
1139static void idpf_rxq_set_descids(struct idpf_vport *vport, struct idpf_queue *q)
1140{
1141	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
1142		q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
1143	} else {
1144		if (vport->base_rxd)
1145			q->rxdids = VIRTCHNL2_RXDID_1_32B_BASE_M;
1146		else
1147			q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
1148	}
1149}
1150
1151/**
1152 * idpf_txq_group_alloc - Allocate all txq group resources
1153 * @vport: vport to allocate txq groups for
1154 * @num_txq: number of txqs to allocate for each group
1155 *
1156 * Returns 0 on success, negative on failure
1157 */
1158static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
1159{
1160	bool flow_sch_en;
1161	int err, i;
1162
1163	vport->txq_grps = kcalloc(vport->num_txq_grp,
1164				  sizeof(*vport->txq_grps), GFP_KERNEL);
1165	if (!vport->txq_grps)
1166		return -ENOMEM;
1167
1168	flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
1169				       VIRTCHNL2_CAP_SPLITQ_QSCHED);
1170
1171	for (i = 0; i < vport->num_txq_grp; i++) {
1172		struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
1173		struct idpf_adapter *adapter = vport->adapter;
1174		int j;
1175
1176		tx_qgrp->vport = vport;
1177		tx_qgrp->num_txq = num_txq;
1178
1179		for (j = 0; j < tx_qgrp->num_txq; j++) {
1180			tx_qgrp->txqs[j] = kzalloc(sizeof(*tx_qgrp->txqs[j]),
1181						   GFP_KERNEL);
1182			if (!tx_qgrp->txqs[j]) {
1183				err = -ENOMEM;
1184				goto err_alloc;
1185			}
1186		}
1187
1188		for (j = 0; j < tx_qgrp->num_txq; j++) {
1189			struct idpf_queue *q = tx_qgrp->txqs[j];
1190
1191			q->dev = &adapter->pdev->dev;
1192			q->desc_count = vport->txq_desc_count;
1193			q->tx_max_bufs = idpf_get_max_tx_bufs(adapter);
1194			q->tx_min_pkt_len = idpf_get_min_tx_pkt_len(adapter);
1195			q->vport = vport;
1196			q->txq_grp = tx_qgrp;
1197			hash_init(q->sched_buf_hash);
1198
1199			if (flow_sch_en)
1200				set_bit(__IDPF_Q_FLOW_SCH_EN, q->flags);
1201		}
1202
1203		if (!idpf_is_queue_model_split(vport->txq_model))
1204			continue;
1205
1206		tx_qgrp->complq = kcalloc(IDPF_COMPLQ_PER_GROUP,
1207					  sizeof(*tx_qgrp->complq),
1208					  GFP_KERNEL);
1209		if (!tx_qgrp->complq) {
1210			err = -ENOMEM;
1211			goto err_alloc;
1212		}
1213
1214		tx_qgrp->complq->dev = &adapter->pdev->dev;
1215		tx_qgrp->complq->desc_count = vport->complq_desc_count;
1216		tx_qgrp->complq->vport = vport;
1217		tx_qgrp->complq->txq_grp = tx_qgrp;
1218
1219		if (flow_sch_en)
1220			__set_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags);
1221	}
1222
1223	return 0;
1224
1225err_alloc:
1226	idpf_txq_group_rel(vport);
1227
1228	return err;
1229}
1230
1231/**
1232 * idpf_rxq_group_alloc - Allocate all rxq group resources
1233 * @vport: vport to allocate rxq groups for
1234 * @num_rxq: number of rxqs to allocate for each group
1235 *
1236 * Returns 0 on success, negative on failure
1237 */
1238static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
1239{
1240	struct idpf_adapter *adapter = vport->adapter;
1241	struct idpf_queue *q;
1242	int i, k, err = 0;
1243	bool hs;
1244
1245	vport->rxq_grps = kcalloc(vport->num_rxq_grp,
1246				  sizeof(struct idpf_rxq_group), GFP_KERNEL);
1247	if (!vport->rxq_grps)
1248		return -ENOMEM;
1249
1250	hs = idpf_vport_get_hsplit(vport) == ETHTOOL_TCP_DATA_SPLIT_ENABLED;
1251
1252	for (i = 0; i < vport->num_rxq_grp; i++) {
1253		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
1254		int j;
1255
1256		rx_qgrp->vport = vport;
1257		if (!idpf_is_queue_model_split(vport->rxq_model)) {
1258			rx_qgrp->singleq.num_rxq = num_rxq;
1259			for (j = 0; j < num_rxq; j++) {
1260				rx_qgrp->singleq.rxqs[j] =
1261						kzalloc(sizeof(*rx_qgrp->singleq.rxqs[j]),
1262							GFP_KERNEL);
1263				if (!rx_qgrp->singleq.rxqs[j]) {
1264					err = -ENOMEM;
1265					goto err_alloc;
1266				}
1267			}
1268			goto skip_splitq_rx_init;
1269		}
1270		rx_qgrp->splitq.num_rxq_sets = num_rxq;
1271
1272		for (j = 0; j < num_rxq; j++) {
1273			rx_qgrp->splitq.rxq_sets[j] =
1274				kzalloc(sizeof(struct idpf_rxq_set),
1275					GFP_KERNEL);
1276			if (!rx_qgrp->splitq.rxq_sets[j]) {
1277				err = -ENOMEM;
1278				goto err_alloc;
1279			}
1280		}
1281
1282		rx_qgrp->splitq.bufq_sets = kcalloc(vport->num_bufqs_per_qgrp,
1283						    sizeof(struct idpf_bufq_set),
1284						    GFP_KERNEL);
1285		if (!rx_qgrp->splitq.bufq_sets) {
1286			err = -ENOMEM;
1287			goto err_alloc;
1288		}
1289
1290		for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
1291			struct idpf_bufq_set *bufq_set =
1292				&rx_qgrp->splitq.bufq_sets[j];
1293			int swq_size = sizeof(struct idpf_sw_queue);
1294
1295			q = &rx_qgrp->splitq.bufq_sets[j].bufq;
1296			q->dev = &adapter->pdev->dev;
1297			q->desc_count = vport->bufq_desc_count[j];
1298			q->vport = vport;
1299			q->rxq_grp = rx_qgrp;
1300			q->idx = j;
1301			q->rx_buf_size = vport->bufq_size[j];
1302			q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
1303			q->rx_buf_stride = IDPF_RX_BUF_STRIDE;
1304
1305			if (hs) {
1306				q->rx_hsplit_en = true;
1307				q->rx_hbuf_size = IDPF_HDR_BUF_SIZE;
1308			}
1309
1310			bufq_set->num_refillqs = num_rxq;
1311			bufq_set->refillqs = kcalloc(num_rxq, swq_size,
1312						     GFP_KERNEL);
1313			if (!bufq_set->refillqs) {
1314				err = -ENOMEM;
1315				goto err_alloc;
1316			}
1317			for (k = 0; k < bufq_set->num_refillqs; k++) {
1318				struct idpf_sw_queue *refillq =
1319					&bufq_set->refillqs[k];
1320
1321				refillq->dev = &vport->adapter->pdev->dev;
1322				refillq->desc_count =
1323					vport->bufq_desc_count[j];
1324				set_bit(__IDPF_Q_GEN_CHK, refillq->flags);
1325				set_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags);
1326				refillq->ring = kcalloc(refillq->desc_count,
1327							sizeof(u16),
1328							GFP_KERNEL);
1329				if (!refillq->ring) {
1330					err = -ENOMEM;
1331					goto err_alloc;
1332				}
1333			}
1334		}
1335
1336skip_splitq_rx_init:
1337		for (j = 0; j < num_rxq; j++) {
1338			if (!idpf_is_queue_model_split(vport->rxq_model)) {
1339				q = rx_qgrp->singleq.rxqs[j];
1340				goto setup_rxq;
1341			}
1342			q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
1343			rx_qgrp->splitq.rxq_sets[j]->refillq0 =
1344			      &rx_qgrp->splitq.bufq_sets[0].refillqs[j];
1345			if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP)
1346				rx_qgrp->splitq.rxq_sets[j]->refillq1 =
1347				      &rx_qgrp->splitq.bufq_sets[1].refillqs[j];
1348
1349			if (hs) {
1350				q->rx_hsplit_en = true;
1351				q->rx_hbuf_size = IDPF_HDR_BUF_SIZE;
1352			}
1353
1354setup_rxq:
1355			q->dev = &adapter->pdev->dev;
1356			q->desc_count = vport->rxq_desc_count;
1357			q->vport = vport;
1358			q->rxq_grp = rx_qgrp;
1359			q->idx = (i * num_rxq) + j;
1360			/* In splitq mode, RXQ buffer size should be
1361			 * set to that of the first buffer queue
1362			 * associated with this RXQ
1363			 */
1364			q->rx_buf_size = vport->bufq_size[0];
1365			q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
1366			q->rx_max_pkt_size = vport->netdev->mtu +
1367							IDPF_PACKET_HDR_PAD;
1368			idpf_rxq_set_descids(vport, q);
1369		}
1370	}
1371
1372err_alloc:
1373	if (err)
1374		idpf_rxq_group_rel(vport);
1375
1376	return err;
1377}
1378
1379/**
1380 * idpf_vport_queue_grp_alloc_all - Allocate all queue groups/resources
1381 * @vport: vport with qgrps to allocate
1382 *
1383 * Returns 0 on success, negative on failure
1384 */
1385static int idpf_vport_queue_grp_alloc_all(struct idpf_vport *vport)
1386{
1387	u16 num_txq, num_rxq;
1388	int err;
1389
1390	idpf_vport_calc_numq_per_grp(vport, &num_txq, &num_rxq);
1391
1392	err = idpf_txq_group_alloc(vport, num_txq);
1393	if (err)
1394		goto err_out;
1395
1396	err = idpf_rxq_group_alloc(vport, num_rxq);
1397	if (err)
1398		goto err_out;
1399
1400	return 0;
1401
1402err_out:
1403	idpf_vport_queue_grp_rel_all(vport);
1404
1405	return err;
1406}
1407
1408/**
1409 * idpf_vport_queues_alloc - Allocate memory for all queues
1410 * @vport: virtual port
1411 *
1412 * Allocate memory for queues associated with a vport.  Returns 0 on success,
1413 * negative on failure.
1414 */
1415int idpf_vport_queues_alloc(struct idpf_vport *vport)
1416{
1417	int err;
1418
1419	err = idpf_vport_queue_grp_alloc_all(vport);
1420	if (err)
1421		goto err_out;
1422
1423	err = idpf_tx_desc_alloc_all(vport);
1424	if (err)
1425		goto err_out;
1426
1427	err = idpf_rx_desc_alloc_all(vport);
1428	if (err)
1429		goto err_out;
1430
1431	err = idpf_vport_init_fast_path_txqs(vport);
1432	if (err)
1433		goto err_out;
1434
1435	return 0;
1436
1437err_out:
1438	idpf_vport_queues_rel(vport);
1439
1440	return err;
1441}
1442
1443/**
1444 * idpf_tx_handle_sw_marker - Handle queue marker packet
1445 * @tx_q: tx queue to handle software marker
1446 */
1447static void idpf_tx_handle_sw_marker(struct idpf_queue *tx_q)
1448{
1449	struct idpf_vport *vport = tx_q->vport;
1450	int i;
1451
1452	clear_bit(__IDPF_Q_SW_MARKER, tx_q->flags);
1453	/* Hardware must write marker packets to all queues associated with
1454	 * completion queues. So check if all queues received marker packets
1455	 */
1456	for (i = 0; i < vport->num_txq; i++)
1457		/* If we're still waiting on any other TXQ marker completions,
1458		 * just return now since we cannot wake up the marker_wq yet.
1459		 */
1460		if (test_bit(__IDPF_Q_SW_MARKER, vport->txqs[i]->flags))
1461			return;
1462
1463	/* Drain complete */
1464	set_bit(IDPF_VPORT_SW_MARKER, vport->flags);
1465	wake_up(&vport->sw_marker_wq);
1466}
1467
1468/**
1469 * idpf_tx_splitq_clean_hdr - Clean TX buffer resources for header portion of
1470 * packet
1471 * @tx_q: tx queue to clean buffer from
1472 * @tx_buf: buffer to be cleaned
1473 * @cleaned: pointer to stats struct to track cleaned packets/bytes
1474 * @napi_budget: Used to determine if we are in netpoll
1475 */
1476static void idpf_tx_splitq_clean_hdr(struct idpf_queue *tx_q,
1477				     struct idpf_tx_buf *tx_buf,
1478				     struct idpf_cleaned_stats *cleaned,
1479				     int napi_budget)
1480{
1481	napi_consume_skb(tx_buf->skb, napi_budget);
1482
1483	if (dma_unmap_len(tx_buf, len)) {
1484		dma_unmap_single(tx_q->dev,
1485				 dma_unmap_addr(tx_buf, dma),
1486				 dma_unmap_len(tx_buf, len),
1487				 DMA_TO_DEVICE);
1488
1489		dma_unmap_len_set(tx_buf, len, 0);
1490	}
1491
1492	/* clear tx_buf data */
1493	tx_buf->skb = NULL;
1494
1495	cleaned->bytes += tx_buf->bytecount;
1496	cleaned->packets += tx_buf->gso_segs;
1497}
1498
1499/**
1500 * idpf_tx_clean_stashed_bufs - clean bufs that were stored for
1501 * out of order completions
1502 * @txq: queue to clean
1503 * @compl_tag: completion tag of packet to clean (from completion descriptor)
1504 * @cleaned: pointer to stats struct to track cleaned packets/bytes
1505 * @budget: Used to determine if we are in netpoll
1506 */
1507static void idpf_tx_clean_stashed_bufs(struct idpf_queue *txq, u16 compl_tag,
1508				       struct idpf_cleaned_stats *cleaned,
1509				       int budget)
1510{
1511	struct idpf_tx_stash *stash;
1512	struct hlist_node *tmp_buf;
1513
1514	/* Buffer completion */
1515	hash_for_each_possible_safe(txq->sched_buf_hash, stash, tmp_buf,
1516				    hlist, compl_tag) {
1517		if (unlikely(stash->buf.compl_tag != (int)compl_tag))
1518			continue;
1519
1520		if (stash->buf.skb) {
1521			idpf_tx_splitq_clean_hdr(txq, &stash->buf, cleaned,
1522						 budget);
1523		} else if (dma_unmap_len(&stash->buf, len)) {
1524			dma_unmap_page(txq->dev,
1525				       dma_unmap_addr(&stash->buf, dma),
1526				       dma_unmap_len(&stash->buf, len),
1527				       DMA_TO_DEVICE);
1528			dma_unmap_len_set(&stash->buf, len, 0);
1529		}
1530
1531		/* Push shadow buf back onto stack */
1532		idpf_buf_lifo_push(&txq->buf_stack, stash);
1533
1534		hash_del(&stash->hlist);
1535	}
1536}
1537
1538/**
1539 * idpf_stash_flow_sch_buffers - store buffer parameter info so the buffer can
1540 * be freed at a later time (only relevant for flow scheduling mode)
1541 * @txq: Tx queue to clean
1542 * @tx_buf: buffer to store
1543 */
1544static int idpf_stash_flow_sch_buffers(struct idpf_queue *txq,
1545				       struct idpf_tx_buf *tx_buf)
1546{
1547	struct idpf_tx_stash *stash;
1548
1549	if (unlikely(!dma_unmap_addr(tx_buf, dma) &&
1550		     !dma_unmap_len(tx_buf, len)))
1551		return 0;
1552
1553	stash = idpf_buf_lifo_pop(&txq->buf_stack);
1554	if (unlikely(!stash)) {
1555		net_err_ratelimited("%s: No out-of-order TX buffers left!\n",
1556				    txq->vport->netdev->name);
1557
1558		return -ENOMEM;
1559	}
1560
1561	/* Store buffer params in shadow buffer */
1562	stash->buf.skb = tx_buf->skb;
1563	stash->buf.bytecount = tx_buf->bytecount;
1564	stash->buf.gso_segs = tx_buf->gso_segs;
1565	dma_unmap_addr_set(&stash->buf, dma, dma_unmap_addr(tx_buf, dma));
1566	dma_unmap_len_set(&stash->buf, len, dma_unmap_len(tx_buf, len));
1567	stash->buf.compl_tag = tx_buf->compl_tag;
1568
1569	/* Add buffer to buf_hash table to be freed later */
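	/* The completion tag doubles as the hash key: when an out-of-order RS
	 * completion arrives, idpf_tx_clean_stashed_bufs() walks
	 * hash_for_each_possible_safe() with the same tag to find every buffer
	 * belonging to that packet.
	 */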
1570	hash_add(txq->sched_buf_hash, &stash->hlist, stash->buf.compl_tag);
1571
1572	memset(tx_buf, 0, sizeof(struct idpf_tx_buf));
1573
1574	/* Reinitialize buf_id portion of tag */
1575	tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
1576
1577	return 0;
1578}
1579
1580#define idpf_tx_splitq_clean_bump_ntc(txq, ntc, desc, buf)	\
1581do {								\
1582	(ntc)++;						\
1583	if (unlikely(!(ntc))) {					\
1584		ntc -= (txq)->desc_count;			\
1585		buf = (txq)->tx_buf;				\
1586		desc = IDPF_FLEX_TX_DESC(txq, 0);		\
1587	} else {						\
1588		(buf)++;					\
1589		(desc)++;					\
1590	}							\
1591} while (0)
1592
1593/**
1594 * idpf_tx_splitq_clean - Reclaim resources from buffer queue
1595 * @tx_q: Tx queue to clean
1596 * @end: queue index until which it should be cleaned
1597 * @napi_budget: Used to determine if we are in netpoll
1598 * @cleaned: pointer to stats struct to track cleaned packets/bytes
1599 * @descs_only: true if queue is using flow-based scheduling and should
1600 * not clean buffers at this time
1601 *
1602 * Cleans the queue descriptor ring. If the queue is using queue-based
1603 * scheduling, the buffers will be cleaned as well. If the queue is using
1604 * flow-based scheduling, only the descriptors are cleaned at this time.
1605 * Separate packet completion events will be reported on the completion queue,
1606 * and the buffers will be cleaned separately. The stats are not updated from
1607 * this function when using flow-based scheduling.
1608 */
1609static void idpf_tx_splitq_clean(struct idpf_queue *tx_q, u16 end,
1610				 int napi_budget,
1611				 struct idpf_cleaned_stats *cleaned,
1612				 bool descs_only)
1613{
1614	union idpf_tx_flex_desc *next_pending_desc = NULL;
1615	union idpf_tx_flex_desc *tx_desc;
1616	s16 ntc = tx_q->next_to_clean;
1617	struct idpf_tx_buf *tx_buf;
1618
1619	tx_desc = IDPF_FLEX_TX_DESC(tx_q, ntc);
1620	next_pending_desc = IDPF_FLEX_TX_DESC(tx_q, end);
1621	tx_buf = &tx_q->tx_buf[ntc];
1622	ntc -= tx_q->desc_count;
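	/* ntc is kept biased by -desc_count so the bump macro can detect a wrap
	 * simply by ntc reaching zero; the bias is removed again at
	 * tx_splitq_clean_out. Illustrative numbers: desc_count = 512 and
	 * next_to_clean = 510 start ntc at -2, and the ring wraps two bumps
	 * later when ntc hits 0.
	 */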
1623
1624	while (tx_desc != next_pending_desc) {
1625		union idpf_tx_flex_desc *eop_desc;
1626
1627		/* If this entry in the ring was used as a context descriptor,
1628		 * its corresponding entry in the buffer ring will have an
1629		 * invalid completion tag since no buffer was used.  We can
1630		 * skip this descriptor since there is no buffer to clean.
1631		 */
1632		if (unlikely(tx_buf->compl_tag == IDPF_SPLITQ_TX_INVAL_COMPL_TAG))
1633			goto fetch_next_txq_desc;
1634
1635		eop_desc = (union idpf_tx_flex_desc *)tx_buf->next_to_watch;
1636
1637		/* clear next_to_watch to prevent false hangs */
1638		tx_buf->next_to_watch = NULL;
1639
1640		if (descs_only) {
1641			if (idpf_stash_flow_sch_buffers(tx_q, tx_buf))
1642				goto tx_splitq_clean_out;
1643
1644			while (tx_desc != eop_desc) {
1645				idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
1646							      tx_desc, tx_buf);
1647
1648				if (dma_unmap_len(tx_buf, len)) {
1649					if (idpf_stash_flow_sch_buffers(tx_q,
1650									tx_buf))
1651						goto tx_splitq_clean_out;
1652				}
1653			}
1654		} else {
1655			idpf_tx_splitq_clean_hdr(tx_q, tx_buf, cleaned,
1656						 napi_budget);
1657
1658			/* unmap remaining buffers */
1659			while (tx_desc != eop_desc) {
1660				idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
1661							      tx_desc, tx_buf);
1662
1663				/* unmap any remaining paged data */
1664				if (dma_unmap_len(tx_buf, len)) {
1665					dma_unmap_page(tx_q->dev,
1666						       dma_unmap_addr(tx_buf, dma),
1667						       dma_unmap_len(tx_buf, len),
1668						       DMA_TO_DEVICE);
1669					dma_unmap_len_set(tx_buf, len, 0);
1670				}
1671			}
1672		}
1673
1674fetch_next_txq_desc:
1675		idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, tx_desc, tx_buf);
1676	}
1677
1678tx_splitq_clean_out:
1679	ntc += tx_q->desc_count;
1680	tx_q->next_to_clean = ntc;
1681}
1682
1683#define idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, buf)	\
1684do {							\
1685	(buf)++;					\
1686	(ntc)++;					\
1687	if (unlikely((ntc) == (txq)->desc_count)) {	\
1688		buf = (txq)->tx_buf;			\
1689		ntc = 0;				\
1690	}						\
1691} while (0)
1692
1693/**
1694 * idpf_tx_clean_buf_ring - clean flow scheduling TX queue buffers
1695 * @txq: queue to clean
1696 * @compl_tag: completion tag of packet to clean (from completion descriptor)
1697 * @cleaned: pointer to stats struct to track cleaned packets/bytes
1698 * @budget: Used to determine if we are in netpoll
1699 *
1700 * Cleans all buffers associated with the input completion tag either from the
1701 * TX buffer ring or from the hash table if the buffers were previously
1702 * stashed. Byte/segment counts for the packet associated with this
1703 * completion tag are reported via the @cleaned stats struct.
1704 */
1705static bool idpf_tx_clean_buf_ring(struct idpf_queue *txq, u16 compl_tag,
1706				   struct idpf_cleaned_stats *cleaned,
1707				   int budget)
1708{
1709	u16 idx = compl_tag & txq->compl_tag_bufid_m;
1710	struct idpf_tx_buf *tx_buf = NULL;
1711	u16 ntc = txq->next_to_clean;
1712	u16 num_descs_cleaned = 0;
1713	u16 orig_idx = idx;
1714
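	/* The low bits of the completion tag (masked by compl_tag_bufid_m) give
	 * the ring index of the packet's first buffer, mirroring how the tag
	 * was composed in idpf_tx_splitq_map().
	 */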
1715	tx_buf = &txq->tx_buf[idx];
1716
1717	while (tx_buf->compl_tag == (int)compl_tag) {
1718		if (tx_buf->skb) {
1719			idpf_tx_splitq_clean_hdr(txq, tx_buf, cleaned, budget);
1720		} else if (dma_unmap_len(tx_buf, len)) {
1721			dma_unmap_page(txq->dev,
1722				       dma_unmap_addr(tx_buf, dma),
1723				       dma_unmap_len(tx_buf, len),
1724				       DMA_TO_DEVICE);
1725			dma_unmap_len_set(tx_buf, len, 0);
1726		}
1727
1728		memset(tx_buf, 0, sizeof(struct idpf_tx_buf));
1729		tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
1730
1731		num_descs_cleaned++;
1732		idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf);
1733	}
1734
1735	/* If we didn't clean anything on the ring for this completion, there's
1736	 * nothing more to do.
1737	 */
1738	if (unlikely(!num_descs_cleaned))
1739		return false;
1740
1741	/* Otherwise, if we did clean a packet on the ring directly, it's safe
1742	 * to assume that the descriptors starting from the original
1743	 * next_to_clean up until the previously cleaned packet can be reused.
1744	 * Therefore, we will go back in the ring and stash any buffers still
1745	 * in the ring into the hash table to be cleaned later.
1746	 */
1747	tx_buf = &txq->tx_buf[ntc];
1748	while (tx_buf != &txq->tx_buf[orig_idx]) {
1749		idpf_stash_flow_sch_buffers(txq, tx_buf);
1750		idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, tx_buf);
1751	}
1752
1753	/* Finally, update next_to_clean to reflect the work that was just done
1754	 * on the ring, if any. If the packet was only cleaned from the hash
1755	 * table, the ring will not be impacted, so we should not touch
1756	 * next_to_clean; otherwise, the updated idx is used here.
1757	 */
1758	txq->next_to_clean = idx;
1759
1760	return true;
1761}
1762
1763/**
1764 * idpf_tx_handle_rs_completion - clean a single packet and all of its buffers
1765 * whether on the buffer ring or in the hash table
1766 * @txq: Tx ring to clean
1767 * @desc: pointer to completion queue descriptor to extract completion
1768 * information from
1769 * @cleaned: pointer to stats struct to track cleaned packets/bytes
1770 * @budget: Used to determine if we are in netpoll
1771 *
1772 * Cleaned bytes/packets are reported via the @cleaned stats struct
1773 */
1774static void idpf_tx_handle_rs_completion(struct idpf_queue *txq,
1775					 struct idpf_splitq_tx_compl_desc *desc,
1776					 struct idpf_cleaned_stats *cleaned,
1777					 int budget)
1778{
1779	u16 compl_tag;
1780
1781	if (!test_bit(__IDPF_Q_FLOW_SCH_EN, txq->flags)) {
1782		u16 head = le16_to_cpu(desc->q_head_compl_tag.q_head);
1783
1784		return idpf_tx_splitq_clean(txq, head, budget, cleaned, false);
1785	}
1786
1787	compl_tag = le16_to_cpu(desc->q_head_compl_tag.compl_tag);
1788
1789	/* If we didn't clean anything on the ring, this packet must be
1790	 * in the hash table. Go clean it there.
1791	 */
1792	if (!idpf_tx_clean_buf_ring(txq, compl_tag, cleaned, budget))
1793		idpf_tx_clean_stashed_bufs(txq, compl_tag, cleaned, budget);
1794}
1795
1796/**
1797 * idpf_tx_clean_complq - Reclaim resources on completion queue
1798 * @complq: Tx ring to clean
1799 * @budget: Used to determine if we are in netpoll
1800 * @cleaned: returns number of packets cleaned
1801 *
1802 * Returns true if there's any budget left (e.g. the clean is finished)
1803 */
1804static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget,
1805				 int *cleaned)
1806{
1807	struct idpf_splitq_tx_compl_desc *tx_desc;
1808	struct idpf_vport *vport = complq->vport;
1809	s16 ntc = complq->next_to_clean;
1810	struct idpf_netdev_priv *np;
1811	unsigned int complq_budget;
1812	bool complq_ok = true;
1813	int i;
1814
1815	complq_budget = vport->compln_clean_budget;
1816	tx_desc = IDPF_SPLITQ_TX_COMPLQ_DESC(complq, ntc);
1817	ntc -= complq->desc_count;
1818
1819	do {
1820		struct idpf_cleaned_stats cleaned_stats = { };
1821		struct idpf_queue *tx_q;
1822		int rel_tx_qid;
1823		u16 hw_head;
1824		u8 ctype;	/* completion type */
1825		u16 gen;
1826
1827		/* if the descriptor isn't done, no work yet to do */
1828		gen = le16_get_bits(tx_desc->qid_comptype_gen,
1829				    IDPF_TXD_COMPLQ_GEN_M);
1830		if (test_bit(__IDPF_Q_GEN_CHK, complq->flags) != gen)
1831			break;
1832
1833		/* Find necessary info of TX queue to clean buffers */
1834		rel_tx_qid = le16_get_bits(tx_desc->qid_comptype_gen,
1835					   IDPF_TXD_COMPLQ_QID_M);
1836		if (rel_tx_qid >= complq->txq_grp->num_txq ||
1837		    !complq->txq_grp->txqs[rel_tx_qid]) {
1838			dev_err(&complq->vport->adapter->pdev->dev,
1839				"TxQ not found\n");
1840			goto fetch_next_desc;
1841		}
1842		tx_q = complq->txq_grp->txqs[rel_tx_qid];
1843
1844		/* Determine completion type */
1845		ctype = le16_get_bits(tx_desc->qid_comptype_gen,
1846				      IDPF_TXD_COMPLQ_COMPL_TYPE_M);
1847		switch (ctype) {
1848		case IDPF_TXD_COMPLT_RE:
1849			hw_head = le16_to_cpu(tx_desc->q_head_compl_tag.q_head);
1850
1851			idpf_tx_splitq_clean(tx_q, hw_head, budget,
1852					     &cleaned_stats, true);
1853			break;
1854		case IDPF_TXD_COMPLT_RS:
1855			idpf_tx_handle_rs_completion(tx_q, tx_desc,
1856						     &cleaned_stats, budget);
1857			break;
1858		case IDPF_TXD_COMPLT_SW_MARKER:
1859			idpf_tx_handle_sw_marker(tx_q);
1860			break;
1861		default:
1862			dev_err(&tx_q->vport->adapter->pdev->dev,
1863				"Unknown TX completion type: %d\n",
1864				ctype);
1865			goto fetch_next_desc;
1866		}
1867
1868		u64_stats_update_begin(&tx_q->stats_sync);
1869		u64_stats_add(&tx_q->q_stats.tx.packets, cleaned_stats.packets);
1870		u64_stats_add(&tx_q->q_stats.tx.bytes, cleaned_stats.bytes);
1871		tx_q->cleaned_pkts += cleaned_stats.packets;
1872		tx_q->cleaned_bytes += cleaned_stats.bytes;
1873		complq->num_completions++;
1874		u64_stats_update_end(&tx_q->stats_sync);
1875
1876fetch_next_desc:
1877		tx_desc++;
1878		ntc++;
1879		if (unlikely(!ntc)) {
1880			ntc -= complq->desc_count;
1881			tx_desc = IDPF_SPLITQ_TX_COMPLQ_DESC(complq, 0);
1882			change_bit(__IDPF_Q_GEN_CHK, complq->flags);
1883		}
1884
1885		prefetch(tx_desc);
1886
1887		/* update budget accounting */
1888		complq_budget--;
1889	} while (likely(complq_budget));
1890
1891	/* Store the state of the complq to be used later in deciding if a
1892	 * TXQ can be started again
1893	 */
1894	if (unlikely(IDPF_TX_COMPLQ_PENDING(complq->txq_grp) >
1895		     IDPF_TX_COMPLQ_OVERFLOW_THRESH(complq)))
1896		complq_ok = false;
1897
1898	np = netdev_priv(complq->vport->netdev);
1899	for (i = 0; i < complq->txq_grp->num_txq; ++i) {
1900		struct idpf_queue *tx_q = complq->txq_grp->txqs[i];
1901		struct netdev_queue *nq;
1902		bool dont_wake;
1903
1904		/* We didn't clean anything on this queue, move along */
1905		if (!tx_q->cleaned_bytes)
1906			continue;
1907
1908		*cleaned += tx_q->cleaned_pkts;
1909
1910		/* Update BQL */
1911		nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
1912
1913		dont_wake = !complq_ok || IDPF_TX_BUF_RSV_LOW(tx_q) ||
1914			    np->state != __IDPF_VPORT_UP ||
1915			    !netif_carrier_ok(tx_q->vport->netdev);
1916		/* Check if the TXQ needs to and can be restarted */
1917		__netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes,
1918					   IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH,
1919					   dont_wake);
1920
1921		/* Reset cleaned stats for the next time this queue is
1922		 * cleaned
1923		 */
1924		tx_q->cleaned_bytes = 0;
1925		tx_q->cleaned_pkts = 0;
1926	}
1927
1928	ntc += complq->desc_count;
1929	complq->next_to_clean = ntc;
1930
1931	return !!complq_budget;
1932}
1933
1934/**
1935 * idpf_tx_splitq_build_ctb - populate command tag and size for queue
1936 * based scheduling descriptors
1937 * @desc: descriptor to populate
1938 * @params: pointer to tx params struct
1939 * @td_cmd: command to be filled in desc
1940 * @size: size of buffer
1941 */
1942void idpf_tx_splitq_build_ctb(union idpf_tx_flex_desc *desc,
1943			      struct idpf_tx_splitq_params *params,
1944			      u16 td_cmd, u16 size)
1945{
1946	desc->q.qw1.cmd_dtype =
1947		le16_encode_bits(params->dtype, IDPF_FLEX_TXD_QW1_DTYPE_M);
1948	desc->q.qw1.cmd_dtype |=
1949		le16_encode_bits(td_cmd, IDPF_FLEX_TXD_QW1_CMD_M);
1950	desc->q.qw1.buf_size = cpu_to_le16(size);
1951	desc->q.qw1.l2tags.l2tag1 = cpu_to_le16(params->td_tag);
1952}
1953
1954/**
1955 * idpf_tx_splitq_build_flow_desc - populate command tag and size for flow
1956 * scheduling descriptors
1957 * @desc: descriptor to populate
1958 * @params: pointer to tx params struct
1959 * @td_cmd: command to be filled in desc
1960 * @size: size of buffer
1961 */
1962void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
1963				    struct idpf_tx_splitq_params *params,
1964				    u16 td_cmd, u16 size)
1965{
1966	desc->flow.qw1.cmd_dtype = (u16)params->dtype | td_cmd;
1967	desc->flow.qw1.rxr_bufsize = cpu_to_le16((u16)size);
1968	desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag);
1969}
1970
1971/**
1972 * idpf_tx_maybe_stop_common - 1st level check for common Tx stop conditions
1973 * @tx_q: the queue to be checked
1975 * @size: number of descriptors we want to ensure are available
1975 *
1976 * Returns 0 if stop is not needed
1977 */
1978int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size)
1979{
1980	struct netdev_queue *nq;
1981
1982	if (likely(IDPF_DESC_UNUSED(tx_q) >= size))
1983		return 0;
1984
1985	u64_stats_update_begin(&tx_q->stats_sync);
1986	u64_stats_inc(&tx_q->q_stats.tx.q_busy);
1987	u64_stats_update_end(&tx_q->stats_sync);
1988
1989	nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
1990
1991	return netif_txq_maybe_stop(nq, IDPF_DESC_UNUSED(tx_q), size, size);
1992}
1993
1994/**
1995 * idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions
1996 * @tx_q: the queue to be checked
1997 * @descs_needed: number of descriptors required for this packet
1998 *
1999 * Returns 0 if stop is not needed
2000 */
2001static int idpf_tx_maybe_stop_splitq(struct idpf_queue *tx_q,
2002				     unsigned int descs_needed)
2003{
2004	if (idpf_tx_maybe_stop_common(tx_q, descs_needed))
2005		goto splitq_stop;
2006
2007	/* If there are too many outstanding completions expected on the
2008	 * completion queue, stop the TX queue to give the device some time to
2009	 * catch up
2010	 */
2011	if (unlikely(IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
2012		     IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq)))
2013		goto splitq_stop;
2014
2015	/* Also check for available bookkeeping buffers; if we are low, stop
2016	 * the queue to wait for more completions
2017	 */
2018	if (unlikely(IDPF_TX_BUF_RSV_LOW(tx_q)))
2019		goto splitq_stop;
2020
2021	return 0;
2022
2023splitq_stop:
2024	u64_stats_update_begin(&tx_q->stats_sync);
2025	u64_stats_inc(&tx_q->q_stats.tx.q_busy);
2026	u64_stats_update_end(&tx_q->stats_sync);
2027	netif_stop_subqueue(tx_q->vport->netdev, tx_q->idx);
2028
2029	return -EBUSY;
2030}
2031
2032/**
2033 * idpf_tx_buf_hw_update - Store the new tail value
2034 * @tx_q: queue to bump
2035 * @val: new tail index
2036 * @xmit_more: more skb's pending
2037 *
2038 * The naming here is special in that 'hw' signals that this function is about
2039 * to do a register write to update our queue status. We know this can only
2040 * mean tail here as HW should be owning head for TX.
2041 */
2042void idpf_tx_buf_hw_update(struct idpf_queue *tx_q, u32 val,
2043			   bool xmit_more)
2044{
2045	struct netdev_queue *nq;
2046
2047	nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
2048	tx_q->next_to_use = val;
2049
2050	idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED);
2051
2052	/* Force memory writes to complete before letting h/w
2053	 * know there are new descriptors to fetch.  (Only
2054	 * applicable for weak-ordered memory model archs,
2055	 * such as IA-64).
2056	 */
2057	wmb();
2058
2059	/* notify HW of packet */
2060	if (netif_xmit_stopped(nq) || !xmit_more)
2061		writel(val, tx_q->tail);
2062}
2063
2064/**
2065 * idpf_tx_desc_count_required - calculate number of Tx descriptors needed
2066 * @txq: queue to send buffer on
2067 * @skb: send buffer
2068 *
2069 * Returns number of data descriptors needed for this skb.
2070 */
2071unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq,
2072					 struct sk_buff *skb)
2073{
2074	const struct skb_shared_info *shinfo;
2075	unsigned int count = 0, i;
2076
2077	count += !!skb_headlen(skb);
2078
2079	if (!skb_is_nonlinear(skb))
2080		return count;
2081
2082	shinfo = skb_shinfo(skb);
2083	for (i = 0; i < shinfo->nr_frags; i++) {
2084		unsigned int size;
2085
2086		size = skb_frag_size(&shinfo->frags[i]);
2087
2088		/* We only need to use the idpf_size_to_txd_count check if the
2089		 * fragment is going to span multiple descriptors,
2090		 * i.e. size >= 16K.
2091		 */
2092		if (size >= SZ_16K)
2093			count += idpf_size_to_txd_count(size);
2094		else
2095			count++;
2096	}
2097
2098	if (idpf_chk_linearize(skb, txq->tx_max_bufs, count)) {
2099		if (__skb_linearize(skb))
2100			return 0;
2101
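		/* Linearization collapsed the skb into one contiguous buffer,
		 * so recompute the descriptor count from skb->len alone.
		 */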
2102		count = idpf_size_to_txd_count(skb->len);
2103		u64_stats_update_begin(&txq->stats_sync);
2104		u64_stats_inc(&txq->q_stats.tx.linearize);
2105		u64_stats_update_end(&txq->stats_sync);
2106	}
2107
2108	return count;
2109}
2110
2111/**
2112 * idpf_tx_dma_map_error - handle TX DMA map errors
2113 * @txq: queue to send buffer on
2114 * @skb: send buffer
2115 * @first: original first buffer info buffer for packet
2116 * @idx: starting point on ring to unwind
2117 */
2118void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb,
2119			   struct idpf_tx_buf *first, u16 idx)
2120{
2121	u64_stats_update_begin(&txq->stats_sync);
2122	u64_stats_inc(&txq->q_stats.tx.dma_map_errs);
2123	u64_stats_update_end(&txq->stats_sync);
2124
2125	/* clear dma mappings for failed tx_buf map */
2126	for (;;) {
2127		struct idpf_tx_buf *tx_buf;
2128
2129		tx_buf = &txq->tx_buf[idx];
2130		idpf_tx_buf_rel(txq, tx_buf);
2131		if (tx_buf == first)
2132			break;
2133		if (idx == 0)
2134			idx = txq->desc_count;
2135		idx--;
2136	}
2137
2138	if (skb_is_gso(skb)) {
2139		union idpf_tx_flex_desc *tx_desc;
2140
2141		/* If we failed a DMA mapping for a TSO packet, we will have
2142		 * used one additional descriptor for a context
2143		 * descriptor. Reset that here.
2144		 */
2145		tx_desc = IDPF_FLEX_TX_DESC(txq, idx);
2146		memset(tx_desc, 0, sizeof(struct idpf_flex_tx_ctx_desc));
2147		if (idx == 0)
2148			idx = txq->desc_count;
2149		idx--;
2150	}
2151
2152	/* Update tail in case netdev_xmit_more was previously true */
2153	idpf_tx_buf_hw_update(txq, idx, false);
2154}
2155
2156/**
2157 * idpf_tx_splitq_bump_ntu - adjust NTU and generation
2158 * @txq: the tx ring to wrap
2159 * @ntu: ring index to bump
2160 */
2161static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_queue *txq, u16 ntu)
2162{
2163	ntu++;
2164
2165	if (ntu == txq->desc_count) {
2166		ntu = 0;
2167		txq->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(txq);
2168	}
2169
2170	return ntu;
2171}
2172
2173/**
2174 * idpf_tx_splitq_map - Build the Tx flex descriptor
2175 * @tx_q: queue to send buffer on
2176 * @params: pointer to splitq params struct
2177 * @first: first buffer info buffer to use
2178 *
2179 * This function loops over the skb data pointed to by *first
2180 * and gets a physical address for each memory location and programs
2181 * it and the length into the transmit flex descriptor.
2182 */
2183static void idpf_tx_splitq_map(struct idpf_queue *tx_q,
2184			       struct idpf_tx_splitq_params *params,
2185			       struct idpf_tx_buf *first)
2186{
2187	union idpf_tx_flex_desc *tx_desc;
2188	unsigned int data_len, size;
2189	struct idpf_tx_buf *tx_buf;
2190	u16 i = tx_q->next_to_use;
2191	struct netdev_queue *nq;
2192	struct sk_buff *skb;
2193	skb_frag_t *frag;
2194	u16 td_cmd = 0;
2195	dma_addr_t dma;
2196
2197	skb = first->skb;
2198
2199	td_cmd = params->offload.td_cmd;
2200
2201	data_len = skb->data_len;
2202	size = skb_headlen(skb);
2203
2204	tx_desc = IDPF_FLEX_TX_DESC(tx_q, i);
2205
2206	dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE);
2207
2208	tx_buf = first;
2209
2210	params->compl_tag =
2211		(tx_q->compl_tag_cur_gen << tx_q->compl_tag_gen_s) | i;
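	/* Illustrative tag layout (actual widths come from compl_tag_gen_s and
	 * compl_tag_bufid_m): with a generation shift of 12, generation 0x3 and
	 * ring index 0x02a compose a completion tag of 0x302a.
	 */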
2212
2213	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
2214		unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
2215
2216		if (dma_mapping_error(tx_q->dev, dma))
2217			return idpf_tx_dma_map_error(tx_q, skb, first, i);
2218
2219		tx_buf->compl_tag = params->compl_tag;
2220
2221		/* record length, and DMA address */
2222		dma_unmap_len_set(tx_buf, len, size);
2223		dma_unmap_addr_set(tx_buf, dma, dma);
2224
2225		/* buf_addr is in same location for both desc types */
2226		tx_desc->q.buf_addr = cpu_to_le64(dma);
2227
2228		/* The stack can send us fragments that are too large for a
2229		 * single descriptor i.e. frag size > 16K-1. We will need to
2230		 * split the fragment across multiple descriptors in this case.
2231		 * To adhere to HW alignment restrictions, the fragment needs
2232		 * to be split such that the first chunk ends on a 4K boundary
2233		 * and all subsequent chunks start on a 4K boundary. We still
2234		 * want to send as much data as possible though, so our
2235		 * intermediate descriptor chunk size will be 12K.
2236		 *
2237		 * For example, consider a 32K fragment mapped to DMA addr 2600.
2238		 * ------------------------------------------------------------
2239		 * |                    frag_size = 32K                       |
2240		 * ------------------------------------------------------------
2241		 * |2600		  |16384	    |28672
2242		 *
2243		 * 3 descriptors will be used for this fragment. The HW expects
2244		 * the descriptors to contain the following:
2245		 * ------------------------------------------------------------
2246		 * | size = 13784         | size = 12K      | size = 6696     |
2247		 * | dma = 2600           | dma = 16384     | dma = 28672     |
2248		 * ------------------------------------------------------------
2249		 *
2250		 * We need to first adjust the max_data for the first chunk so
2251		 * that it ends on a 4K boundary. By negating the value of the
2252		 * DMA address and taking only the low order bits, we're
2253		 * effectively calculating
2254		 *	4K - (DMA addr lower order bits) =
2255		 *				bytes to next boundary.
2256		 *
2257		 * Add that to our base aligned max_data (12K) and we have
2258		 * our first chunk size. In the example above,
2259		 *	13784 = 12K + (4096-2600)
2260		 *
2261		 * After guaranteeing the first chunk ends on a 4K boundary, we
2262		 * will give the intermediate descriptors 12K chunks and
2263		 * whatever is left to the final descriptor. This ensures that
2264		 * all descriptors used for the remaining chunks of the
2265		 * fragment start on a 4K boundary and we use as few
2266		 * descriptors as possible.
2267		 */
2268		max_data += -dma & (IDPF_TX_MAX_READ_REQ_SIZE - 1);
2269		while (unlikely(size > IDPF_TX_MAX_DESC_DATA)) {
2270			idpf_tx_splitq_build_desc(tx_desc, params, td_cmd,
2271						  max_data);
2272
2273			tx_desc++;
2274			i++;
2275
2276			if (i == tx_q->desc_count) {
2277				tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0);
2278				i = 0;
2279				tx_q->compl_tag_cur_gen =
2280					IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
2281			}
2282
2283			/* Since this packet has a buffer that is going to span
2284			 * multiple descriptors, it's going to leave holes in
2285			 * to the TX buffer ring. To ensure these holes do not
2286			 * cause issues in the cleaning routines, we will clear
2287			 * them of any stale data and assign them the same
2288			 * completion tag as the current packet. Then when the
2289			 * packet is being cleaned, the cleaning routines will
2290			 * simply pass over these holes and finish cleaning the
2291			 * rest of the packet.
2292			 */
2293			memset(&tx_q->tx_buf[i], 0, sizeof(struct idpf_tx_buf));
2294			tx_q->tx_buf[i].compl_tag = params->compl_tag;
2295
2296			/* Adjust the DMA offset and the remaining size of the
2297			 * fragment.  On the first iteration of this loop,
2298			 * max_data will be >= 12K and <= 16K-1.  On any
2299			 * subsequent iteration of this loop, max_data will
2300			 * always be 12K.
2301			 */
2302			dma += max_data;
2303			size -= max_data;
2304
2305			/* Reset max_data since remaining chunks will be 12K
2306			 * at most
2307			 */
2308			max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
2309
2310			/* buf_addr is in same location for both desc types */
2311			tx_desc->q.buf_addr = cpu_to_le64(dma);
2312		}
2313
2314		if (!data_len)
2315			break;
2316
2317		idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
2318		tx_desc++;
2319		i++;
2320
2321		if (i == tx_q->desc_count) {
2322			tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0);
2323			i = 0;
2324			tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
2325		}
2326
2327		size = skb_frag_size(frag);
2328		data_len -= size;
2329
2330		dma = skb_frag_dma_map(tx_q->dev, frag, 0, size,
2331				       DMA_TO_DEVICE);
2332
2333		tx_buf = &tx_q->tx_buf[i];
2334	}
2335
2336	/* record SW timestamp if HW timestamp is not available */
2337	skb_tx_timestamp(skb);
2338
2339	/* write last descriptor with RS and EOP bits */
2340	td_cmd |= params->eop_cmd;
2341	idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
2342	i = idpf_tx_splitq_bump_ntu(tx_q, i);
2343
2344	/* set next_to_watch value indicating a packet is present */
2345	first->next_to_watch = tx_desc;
2346
2347	tx_q->txq_grp->num_completions_pending++;
2348
2349	/* record bytecount for BQL */
2350	nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
2351	netdev_tx_sent_queue(nq, first->bytecount);
2352
2353	idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
2354}
2355
2356/**
2357 * idpf_tso - computes mss and TSO length to prepare for TSO
2358 * @skb: pointer to skb
2359 * @off: pointer to struct that holds offload parameters
2360 *
2361 * Returns error (negative) if TSO was requested but cannot be applied to the
2362 * given skb, 0 if TSO does not apply to the given skb, or 1 otherwise.
2363 */
2364int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off)
2365{
2366	const struct skb_shared_info *shinfo;
2367	union {
2368		struct iphdr *v4;
2369		struct ipv6hdr *v6;
2370		unsigned char *hdr;
2371	} ip;
2372	union {
2373		struct tcphdr *tcp;
2374		struct udphdr *udp;
2375		unsigned char *hdr;
2376	} l4;
2377	u32 paylen, l4_start;
2378	int err;
2379
2380	if (!skb_is_gso(skb))
2381		return 0;
2382
2383	err = skb_cow_head(skb, 0);
2384	if (err < 0)
2385		return err;
2386
2387	shinfo = skb_shinfo(skb);
2388
2389	ip.hdr = skb_network_header(skb);
2390	l4.hdr = skb_transport_header(skb);
2391
2392	/* initialize outer IP header fields */
2393	if (ip.v4->version == 4) {
2394		ip.v4->tot_len = 0;
2395		ip.v4->check = 0;
2396	} else if (ip.v6->version == 6) {
2397		ip.v6->payload_len = 0;
2398	}
2399
2400	l4_start = skb_transport_offset(skb);
2401
2402	/* remove payload length from checksum */
2403	paylen = skb->len - l4_start;
2404
2405	switch (shinfo->gso_type & ~SKB_GSO_DODGY) {
2406	case SKB_GSO_TCPV4:
2407	case SKB_GSO_TCPV6:
2408		csum_replace_by_diff(&l4.tcp->check,
2409				     (__force __wsum)htonl(paylen));
2410		off->tso_hdr_len = __tcp_hdrlen(l4.tcp) + l4_start;
2411		break;
2412	case SKB_GSO_UDP_L4:
2413		csum_replace_by_diff(&l4.udp->check,
2414				     (__force __wsum)htonl(paylen));
2415		/* compute length of segmentation header */
2416		off->tso_hdr_len = sizeof(struct udphdr) + l4_start;
2417		l4.udp->len = htons(shinfo->gso_size + sizeof(struct udphdr));
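		/* For UDP segmentation the length field must describe a single
		 * segment (gso_size plus UDP header), not the whole
		 * super-packet.
		 */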
2418		break;
2419	default:
2420		return -EINVAL;
2421	}
2422
2423	off->tso_len = skb->len - off->tso_hdr_len;
2424	off->mss = shinfo->gso_size;
2425	off->tso_segs = shinfo->gso_segs;
2426
2427	off->tx_flags |= IDPF_TX_FLAGS_TSO;
2428
2429	return 1;
2430}
2431
2432/**
2433 * __idpf_chk_linearize - Check skb is not using too many buffers
2434 * @skb: send buffer
2435 * @max_bufs: maximum number of buffers
2436 *
2437 * For TSO we need to count the TSO header and segment payload separately.  As
2438 * such we need to check cases where we have max_bufs-1 fragments or more as we
2439 * can potentially require max_bufs+1 DMA transactions, 1 for the TSO header, 1
1440 * for the segment payload in the first descriptor, and another max_bufs-1 for
2441 * the fragments.
2442 */
2443static bool __idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs)
2444{
2445	const struct skb_shared_info *shinfo = skb_shinfo(skb);
2446	const skb_frag_t *frag, *stale;
2447	int nr_frags, sum;
2448
2449	/* no need to check if number of frags is less than max_bufs - 1 */
2450	nr_frags = shinfo->nr_frags;
2451	if (nr_frags < (max_bufs - 1))
2452		return false;
2453
2454	/* We need to walk through the list and validate that each group
2455	 * of max_bufs-2 fragments totals at least gso_size.
2456	 */
2457	nr_frags -= max_bufs - 2;
2458	frag = &shinfo->frags[0];
2459
2460	/* Initialize sum to the negative value of gso_size minus 1.  We use
2461	 * this as the worst case scenario in which the frag ahead of us only
2462	 * provides one byte which is why we are limited to max_bufs-2
2463	 * descriptors for a single transmit as the header and previous
2464	 * fragment are already consuming 2 descriptors.
2465	 */
2466	sum = 1 - shinfo->gso_size;
2467
2468	/* Add size of frags 0 through 4 to create our initial sum */
2469	sum += skb_frag_size(frag++);
2470	sum += skb_frag_size(frag++);
2471	sum += skb_frag_size(frag++);
2472	sum += skb_frag_size(frag++);
2473	sum += skb_frag_size(frag++);
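	/* Illustrative check, assuming a max_bufs of 8: each window of six
	 * consecutive frags must carry at least gso_size bytes. With
	 * gso_size = 9000 and six 1400-byte frags, the window sums to 8400,
	 * sum goes negative below, and the caller linearizes the skb.
	 */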
2474
2475	/* Walk through fragments adding latest fragment, testing it, and
2476	 * then removing stale fragments from the sum.
2477	 */
2478	for (stale = &shinfo->frags[0];; stale++) {
2479		int stale_size = skb_frag_size(stale);
2480
2481		sum += skb_frag_size(frag++);
2482
2483		/* The stale fragment may present us with a smaller
2484		 * descriptor than the actual fragment size. To account
2485		 * for that we need to remove all the data on the front and
2486		 * figure out what the remainder would be in the last
2487		 * descriptor associated with the fragment.
2488		 */
2489		if (stale_size > IDPF_TX_MAX_DESC_DATA) {
2490			int align_pad = -(skb_frag_off(stale)) &
2491					(IDPF_TX_MAX_READ_REQ_SIZE - 1);
2492
2493			sum -= align_pad;
2494			stale_size -= align_pad;
2495
2496			do {
2497				sum -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
2498				stale_size -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
2499			} while (stale_size > IDPF_TX_MAX_DESC_DATA);
2500		}
2501
2502		/* if sum is negative we failed to make sufficient progress */
2503		if (sum < 0)
2504			return true;
2505
2506		if (!nr_frags--)
2507			break;
2508
2509		sum -= stale_size;
2510	}
2511
2512	return false;
2513}
2514
2515/**
2516 * idpf_chk_linearize - Check if skb exceeds max descriptors per packet
2517 * @skb: send buffer
2518 * @max_bufs: maximum scatter gather buffers for single packet
2519 * @count: number of buffers this packet needs
2520 *
2521 * Make sure we don't exceed maximum scatter gather buffers for a single
2522 * packet. We have to do some special checking around the boundary (max_bufs-1)
2523 * if TSO is on since we need to count the TSO header and payload separately.
2524 * E.g.: a packet with 7 fragments can require 9 DMA transactions; 1 for TSO
2525 * header, 1 for segment payload, and then 7 for the fragments.
2526 */
2527bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
2528			unsigned int count)
2529{
2530	if (likely(count < max_bufs))
2531		return false;
2532	if (skb_is_gso(skb))
2533		return __idpf_chk_linearize(skb, max_bufs);
2534
2535	return count > max_bufs;
2536}
2537
2538/**
2539 * idpf_tx_splitq_get_ctx_desc - grab next desc and update buffer ring
2540 * @txq: queue to put context descriptor on
2541 *
2542 * Since the TX buffer ring mimics the descriptor ring, update the tx buffer
2543 * ring entry to reflect that this index is a context descriptor
2544 */
2545static struct idpf_flex_tx_ctx_desc *
2546idpf_tx_splitq_get_ctx_desc(struct idpf_queue *txq)
2547{
2548	struct idpf_flex_tx_ctx_desc *desc;
2549	int i = txq->next_to_use;
2550
2551	memset(&txq->tx_buf[i], 0, sizeof(struct idpf_tx_buf));
2552	txq->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
2553
2554	/* grab the next descriptor */
2555	desc = IDPF_FLEX_TX_CTX_DESC(txq, i);
2556	txq->next_to_use = idpf_tx_splitq_bump_ntu(txq, i);
2557
2558	return desc;
2559}
2560
2561/**
2562 * idpf_tx_drop_skb - free the SKB and bump tail if necessary
2563 * @tx_q: queue to send buffer on
2564 * @skb: pointer to skb
2565 */
2566netdev_tx_t idpf_tx_drop_skb(struct idpf_queue *tx_q, struct sk_buff *skb)
2567{
2568	u64_stats_update_begin(&tx_q->stats_sync);
2569	u64_stats_inc(&tx_q->q_stats.tx.skb_drops);
2570	u64_stats_update_end(&tx_q->stats_sync);
2571
2572	idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
2573
2574	dev_kfree_skb(skb);
2575
2576	return NETDEV_TX_OK;
2577}
2578
2579/**
2580 * idpf_tx_splitq_frame - Sends buffer on Tx ring using flex descriptors
2581 * @skb: send buffer
2582 * @tx_q: queue to send buffer on
2583 *
2584 * Returns NETDEV_TX_OK if sent, else an error code
2585 */
2586static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
2587					struct idpf_queue *tx_q)
2588{
2589	struct idpf_tx_splitq_params tx_params = { };
2590	struct idpf_tx_buf *first;
2591	unsigned int count;
2592	int tso;
2593
2594	count = idpf_tx_desc_count_required(tx_q, skb);
2595	if (unlikely(!count))
2596		return idpf_tx_drop_skb(tx_q, skb);
2597
2598	tso = idpf_tso(skb, &tx_params.offload);
2599	if (unlikely(tso < 0))
2600		return idpf_tx_drop_skb(tx_q, skb);
2601
2602	/* Check for splitq specific TX resources */
2603	count += (IDPF_TX_DESCS_PER_CACHE_LINE + tso);
2604	if (idpf_tx_maybe_stop_splitq(tx_q, count)) {
2605		idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
2606
2607		return NETDEV_TX_BUSY;
2608	}
2609
2610	if (tso) {
2611		/* If tso is needed, set up context desc */
2612		struct idpf_flex_tx_ctx_desc *ctx_desc =
2613			idpf_tx_splitq_get_ctx_desc(tx_q);
2614
2615		ctx_desc->tso.qw1.cmd_dtype =
2616				cpu_to_le16(IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX |
2617					    IDPF_TX_FLEX_CTX_DESC_CMD_TSO);
2618		ctx_desc->tso.qw0.flex_tlen =
2619				cpu_to_le32(tx_params.offload.tso_len &
2620					    IDPF_TXD_FLEX_CTX_TLEN_M);
2621		ctx_desc->tso.qw0.mss_rt =
2622				cpu_to_le16(tx_params.offload.mss &
2623					    IDPF_TXD_FLEX_CTX_MSS_RT_M);
2624		ctx_desc->tso.qw0.hdr_len = tx_params.offload.tso_hdr_len;
2625
2626		u64_stats_update_begin(&tx_q->stats_sync);
2627		u64_stats_inc(&tx_q->q_stats.tx.lso_pkts);
2628		u64_stats_update_end(&tx_q->stats_sync);
2629	}
2630
2631	/* record the location of the first descriptor for this packet */
2632	first = &tx_q->tx_buf[tx_q->next_to_use];
2633	first->skb = skb;
2634
2635	if (tso) {
2636		first->gso_segs = tx_params.offload.tso_segs;
2637		first->bytecount = skb->len +
2638			((first->gso_segs - 1) * tx_params.offload.tso_hdr_len);
2639	} else {
2640		first->gso_segs = 1;
2641		first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
2642	}
2643
2644	if (test_bit(__IDPF_Q_FLOW_SCH_EN, tx_q->flags)) {
2645		tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE;
2646		tx_params.eop_cmd = IDPF_TXD_FLEX_FLOW_CMD_EOP;
2647		/* Set the RE bit to catch any packets that may not have been
2648		 * stashed during RS completion cleaning. MIN_GAP is set to
2649		 * MIN_RING size to ensure it will be set at least once each
2650		 * time around the ring.
2651		 */
2652		if (!(tx_q->next_to_use % IDPF_TX_SPLITQ_RE_MIN_GAP)) {
2653			tx_params.eop_cmd |= IDPF_TXD_FLEX_FLOW_CMD_RE;
2654			tx_q->txq_grp->num_completions_pending++;
2655		}
2656
2657		if (skb->ip_summed == CHECKSUM_PARTIAL)
2658			tx_params.offload.td_cmd |= IDPF_TXD_FLEX_FLOW_CMD_CS_EN;
2659
2660	} else {
2661		tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2;
2662		tx_params.eop_cmd = IDPF_TXD_LAST_DESC_CMD;
2663
2664		if (skb->ip_summed == CHECKSUM_PARTIAL)
2665			tx_params.offload.td_cmd |= IDPF_TX_FLEX_DESC_CMD_CS_EN;
2666	}
2667
2668	idpf_tx_splitq_map(tx_q, &tx_params, first);
2669
2670	return NETDEV_TX_OK;
2671}
2672
2673/**
2674 * idpf_tx_splitq_start - Selects the right Tx queue to send buffer
2675 * @skb: send buffer
2676 * @netdev: network interface device structure
2677 *
2678 * Returns NETDEV_TX_OK if sent, else an error code
2679 */
2680netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb,
2681				 struct net_device *netdev)
2682{
2683	struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
2684	struct idpf_queue *tx_q;
2685
2686	if (unlikely(skb_get_queue_mapping(skb) >= vport->num_txq)) {
2687		dev_kfree_skb_any(skb);
2688
2689		return NETDEV_TX_OK;
2690	}
2691
2692	tx_q = vport->txqs[skb_get_queue_mapping(skb)];
2693
2694	/* hardware can't handle really short frames, hardware padding works
2695	 * beyond this point
2696	 */
2697	if (skb_put_padto(skb, tx_q->tx_min_pkt_len)) {
2698		idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
2699
2700		return NETDEV_TX_OK;
2701	}
2702
2703	return idpf_tx_splitq_frame(skb, tx_q);
2704}
2705
2706/**
2707 * idpf_ptype_to_htype - get a hash type
2708 * @decoded: Decoded Rx packet type related fields
2709 *
2710 * Returns appropriate hash type (such as PKT_HASH_TYPE_L2/L3/L4) to be used by
2711 * skb_set_hash, based on the PTYPE parsed by the HW Rx pipeline and carried
2712 * in the Rx descriptor.
2713 */
2714enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *decoded)
2715{
2716	if (!decoded->known)
2717		return PKT_HASH_TYPE_NONE;
2718	if (decoded->payload_layer == IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 &&
2719	    decoded->inner_prot)
2720		return PKT_HASH_TYPE_L4;
2721	if (decoded->payload_layer == IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 &&
2722	    decoded->outer_ip)
2723		return PKT_HASH_TYPE_L3;
2724	if (decoded->outer_ip == IDPF_RX_PTYPE_OUTER_L2)
2725		return PKT_HASH_TYPE_L2;
2726
2727	return PKT_HASH_TYPE_NONE;
2728}
2729
2730/**
2731 * idpf_rx_hash - set the hash value in the skb
2732 * @rxq: Rx descriptor ring packet is being transacted on
2733 * @skb: pointer to current skb being populated
2734 * @rx_desc: Receive descriptor
2735 * @decoded: Decoded Rx packet type related fields
2736 */
2737static void idpf_rx_hash(struct idpf_queue *rxq, struct sk_buff *skb,
2738			 struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
2739			 struct idpf_rx_ptype_decoded *decoded)
2740{
2741	u32 hash;
2742
2743	if (unlikely(!idpf_is_feature_ena(rxq->vport, NETIF_F_RXHASH)))
2744		return;
2745
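	/* The 32-bit hash is scattered across three descriptor fields: hash1
	 * supplies bits 0-15, hash2 bits 16-23 and hash3 bits 24-31.
	 */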
2746	hash = le16_to_cpu(rx_desc->hash1) |
2747	       (rx_desc->ff2_mirrid_hash2.hash2 << 16) |
2748	       (rx_desc->hash3 << 24);
2749
2750	skb_set_hash(skb, hash, idpf_ptype_to_htype(decoded));
2751}
2752
2753/**
2754 * idpf_rx_csum - Indicate in skb if checksum is good
2755 * @rxq: Rx descriptor ring packet is being transacted on
2756 * @skb: pointer to current skb being populated
2757 * @csum_bits: checksum fields extracted from the descriptor
2758 * @decoded: Decoded Rx packet type related fields
2759 *
2760 * skb->protocol must be set before this function is called
2761 */
2762static void idpf_rx_csum(struct idpf_queue *rxq, struct sk_buff *skb,
2763			 struct idpf_rx_csum_decoded *csum_bits,
2764			 struct idpf_rx_ptype_decoded *decoded)
2765{
2766	bool ipv4, ipv6;
2767
2768	/* check if Rx checksum is enabled */
2769	if (unlikely(!idpf_is_feature_ena(rxq->vport, NETIF_F_RXCSUM)))
2770		return;
2771
2772	/* check if HW has decoded the packet and checksum */
2773	if (!(csum_bits->l3l4p))
2774		return;
2775
2776	ipv4 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV4);
2777	ipv6 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV6);
2778
2779	if (ipv4 && (csum_bits->ipe || csum_bits->eipe))
2780		goto checksum_fail;
2781
2782	if (ipv6 && csum_bits->ipv6exadd)
2783		return;
2784
2785	/* check for L4 errors and handle packets that were not able to be
2786	 * checksummed
2787	 */
2788	if (csum_bits->l4e)
2789		goto checksum_fail;
2790
2791	/* Only report checksum unnecessary for ICMP, TCP, UDP, or SCTP */
2792	switch (decoded->inner_prot) {
2793	case IDPF_RX_PTYPE_INNER_PROT_ICMP:
2794	case IDPF_RX_PTYPE_INNER_PROT_TCP:
2795	case IDPF_RX_PTYPE_INNER_PROT_UDP:
2796		if (!csum_bits->raw_csum_inv) {
2797			u16 csum = csum_bits->raw_csum;
2798
2799			skb->csum = csum_unfold((__force __sum16)~swab16(csum));
2800			skb->ip_summed = CHECKSUM_COMPLETE;
2801		} else {
2802			skb->ip_summed = CHECKSUM_UNNECESSARY;
2803		}
2804		break;
2805	case IDPF_RX_PTYPE_INNER_PROT_SCTP:
2806		skb->ip_summed = CHECKSUM_UNNECESSARY;
2807		break;
2808	default:
2809		break;
2810	}
2811
2812	return;
2813
2814checksum_fail:
2815	u64_stats_update_begin(&rxq->stats_sync);
2816	u64_stats_inc(&rxq->q_stats.rx.hw_csum_err);
2817	u64_stats_update_end(&rxq->stats_sync);
2818}
2819
2820/**
2821 * idpf_rx_splitq_extract_csum_bits - Extract checksum bits from descriptor
2822 * @rx_desc: receive descriptor
2823 * @csum: structure to extract checksum fields
2824 *
2825 **/
2826static void idpf_rx_splitq_extract_csum_bits(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
2827					     struct idpf_rx_csum_decoded *csum)
2828{
2829	u8 qword0, qword1;
2830
2831	qword0 = rx_desc->status_err0_qw0;
2832	qword1 = rx_desc->status_err0_qw1;
2833
2834	csum->ipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_M,
2835			      qword1);
2836	csum->eipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_M,
2837			       qword1);
2838	csum->l4e = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_M,
2839			      qword1);
2840	csum->l3l4p = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_M,
2841				qword1);
2842	csum->ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M,
2843				    qword0);
2844	csum->raw_csum_inv =
2845		le16_get_bits(rx_desc->ptype_err_fflags0,
2846			      VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M);
2847	csum->raw_csum = le16_to_cpu(rx_desc->misc.raw_cs);
2848}
2849
2850/**
2851 * idpf_rx_rsc - Set the RSC fields in the skb
2852 * @rxq : Rx descriptor ring packet is being transacted on
2853 * @skb : pointer to current skb being populated
2854 * @rx_desc: Receive descriptor
2855 * @decoded: Decoded Rx packet type related fields
2856 *
2857 * Return 0 on success and error code on failure
2858 *
2859 * Populate the skb fields with the total number of RSC segments, RSC payload
2860 * length and packet type.
2861 */
2862static int idpf_rx_rsc(struct idpf_queue *rxq, struct sk_buff *skb,
2863		       struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
2864		       struct idpf_rx_ptype_decoded *decoded)
2865{
2866	u16 rsc_segments, rsc_seg_len;
2867	bool ipv4, ipv6;
2868	int len;
2869
2870	if (unlikely(!decoded->outer_ip))
2871		return -EINVAL;
2872
2873	rsc_seg_len = le16_to_cpu(rx_desc->misc.rscseglen);
2874	if (unlikely(!rsc_seg_len))
2875		return -EINVAL;
2876
2877	ipv4 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV4);
2878	ipv6 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV6);
2879
2880	if (unlikely(!(ipv4 ^ ipv6)))
2881		return -EINVAL;
2882
2883	rsc_segments = DIV_ROUND_UP(skb->data_len, rsc_seg_len);
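	/* Illustrative example: 45000 bytes of coalesced payload with a
	 * 1448-byte segment length yields 32 RSC segments.
	 */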
2884	if (unlikely(rsc_segments == 1))
2885		return 0;
2886
2887	NAPI_GRO_CB(skb)->count = rsc_segments;
2888	skb_shinfo(skb)->gso_size = rsc_seg_len;
2889
2890	skb_reset_network_header(skb);
2891	len = skb->len - skb_transport_offset(skb);
2892
2893	if (ipv4) {
2894		struct iphdr *ipv4h = ip_hdr(skb);
2895
2896		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
2897
2898		/* Reset and set transport header offset in skb */
2899		skb_set_transport_header(skb, sizeof(struct iphdr));
2900
2901		/* Compute the TCP pseudo-header checksum */
2902		tcp_hdr(skb)->check =
2903			~tcp_v4_check(len, ipv4h->saddr, ipv4h->daddr, 0);
2904	} else {
2905		struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2906
2907		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
2908		skb_set_transport_header(skb, sizeof(struct ipv6hdr));
2909		tcp_hdr(skb)->check =
2910			~tcp_v6_check(len, &ipv6h->saddr, &ipv6h->daddr, 0);
2911	}
2912
2913	tcp_gro_complete(skb);
2914
2915	u64_stats_update_begin(&rxq->stats_sync);
2916	u64_stats_inc(&rxq->q_stats.rx.rsc_pkts);
2917	u64_stats_update_end(&rxq->stats_sync);
2918
2919	return 0;
2920}
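
/*
 * Illustrative sketch (hypothetical sizes, not driver code): how the RSC
 * fields set above become GRO metadata. For a coalesced skb carrying 9000
 * bytes of paged payload (skb->data_len) with a hardware-reported rscseglen
 * of 1448:
 *
 *	rsc_segments = DIV_ROUND_UP(9000, 1448) = 7
 *	NAPI_GRO_CB(skb)->count   = 7
 *	skb_shinfo(skb)->gso_size = 1448
 *
 * The TCP checksum is then seeded with the inverted pseudo-header checksum
 * so that tcp_gro_complete() can finalize the skb exactly as if software
 * GRO had built it.
 */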
2921
2922/**
2923 * idpf_rx_process_skb_fields - Populate skb header fields from Rx descriptor
2924 * @rxq: Rx descriptor ring packet is being transacted on
2925 * @skb: pointer to current skb being populated
2926 * @rx_desc: Receive descriptor
2927 *
2928 * This function checks the ring, descriptor, and packet information in
2929 * order to populate the hash, checksum, protocol, and
2930 * other fields within the skb.
2931 */
2932static int idpf_rx_process_skb_fields(struct idpf_queue *rxq,
2933				      struct sk_buff *skb,
2934				      struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
2935{
2936	struct idpf_rx_csum_decoded csum_bits = { };
2937	struct idpf_rx_ptype_decoded decoded;
2938	u16 rx_ptype;
2939
2940	rx_ptype = le16_get_bits(rx_desc->ptype_err_fflags0,
2941				 VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M);
2942
2943	decoded = rxq->vport->rx_ptype_lkup[rx_ptype];
2944	/* If we don't know the ptype we can't do anything else with it. Just
2945	 * pass it up the stack as-is.
2946	 */
2947	if (!decoded.known)
2948		return 0;
2949
2950	/* process RSS/hash */
2951	idpf_rx_hash(rxq, skb, rx_desc, &decoded);
2952
2953	skb->protocol = eth_type_trans(skb, rxq->vport->netdev);
2954
2955	if (le16_get_bits(rx_desc->hdrlen_flags,
2956			  VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M))
2957		return idpf_rx_rsc(rxq, skb, rx_desc, &decoded);
2958
2959	idpf_rx_splitq_extract_csum_bits(rx_desc, &csum_bits);
2960	idpf_rx_csum(rxq, skb, &csum_bits, &decoded);
2961
2962	return 0;
2963}
2964
2965/**
2966 * idpf_rx_add_frag - Add contents of Rx buffer to sk_buff as a frag
2967 * @rx_buf: buffer containing page to add
2968 * @skb: sk_buff to place the data into
2969 * @size: packet length from rx_desc
2970 *
2971 * This function will add the data contained in rx_buf->page to the skb.
2972 * It will just attach the page as a frag to the skb.
2973 * The function will then update the page offset.
2974 */
2975void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb,
2976		      unsigned int size)
2977{
2978	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
2979			rx_buf->page_offset, size, rx_buf->truesize);
2980
2981	rx_buf->page = NULL;
2982}
2983
2984/**
2985 * idpf_rx_construct_skb - Allocate skb and populate it
2986 * @rxq: Rx descriptor queue
2987 * @rx_buf: Rx buffer to pull data from
2988 * @size: the length of the packet
2989 *
2990 * This function allocates an skb. It then populates it with the page
2991 * data from the current receive descriptor, taking care to set up the
2992 * skb correctly.
2993 */
2994struct sk_buff *idpf_rx_construct_skb(struct idpf_queue *rxq,
2995				      struct idpf_rx_buf *rx_buf,
2996				      unsigned int size)
2997{
2998	unsigned int headlen;
2999	struct sk_buff *skb;
3000	void *va;
3001
3002	va = page_address(rx_buf->page) + rx_buf->page_offset;
3003
3004	/* prefetch first cache line of first page */
3005	net_prefetch(va);
3006	/* allocate a skb to store the frags */
3007	skb = __napi_alloc_skb(&rxq->q_vector->napi, IDPF_RX_HDR_SIZE,
3008			       GFP_ATOMIC);
3009	if (unlikely(!skb)) {
3010		idpf_rx_put_page(rx_buf);
3011
3012		return NULL;
3013	}
3014
3015	skb_record_rx_queue(skb, rxq->idx);
3016	skb_mark_for_recycle(skb);
3017
3018	/* Determine available headroom for copy */
3019	headlen = size;
3020	if (headlen > IDPF_RX_HDR_SIZE)
3021		headlen = eth_get_headlen(skb->dev, va, IDPF_RX_HDR_SIZE);
3022
3023	/* align pull length to size of long to optimize memcpy performance */
3024	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
3025
3026	/* if we exhaust the linear part then add what is left as a frag */
3027	size -= headlen;
3028	if (!size) {
3029		idpf_rx_put_page(rx_buf);
3030
3031		return skb;
3032	}
3033
3034	skb_add_rx_frag(skb, 0, rx_buf->page, rx_buf->page_offset + headlen,
3035			size, rx_buf->truesize);
3036
3037	/* Since we're giving the page to the stack, clear our reference to it.
3038	 * We'll get a new one during buffer posting.
3039	 */
3040	rx_buf->page = NULL;
3041
3042	return skb;
3043}
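
/*
 * Illustrative example with hypothetical sizes: for a 1400 byte TCP packet
 * and an IDPF_RX_HDR_SIZE of 256, eth_get_headlen() might report 66 bytes of
 * Ethernet/IP/TCP headers. Those 66 bytes are copied into the skb linear
 * area:
 *
 *	memcpy(__skb_put(skb, 66), va, ALIGN(66, sizeof(long)))
 *
 * and the remaining 1334 payload bytes stay in the page, attached as frag 0
 * at page_offset + 66. Only protocol headers are ever copied; the payload
 * reaches the stack through the page itself.
 */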
3044
3045/**
3046 * idpf_rx_hdr_construct_skb - Allocate skb and populate it from header buffer
3047 * @rxq: Rx descriptor queue
3048 * @va: Rx buffer to pull data from
3049 * @size: the length of the packet
3050 *
3051 * This function allocates an skb. It then populates it with the page data from
3052 * the current receive descriptor, taking care to set up the skb correctly.
3053 * This specifically uses a header buffer to start building the skb.
3054 */
3055static struct sk_buff *idpf_rx_hdr_construct_skb(struct idpf_queue *rxq,
3056						 const void *va,
3057						 unsigned int size)
3058{
3059	struct sk_buff *skb;
3060
3061	/* allocate a skb to store the frags */
3062	skb = __napi_alloc_skb(&rxq->q_vector->napi, size, GFP_ATOMIC);
3063	if (unlikely(!skb))
3064		return NULL;
3065
3066	skb_record_rx_queue(skb, rxq->idx);
3067
3068	memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
3069
3070	/* More than likely, a payload fragment, which will use a page from
3071	 * the page_pool, will be added to the SKB, so mark it for recycle
3072	 * preemptively. And if not, it's inconsequential.
3073	 */
3074	skb_mark_for_recycle(skb);
3075
3076	return skb;
3077}
3078
3079/**
3080 * idpf_rx_splitq_test_staterr - tests bits in Rx descriptor
3081 * status and error fields
3082 * @stat_err_field: field from descriptor to test bits in
3083 * @stat_err_bits: value to mask
3084 *
 * Return: true if any of the given status/error bits are set in the field
3085 */
3086static bool idpf_rx_splitq_test_staterr(const u8 stat_err_field,
3087					const u8 stat_err_bits)
3088{
3089	return !!(stat_err_field & stat_err_bits);
3090}
3091
3092/**
3093 * idpf_rx_splitq_is_eop - process handling of EOP buffers
3094 * @rx_desc: Rx descriptor for current buffer
3095 *
3096 * If the buffer is an EOP buffer, this function exits returning true,
3097 * otherwise it returns false, indicating that this is in fact a non-EOP buffer.
3098 */
3099static bool idpf_rx_splitq_is_eop(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
3100{
3101	/* if we are the last buffer then there is nothing else to do */
3102	return likely(idpf_rx_splitq_test_staterr(rx_desc->status_err0_qw1,
3103						  IDPF_RXD_EOF_SPLITQ));
3104}
3105
3106/**
3107 * idpf_rx_splitq_clean - Clean completed descriptors from Rx queue
3108 * @rxq: Rx descriptor queue to retrieve receive buffer queue
3109 * @budget: Total limit on number of packets to process
3110 *
3111 * This function provides a "bounce buffer" approach to Rx interrupt
3112 * processing. The advantage is that, on systems where IOMMU access is
3113 * expensive, the DMA mappings of the receive pages are kept and reused
3114 * rather than being torn down and re-established for every buffer.
3115 *
3116 * Returns amount of work completed
3117 */
3118static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget)
3119{
3120	int total_rx_bytes = 0, total_rx_pkts = 0;
3121	struct idpf_queue *rx_bufq = NULL;
3122	struct sk_buff *skb = rxq->skb;
3123	u16 ntc = rxq->next_to_clean;
3124
3125	/* Process Rx packets bounded by budget */
3126	while (likely(total_rx_pkts < budget)) {
3127		struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc;
3128		struct idpf_sw_queue *refillq = NULL;
3129		struct idpf_rxq_set *rxq_set = NULL;
3130		struct idpf_rx_buf *rx_buf = NULL;
3131		union virtchnl2_rx_desc *desc;
3132		unsigned int pkt_len = 0;
3133		unsigned int hdr_len = 0;
3134		u16 gen_id, buf_id = 0;
3135		 /* Header buffer overflow only valid for header split */
3136		bool hbo = false;
3137		int bufq_id;
3138		u8 rxdid;
3139
3140		/* get the Rx desc from Rx queue based on 'next_to_clean' */
3141		desc = IDPF_RX_DESC(rxq, ntc);
3142		rx_desc = (struct virtchnl2_rx_flex_desc_adv_nic_3 *)desc;
3143
3144		/* This memory barrier is needed to keep us from reading
3145		 * any other fields out of the rx_desc
3146		 */
3147		dma_rmb();
3148
3149		/* if the descriptor isn't done, no work yet to do */
3150		gen_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
3151				       VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M);
3152
3153		if (test_bit(__IDPF_Q_GEN_CHK, rxq->flags) != gen_id)
3154			break;
3155
3156		rxdid = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_M,
3157				  rx_desc->rxdid_ucast);
3158		if (rxdid != VIRTCHNL2_RXDID_2_FLEX_SPLITQ) {
3159			IDPF_RX_BUMP_NTC(rxq, ntc);
3160			u64_stats_update_begin(&rxq->stats_sync);
3161			u64_stats_inc(&rxq->q_stats.rx.bad_descs);
3162			u64_stats_update_end(&rxq->stats_sync);
3163			continue;
3164		}
3165
3166		pkt_len = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
3167					VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M);
3168
3169		hbo = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_M,
3170				rx_desc->status_err0_qw1);
3171
3172		if (unlikely(hbo)) {
3173			/* If a header buffer overflow occurs, i.e. the header is
3174			 * too large to fit in the header split buffer, HW will
3175			 * put the entire packet, including headers, in the
3176			 * data/payload buffer.
3177			 */
3178			u64_stats_update_begin(&rxq->stats_sync);
3179			u64_stats_inc(&rxq->q_stats.rx.hsplit_buf_ovf);
3180			u64_stats_update_end(&rxq->stats_sync);
3181			goto bypass_hsplit;
3182		}
3183
3184		hdr_len = le16_get_bits(rx_desc->hdrlen_flags,
3185					VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M);
3186
3187bypass_hsplit:
3188		bufq_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
3189					VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M);
3190
3191		rxq_set = container_of(rxq, struct idpf_rxq_set, rxq);
3192		if (!bufq_id)
3193			refillq = rxq_set->refillq0;
3194		else
3195			refillq = rxq_set->refillq1;
3196
3197		/* retrieve buffer from the rxq */
3198		rx_bufq = &rxq->rxq_grp->splitq.bufq_sets[bufq_id].bufq;
3199
3200		buf_id = le16_to_cpu(rx_desc->buf_id);
3201
3202		rx_buf = &rx_bufq->rx_buf.buf[buf_id];
3203
3204		if (hdr_len) {
3205			const void *va = (u8 *)rx_bufq->rx_buf.hdr_buf_va +
3206						(u32)buf_id * IDPF_HDR_BUF_SIZE;
3207
3208			skb = idpf_rx_hdr_construct_skb(rxq, va, hdr_len);
3209			u64_stats_update_begin(&rxq->stats_sync);
3210			u64_stats_inc(&rxq->q_stats.rx.hsplit_pkts);
3211			u64_stats_update_end(&rxq->stats_sync);
3212		}
3213
3214		if (pkt_len) {
3215			idpf_rx_sync_for_cpu(rx_buf, pkt_len);
3216			if (skb)
3217				idpf_rx_add_frag(rx_buf, skb, pkt_len);
3218			else
3219				skb = idpf_rx_construct_skb(rxq, rx_buf,
3220							    pkt_len);
3221		} else {
3222			idpf_rx_put_page(rx_buf);
3223		}
3224
3225		/* exit if we failed to retrieve a buffer */
3226		if (!skb)
3227			break;
3228
3229		idpf_rx_post_buf_refill(refillq, buf_id);
3230
3231		IDPF_RX_BUMP_NTC(rxq, ntc);
3232		/* skip if it is non EOP desc */
3233		if (!idpf_rx_splitq_is_eop(rx_desc))
3234			continue;
3235
3236		/* pad skb if needed (to make valid ethernet frame) */
3237		if (eth_skb_pad(skb)) {
3238			skb = NULL;
3239			continue;
3240		}
3241
3242		/* probably a little skewed due to removing CRC */
3243		total_rx_bytes += skb->len;
3244
3245		/* protocol */
3246		if (unlikely(idpf_rx_process_skb_fields(rxq, skb, rx_desc))) {
3247			dev_kfree_skb_any(skb);
3248			skb = NULL;
3249			continue;
3250		}
3251
3252		/* send completed skb up the stack */
3253		napi_gro_receive(&rxq->q_vector->napi, skb);
3254		skb = NULL;
3255
3256		/* update budget accounting */
3257		total_rx_pkts++;
3258	}
3259
3260	rxq->next_to_clean = ntc;
3261
3262	rxq->skb = skb;
3263	u64_stats_update_begin(&rxq->stats_sync);
3264	u64_stats_add(&rxq->q_stats.rx.packets, total_rx_pkts);
3265	u64_stats_add(&rxq->q_stats.rx.bytes, total_rx_bytes);
3266	u64_stats_update_end(&rxq->stats_sync);
3267
3268	/* guarantee a trip back through this routine if there was a failure */
3269	return total_rx_pkts;
3270}
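
/*
 * Note on the generation (GEN) check above, as an illustrative sketch rather
 * than additional driver logic: software keeps a per-queue generation flag
 * (__IDPF_Q_GEN_CHK) that is toggled whenever next_to_clean wraps, while the
 * hardware reports a generation bit in every completed descriptor that
 * toggles on each pass through the ring. A descriptor is treated as done
 * only while the two agree:
 *
 *	if (test_bit(__IDPF_Q_GEN_CHK, rxq->flags) != gen_id)
 *		break;
 *
 * which lets completions be detected without zeroing descriptors after use.
 */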
3271
3272/**
3273 * idpf_rx_update_bufq_desc - Update buffer queue descriptor
3274 * @bufq: Pointer to the buffer queue
3275 * @refill_desc: SW Refill queue descriptor containing buffer ID
3276 * @buf_desc: Buffer queue descriptor
3277 *
3278 * Return 0 on success and negative on failure.
3279 */
3280static int idpf_rx_update_bufq_desc(struct idpf_queue *bufq, u16 refill_desc,
3281				    struct virtchnl2_splitq_rx_buf_desc *buf_desc)
3282{
3283	struct idpf_rx_buf *buf;
3284	dma_addr_t addr;
3285	u16 buf_id;
3286
3287	buf_id = FIELD_GET(IDPF_RX_BI_BUFID_M, refill_desc);
3288
3289	buf = &bufq->rx_buf.buf[buf_id];
3290
3291	addr = idpf_alloc_page(bufq->pp, buf, bufq->rx_buf_size);
3292	if (unlikely(addr == DMA_MAPPING_ERROR))
3293		return -ENOMEM;
3294
3295	buf_desc->pkt_addr = cpu_to_le64(addr);
3296	buf_desc->qword0.buf_id = cpu_to_le16(buf_id);
3297
3298	if (!bufq->rx_hsplit_en)
3299		return 0;
3300
3301	buf_desc->hdr_addr = cpu_to_le64(bufq->rx_buf.hdr_buf_pa +
3302					 (u32)buf_id * IDPF_HDR_BUF_SIZE);
3303
3304	return 0;
3305}
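
/*
 * Illustrative sketch of the header-buffer addressing above (buf_id chosen
 * arbitrarily): the header buffers form one contiguous DMA region indexed by
 * buffer ID, so for buf_id == 10 the descriptor receives
 *
 *	buf_desc->hdr_addr = cpu_to_le64(bufq->rx_buf.hdr_buf_pa +
 *					 10 * IDPF_HDR_BUF_SIZE)
 *
 * and the CPU-side address used on completion in idpf_rx_splitq_clean() is
 * hdr_buf_va + 10 * IDPF_HDR_BUF_SIZE, so a given buf_id always resolves to
 * the same header slot in both views of the region.
 */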
3306
3307/**
3308 * idpf_rx_clean_refillq - Clean refill queue buffers
3309 * @bufq: buffer queue to post buffers back to
3310 * @refillq: refill queue to clean
3311 *
3312 * This function takes care of the buffer refill management
3313 */
3314static void idpf_rx_clean_refillq(struct idpf_queue *bufq,
3315				  struct idpf_sw_queue *refillq)
3316{
3317	struct virtchnl2_splitq_rx_buf_desc *buf_desc;
3318	u16 bufq_nta = bufq->next_to_alloc;
3319	u16 ntc = refillq->next_to_clean;
3320	int cleaned = 0;
3321	u16 gen;
3322
3323	buf_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, bufq_nta);
3324
3325	/* make sure we stop at ring wrap in the unlikely case ring is full */
3326	while (likely(cleaned < refillq->desc_count)) {
3327		u16 refill_desc = IDPF_SPLITQ_RX_BI_DESC(refillq, ntc);
3328		bool failure;
3329
3330		gen = FIELD_GET(IDPF_RX_BI_GEN_M, refill_desc);
3331		if (test_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags) != gen)
3332			break;
3333
3334		failure = idpf_rx_update_bufq_desc(bufq, refill_desc,
3335						   buf_desc);
3336		if (failure)
3337			break;
3338
3339		if (unlikely(++ntc == refillq->desc_count)) {
3340			change_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags);
3341			ntc = 0;
3342		}
3343
3344		if (unlikely(++bufq_nta == bufq->desc_count)) {
3345			buf_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, 0);
3346			bufq_nta = 0;
3347		} else {
3348			buf_desc++;
3349		}
3350
3351		cleaned++;
3352	}
3353
3354	if (!cleaned)
3355		return;
3356
3357	/* We want to limit how many transactions on the bus we trigger with
3358	 * tail writes so we only do it in strides. It's also important we
3359	 * align the write to a multiple of 8 as required by HW.
3360	 */
3361	if (((bufq->next_to_use <= bufq_nta ? 0 : bufq->desc_count) +
3362	    bufq_nta - bufq->next_to_use) >= IDPF_RX_BUF_POST_STRIDE)
3363		idpf_rx_buf_hw_update(bufq, ALIGN_DOWN(bufq_nta,
3364						       IDPF_RX_BUF_POST_STRIDE));
3365
3366	/* update next to alloc since we have filled the ring */
3367	refillq->next_to_clean = ntc;
3368	bufq->next_to_alloc = bufq_nta;
3369}
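
/*
 * Worked example of the stride check above, with hypothetical numbers: for a
 * 512-entry buffer queue with next_to_use == 500 and bufq_nta wrapped around
 * to 20, the number of descriptors awaiting a tail bump is
 *
 *	(500 <= 20 ? 0 : 512) + 20 - 500 = 32
 *
 * If that meets IDPF_RX_BUF_POST_STRIDE, the tail is written with
 * ALIGN_DOWN(20, IDPF_RX_BUF_POST_STRIDE), which batches MMIO writes and,
 * the stride being a multiple of 8, also satisfies the hardware's
 * 8-alignment requirement for the doorbell value.
 */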
3370
3371/**
3372 * idpf_rx_clean_refillq_all - Clean all refill queues
3373 * @bufq: buffer queue with refill queues
3374 *
3375 * Iterates through all refill queues assigned to the buffer queue assigned to
3376 * this vector.
3378 */
3379static void idpf_rx_clean_refillq_all(struct idpf_queue *bufq)
3380{
3381	struct idpf_bufq_set *bufq_set;
3382	int i;
3383
3384	bufq_set = container_of(bufq, struct idpf_bufq_set, bufq);
3385	for (i = 0; i < bufq_set->num_refillqs; i++)
3386		idpf_rx_clean_refillq(bufq, &bufq_set->refillqs[i]);
3387}
3388
3389/**
3390 * idpf_vport_intr_clean_queues - MSIX mode Interrupt Handler
3391 * @irq: interrupt number
3392 * @data: pointer to a q_vector
3393 *
3394 */
3395static irqreturn_t idpf_vport_intr_clean_queues(int __always_unused irq,
3396						void *data)
3397{
3398	struct idpf_q_vector *q_vector = (struct idpf_q_vector *)data;
3399
3400	q_vector->total_events++;
3401	napi_schedule(&q_vector->napi);
3402
3403	return IRQ_HANDLED;
3404}
3405
3406/**
3407 * idpf_vport_intr_napi_del_all - Unregister napi for all q_vectors in vport
3408 * @vport: virtual port structure
3409 *
3410 */
3411static void idpf_vport_intr_napi_del_all(struct idpf_vport *vport)
3412{
3413	u16 v_idx;
3414
3415	for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++)
3416		netif_napi_del(&vport->q_vectors[v_idx].napi);
3417}
3418
3419/**
3420 * idpf_vport_intr_napi_dis_all - Disable NAPI for all q_vectors in the vport
3421 * @vport: main vport structure
3422 */
3423static void idpf_vport_intr_napi_dis_all(struct idpf_vport *vport)
3424{
3425	int v_idx;
3426
3427	for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++)
3428		napi_disable(&vport->q_vectors[v_idx].napi);
3429}
3430
3431/**
3432 * idpf_vport_intr_rel - Free memory allocated for interrupt vectors
3433 * @vport: virtual port
3434 *
3435 * Free the memory allocated for the interrupt vectors associated with a vport
3436 */
3437void idpf_vport_intr_rel(struct idpf_vport *vport)
3438{
3439	int i, j, v_idx;
3440
3441	for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
3442		struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
3443
3444		kfree(q_vector->bufq);
3445		q_vector->bufq = NULL;
3446		kfree(q_vector->tx);
3447		q_vector->tx = NULL;
3448		kfree(q_vector->rx);
3449		q_vector->rx = NULL;
3450	}
3451
3452	/* Clean up the mapping of queues to vectors */
3453	for (i = 0; i < vport->num_rxq_grp; i++) {
3454		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
3455
3456		if (idpf_is_queue_model_split(vport->rxq_model))
3457			for (j = 0; j < rx_qgrp->splitq.num_rxq_sets; j++)
3458				rx_qgrp->splitq.rxq_sets[j]->rxq.q_vector = NULL;
3459		else
3460			for (j = 0; j < rx_qgrp->singleq.num_rxq; j++)
3461				rx_qgrp->singleq.rxqs[j]->q_vector = NULL;
3462	}
3463
3464	if (idpf_is_queue_model_split(vport->txq_model))
3465		for (i = 0; i < vport->num_txq_grp; i++)
3466			vport->txq_grps[i].complq->q_vector = NULL;
3467	else
3468		for (i = 0; i < vport->num_txq_grp; i++)
3469			for (j = 0; j < vport->txq_grps[i].num_txq; j++)
3470				vport->txq_grps[i].txqs[j]->q_vector = NULL;
3471
3472	kfree(vport->q_vectors);
3473	vport->q_vectors = NULL;
3474}
3475
3476/**
3477 * idpf_vport_intr_rel_irq - Free the IRQ association with the OS
3478 * @vport: main vport structure
3479 */
3480static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
3481{
3482	struct idpf_adapter *adapter = vport->adapter;
3483	int vector;
3484
3485	for (vector = 0; vector < vport->num_q_vectors; vector++) {
3486		struct idpf_q_vector *q_vector = &vport->q_vectors[vector];
3487		int irq_num, vidx;
3488
3489		/* free only the irqs that were actually requested */
3490		if (!q_vector)
3491			continue;
3492
3493		vidx = vport->q_vector_idxs[vector];
3494		irq_num = adapter->msix_entries[vidx].vector;
3495
3496		/* clear the affinity_mask in the IRQ descriptor */
3497		irq_set_affinity_hint(irq_num, NULL);
3498		free_irq(irq_num, q_vector);
3499	}
3500}
3501
3502/**
3503 * idpf_vport_intr_dis_irq_all - Disable all interrupts
3504 * @vport: main vport structure
3505 */
3506static void idpf_vport_intr_dis_irq_all(struct idpf_vport *vport)
3507{
3508	struct idpf_q_vector *q_vector = vport->q_vectors;
3509	int q_idx;
3510
3511	for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++)
3512		writel(0, q_vector[q_idx].intr_reg.dyn_ctl);
3513}
3514
3515/**
3516 * idpf_vport_intr_buildreg_itr - Enable default interrupt generation settings
3517 * @q_vector: pointer to q_vector
3518 * @type: itr index
3519 * @itr: itr value
3520 */
3521static u32 idpf_vport_intr_buildreg_itr(struct idpf_q_vector *q_vector,
3522					const int type, u16 itr)
3523{
3524	u32 itr_val;
3525
3526	itr &= IDPF_ITR_MASK;
3527	/* Don't clear PBA because that can cause lost interrupts that
3528	 * came in while we were cleaning/polling
3529	 */
3530	itr_val = q_vector->intr_reg.dyn_ctl_intena_m |
3531		  (type << q_vector->intr_reg.dyn_ctl_itridx_s) |
3532		  (itr << (q_vector->intr_reg.dyn_ctl_intrvl_s - 1));
3533
3534	return itr_val;
3535}
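
/*
 * Illustrative example (the shift and mask values are hypothetical; the real
 * ones live in intr_reg and are filled in by the device-specific register
 * init): with dyn_ctl_intena_m == BIT(0) and dyn_ctl_itridx_s == 3, a
 * "no ITR update" write such as the one issued from
 * idpf_vport_intr_update_itr_ena_irq() reduces to roughly
 *
 *	itr_val = BIT(0) | (IDPF_NO_ITR_UPDATE_IDX << 3)
 *
 * i.e. re-enable the interrupt without touching any throttling interval,
 * which is why that caller passes an itr of 0.
 */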
3536
3537/**
3538 * idpf_update_dim_sample - Update dim sample with packets and bytes
3539 * @q_vector: the vector associated with the interrupt
3540 * @dim_sample: dim sample to update
3541 * @dim: dim instance structure
3542 * @packets: total packets
3543 * @bytes: total bytes
3544 *
3545 * Update the dim sample with the packets and bytes which are passed to this
3546 * function. Set the dim state appropriately if the dim settings get stale.
3547 */
3548static void idpf_update_dim_sample(struct idpf_q_vector *q_vector,
3549				   struct dim_sample *dim_sample,
3550				   struct dim *dim, u64 packets, u64 bytes)
3551{
3552	dim_update_sample(q_vector->total_events, packets, bytes, dim_sample);
3553	dim_sample->comp_ctr = 0;
3554
3555	/* if dim settings get stale, like when not updated for 1 second or
3556	 * longer, force it to start again. This addresses the frequent case
3557	 * of an idle queue being switched to by the scheduler.
3558	 */
3559	if (ktime_ms_delta(dim_sample->time, dim->start_sample.time) >= HZ)
3560		dim->state = DIM_START_MEASURE;
3561}
3562
3563/**
3564 * idpf_net_dim - Update net DIM algorithm
3565 * @q_vector: the vector associated with the interrupt
3566 *
3567 * Create a DIM sample and notify net_dim() so that it can possibly decide
3568 * a new ITR value based on incoming packets, bytes, and interrupts.
3569 *
3570 * This function is a no-op if the queue is not configured to dynamic ITR.
3571 */
3572static void idpf_net_dim(struct idpf_q_vector *q_vector)
3573{
3574	struct dim_sample dim_sample = { };
3575	u64 packets, bytes;
3576	u32 i;
3577
3578	if (!IDPF_ITR_IS_DYNAMIC(q_vector->tx_intr_mode))
3579		goto check_rx_itr;
3580
3581	for (i = 0, packets = 0, bytes = 0; i < q_vector->num_txq; i++) {
3582		struct idpf_queue *txq = q_vector->tx[i];
3583		unsigned int start;
3584
3585		do {
3586			start = u64_stats_fetch_begin(&txq->stats_sync);
3587			packets += u64_stats_read(&txq->q_stats.tx.packets);
3588			bytes += u64_stats_read(&txq->q_stats.tx.bytes);
3589		} while (u64_stats_fetch_retry(&txq->stats_sync, start));
3590	}
3591
3592	idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->tx_dim,
3593			       packets, bytes);
3594	net_dim(&q_vector->tx_dim, dim_sample);
3595
3596check_rx_itr:
3597	if (!IDPF_ITR_IS_DYNAMIC(q_vector->rx_intr_mode))
3598		return;
3599
3600	for (i = 0, packets = 0, bytes = 0; i < q_vector->num_rxq; i++) {
3601		struct idpf_queue *rxq = q_vector->rx[i];
3602		unsigned int start;
3603
3604		do {
3605			start = u64_stats_fetch_begin(&rxq->stats_sync);
3606			packets += u64_stats_read(&rxq->q_stats.rx.packets);
3607			bytes += u64_stats_read(&rxq->q_stats.rx.bytes);
3608		} while (u64_stats_fetch_retry(&rxq->stats_sync, start));
3609	}
3610
3611	idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->rx_dim,
3612			       packets, bytes);
3613	net_dim(&q_vector->rx_dim, dim_sample);
3614}
3615
3616/**
3617 * idpf_vport_intr_update_itr_ena_irq - Update itr and re-enable MSIX interrupt
3618 * @q_vector: q_vector for which itr is being updated and interrupt enabled
3619 *
3620 * Update the net_dim() algorithm and re-enable the interrupt associated with
3621 * this vector.
3622 */
3623void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector)
3624{
3625	u32 intval;
3626
3627	/* net_dim() updates ITR out-of-band using a work item */
3628	idpf_net_dim(q_vector);
3629
3630	intval = idpf_vport_intr_buildreg_itr(q_vector,
3631					      IDPF_NO_ITR_UPDATE_IDX, 0);
3632
3633	writel(intval, q_vector->intr_reg.dyn_ctl);
3634}
3635
3636/**
3637 * idpf_vport_intr_req_irq - Request IRQs for the vport's MSI-X vectors from the OS
3638 * @vport: main vport structure
3639 * @basename: name for the vector
3640 */
3641static int idpf_vport_intr_req_irq(struct idpf_vport *vport, char *basename)
3642{
3643	struct idpf_adapter *adapter = vport->adapter;
3644	int vector, err, irq_num, vidx;
3645	const char *vec_name;
3646
3647	for (vector = 0; vector < vport->num_q_vectors; vector++) {
3648		struct idpf_q_vector *q_vector = &vport->q_vectors[vector];
3649
3650		vidx = vport->q_vector_idxs[vector];
3651		irq_num = adapter->msix_entries[vidx].vector;
3652
3653		if (q_vector->num_rxq && q_vector->num_txq)
3654			vec_name = "TxRx";
3655		else if (q_vector->num_rxq)
3656			vec_name = "Rx";
3657		else if (q_vector->num_txq)
3658			vec_name = "Tx";
3659		else
3660			continue;
3661
3662		q_vector->name = kasprintf(GFP_KERNEL, "%s-%s-%d",
3663					   basename, vec_name, vidx);
3664
3665		err = request_irq(irq_num, idpf_vport_intr_clean_queues, 0,
3666				  q_vector->name, q_vector);
3667		if (err) {
3668			netdev_err(vport->netdev,
3669				   "Request_irq failed, error: %d\n", err);
3670			goto free_q_irqs;
3671		}
3672		/* assign the mask for this irq */
3673		irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
3674	}
3675
3676	return 0;
3677
3678free_q_irqs:
3679	while (--vector >= 0) {
3680		vidx = vport->q_vector_idxs[vector];
3681		irq_num = adapter->msix_entries[vidx].vector;
3682		free_irq(irq_num, &vport->q_vectors[vector]);
3683	}
3684
3685	return err;
3686}
3687
3688/**
3689 * idpf_vport_intr_write_itr - Write ITR value to the ITR register
3690 * @q_vector: q_vector structure
3691 * @itr: Interrupt throttling rate
3692 * @tx: Tx or Rx ITR
3693 */
3694void idpf_vport_intr_write_itr(struct idpf_q_vector *q_vector, u16 itr, bool tx)
3695{
3696	struct idpf_intr_reg *intr_reg;
3697
3698	if (tx && !q_vector->tx)
3699		return;
3700	else if (!tx && !q_vector->rx)
3701		return;
3702
3703	intr_reg = &q_vector->intr_reg;
3704	writel(ITR_REG_ALIGN(itr) >> IDPF_ITR_GRAN_S,
3705	       tx ? intr_reg->tx_itr : intr_reg->rx_itr);
3706}
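
/*
 * Illustrative example, assuming the common 2 usec ITR register granularity
 * (IDPF_ITR_GRAN_S == 1): writing a 50 usec Rx ITR ends up as
 *
 *	writel(ITR_REG_ALIGN(50) >> 1, intr_reg->rx_itr)
 *
 * i.e. the value 25, since the register counts in 2 usec units. The
 * granularity here is an assumption for the example; the defines are
 * authoritative.
 */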
3707
3708/**
3709 * idpf_vport_intr_ena_irq_all - Enable IRQ for the given vport
3710 * @vport: main vport structure
3711 */
3712static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport)
3713{
3714	bool dynamic;
3715	int q_idx;
3716	u16 itr;
3717
3718	for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) {
3719		struct idpf_q_vector *qv = &vport->q_vectors[q_idx];
3720
3721		/* Set the initial ITR values */
3722		if (qv->num_txq) {
3723			dynamic = IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode);
3724			itr = vport->tx_itr_profile[qv->tx_dim.profile_ix];
3725			idpf_vport_intr_write_itr(qv, dynamic ?
3726						  itr : qv->tx_itr_value,
3727						  true);
3728		}
3729
3730		if (qv->num_rxq) {
3731			dynamic = IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode);
3732			itr = vport->rx_itr_profile[qv->rx_dim.profile_ix];
3733			idpf_vport_intr_write_itr(qv, dynamic ?
3734						  itr : qv->rx_itr_value,
3735						  false);
3736		}
3737
3738		if (qv->num_txq || qv->num_rxq)
3739			idpf_vport_intr_update_itr_ena_irq(qv);
3740	}
3741}
3742
3743/**
3744 * idpf_vport_intr_deinit - Release all vector associations for the vport
3745 * @vport: main vport structure
3746 */
3747void idpf_vport_intr_deinit(struct idpf_vport *vport)
3748{
3749	idpf_vport_intr_napi_dis_all(vport);
3750	idpf_vport_intr_napi_del_all(vport);
3751	idpf_vport_intr_dis_irq_all(vport);
3752	idpf_vport_intr_rel_irq(vport);
3753}
3754
3755/**
3756 * idpf_tx_dim_work - Call back from the stack
3757 * @work: work queue structure
3758 */
3759static void idpf_tx_dim_work(struct work_struct *work)
3760{
3761	struct idpf_q_vector *q_vector;
3762	struct idpf_vport *vport;
3763	struct dim *dim;
3764	u16 itr;
3765
3766	dim = container_of(work, struct dim, work);
3767	q_vector = container_of(dim, struct idpf_q_vector, tx_dim);
3768	vport = q_vector->vport;
3769
3770	if (dim->profile_ix >= ARRAY_SIZE(vport->tx_itr_profile))
3771		dim->profile_ix = ARRAY_SIZE(vport->tx_itr_profile) - 1;
3772
3773	/* look up the values in our local table */
3774	itr = vport->tx_itr_profile[dim->profile_ix];
3775
3776	idpf_vport_intr_write_itr(q_vector, itr, true);
3777
3778	dim->state = DIM_START_MEASURE;
3779}
3780
3781/**
3782 * idpf_rx_dim_work - Call back from the stack
3783 * @work: work queue structure
3784 */
3785static void idpf_rx_dim_work(struct work_struct *work)
3786{
3787	struct idpf_q_vector *q_vector;
3788	struct idpf_vport *vport;
3789	struct dim *dim;
3790	u16 itr;
3791
3792	dim = container_of(work, struct dim, work);
3793	q_vector = container_of(dim, struct idpf_q_vector, rx_dim);
3794	vport = q_vector->vport;
3795
3796	if (dim->profile_ix >= ARRAY_SIZE(vport->rx_itr_profile))
3797		dim->profile_ix = ARRAY_SIZE(vport->rx_itr_profile) - 1;
3798
3799	/* look up the values in our local table */
3800	itr = vport->rx_itr_profile[dim->profile_ix];
3801
3802	idpf_vport_intr_write_itr(q_vector, itr, false);
3803
3804	dim->state = DIM_START_MEASURE;
3805}
3806
3807/**
3808 * idpf_init_dim - Set up dynamic interrupt moderation
3809 * @qv: q_vector structure
3810 */
3811static void idpf_init_dim(struct idpf_q_vector *qv)
3812{
3813	INIT_WORK(&qv->tx_dim.work, idpf_tx_dim_work);
3814	qv->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
3815	qv->tx_dim.profile_ix = IDPF_DIM_DEFAULT_PROFILE_IX;
3816
3817	INIT_WORK(&qv->rx_dim.work, idpf_rx_dim_work);
3818	qv->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
3819	qv->rx_dim.profile_ix = IDPF_DIM_DEFAULT_PROFILE_IX;
3820}
3821
3822/**
3823 * idpf_vport_intr_napi_ena_all - Enable NAPI for all q_vectors in the vport
3824 * @vport: main vport structure
3825 */
3826static void idpf_vport_intr_napi_ena_all(struct idpf_vport *vport)
3827{
3828	int q_idx;
3829
3830	for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) {
3831		struct idpf_q_vector *q_vector = &vport->q_vectors[q_idx];
3832
3833		idpf_init_dim(q_vector);
3834		napi_enable(&q_vector->napi);
3835	}
3836}
3837
3838/**
3839 * idpf_tx_splitq_clean_all- Clean completion queues
3840 * @q_vec: queue vector
3841 * @budget: Used to determine if we are in netpoll
3842 * @cleaned: returns number of packets cleaned
3843 *
3844 * Returns false if clean is not complete else returns true
3845 */
3846static bool idpf_tx_splitq_clean_all(struct idpf_q_vector *q_vec,
3847				     int budget, int *cleaned)
3848{
3849	u16 num_txq = q_vec->num_txq;
3850	bool clean_complete = true;
3851	int i, budget_per_q;
3852
3853	if (unlikely(!num_txq))
3854		return true;
3855
3856	budget_per_q = DIV_ROUND_UP(budget, num_txq);
3857	for (i = 0; i < num_txq; i++)
3858		clean_complete &= idpf_tx_clean_complq(q_vec->tx[i],
3859						       budget_per_q, cleaned);
3860
3861	return clean_complete;
3862}
3863
3864/**
3865 * idpf_rx_splitq_clean_all- Clean completion queues
3866 * @q_vec: queue vector
3867 * @budget: Used to determine if we are in netpoll
3868 * @cleaned: returns number of packets cleaned
3869 *
3870 * Returns false if clean is not complete else returns true
3871 */
3872static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget,
3873				     int *cleaned)
3874{
3875	u16 num_rxq = q_vec->num_rxq;
3876	bool clean_complete = true;
3877	int pkts_cleaned = 0;
3878	int i, budget_per_q;
3879
3880	/* We attempt to distribute budget to each Rx queue fairly, but don't
3881	 * allow the budget to go below 1 because that would exit polling early.
3882	 */
3883	budget_per_q = num_rxq ? max(budget / num_rxq, 1) : 0;
3884	for (i = 0; i < num_rxq; i++) {
3885		struct idpf_queue *rxq = q_vec->rx[i];
3886		int pkts_cleaned_per_q;
3887
3888		pkts_cleaned_per_q = idpf_rx_splitq_clean(rxq, budget_per_q);
3889		/* if we clean as many as budgeted, we must not be done */
3890		if (pkts_cleaned_per_q >= budget_per_q)
3891			clean_complete = false;
3892		pkts_cleaned += pkts_cleaned_per_q;
3893	}
3894	*cleaned = pkts_cleaned;
3895
3896	for (i = 0; i < q_vec->num_bufq; i++)
3897		idpf_rx_clean_refillq_all(q_vec->bufq[i]);
3898
3899	return clean_complete;
3900}
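
/*
 * Illustrative budget split with hypothetical numbers: for a NAPI budget of
 * 64 and three Rx queues on this vector, budget_per_q = max(64 / 3, 1) = 21,
 * so each queue may clean at most 21 packets in this poll. The Tx path above
 * rounds up instead, DIV_ROUND_UP(64, 3) = 22 per completion queue, and the
 * vector reports clean_complete = false as soon as any Rx queue uses its
 * whole share, which keeps NAPI polling.
 */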
3901
3902/**
3903 * idpf_vport_splitq_napi_poll - NAPI handler
3904 * @napi: struct from which you get q_vector
3905 * @budget: budget provided by stack
3906 */
3907static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
3908{
3909	struct idpf_q_vector *q_vector =
3910				container_of(napi, struct idpf_q_vector, napi);
3911	bool clean_complete;
3912	int work_done = 0;
3913
3914	/* Handle case where we are called by netpoll with a budget of 0 */
3915	if (unlikely(!budget)) {
3916		idpf_tx_splitq_clean_all(q_vector, budget, &work_done);
3917
3918		return 0;
3919	}
3920
3921	clean_complete = idpf_rx_splitq_clean_all(q_vector, budget, &work_done);
3922	clean_complete &= idpf_tx_splitq_clean_all(q_vector, budget, &work_done);
3923
3924	/* If work not completed, return budget and polling will return */
3925	if (!clean_complete)
3926		return budget;
3927
3928	work_done = min_t(int, work_done, budget - 1);
3929
3930	/* Exit the polling mode, but don't re-enable interrupts if stack might
3931	 * poll us due to busy-polling
3932	 */
3933	if (likely(napi_complete_done(napi, work_done)))
3934		idpf_vport_intr_update_itr_ena_irq(q_vector);
3935
3936	/* Switch to poll mode in the tear-down path after sending disable
3937	 * queues virtchnl message, as the interrupts will be disabled after
3938	 * that
3939	 */
3940	if (unlikely(q_vector->num_txq && test_bit(__IDPF_Q_POLL_MODE,
3941						   q_vector->tx[0]->flags)))
3942		return budget;
3943	else
3944		return work_done;
3945}
3946
3947/**
3948 * idpf_vport_intr_map_vector_to_qs - Map vectors to queues
3949 * @vport: virtual port
3950 *
3951 * Mapping for vectors to queues
3952 */
3953static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
3954{
3955	u16 num_txq_grp = vport->num_txq_grp;
3956	int i, j, qv_idx, bufq_vidx = 0;
3957	struct idpf_rxq_group *rx_qgrp;
3958	struct idpf_txq_group *tx_qgrp;
3959	struct idpf_queue *q, *bufq;
3960	u16 q_index;
3961
3962	for (i = 0, qv_idx = 0; i < vport->num_rxq_grp; i++) {
3963		u16 num_rxq;
3964
3965		rx_qgrp = &vport->rxq_grps[i];
3966		if (idpf_is_queue_model_split(vport->rxq_model))
3967			num_rxq = rx_qgrp->splitq.num_rxq_sets;
3968		else
3969			num_rxq = rx_qgrp->singleq.num_rxq;
3970
3971		for (j = 0; j < num_rxq; j++) {
3972			if (qv_idx >= vport->num_q_vectors)
3973				qv_idx = 0;
3974
3975			if (idpf_is_queue_model_split(vport->rxq_model))
3976				q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
3977			else
3978				q = rx_qgrp->singleq.rxqs[j];
3979			q->q_vector = &vport->q_vectors[qv_idx];
3980			q_index = q->q_vector->num_rxq;
3981			q->q_vector->rx[q_index] = q;
3982			q->q_vector->num_rxq++;
3983			qv_idx++;
3984		}
3985
3986		if (idpf_is_queue_model_split(vport->rxq_model)) {
3987			for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
3988				bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
3989				bufq->q_vector = &vport->q_vectors[bufq_vidx];
3990				q_index = bufq->q_vector->num_bufq;
3991				bufq->q_vector->bufq[q_index] = bufq;
3992				bufq->q_vector->num_bufq++;
3993			}
3994			if (++bufq_vidx >= vport->num_q_vectors)
3995				bufq_vidx = 0;
3996		}
3997	}
3998
3999	for (i = 0, qv_idx = 0; i < num_txq_grp; i++) {
4000		u16 num_txq;
4001
4002		tx_qgrp = &vport->txq_grps[i];
4003		num_txq = tx_qgrp->num_txq;
4004
4005		if (idpf_is_queue_model_split(vport->txq_model)) {
4006			if (qv_idx >= vport->num_q_vectors)
4007				qv_idx = 0;
4008
4009			q = tx_qgrp->complq;
4010			q->q_vector = &vport->q_vectors[qv_idx];
4011			q_index = q->q_vector->num_txq;
4012			q->q_vector->tx[q_index] = q;
4013			q->q_vector->num_txq++;
4014			qv_idx++;
4015		} else {
4016			for (j = 0; j < num_txq; j++) {
4017				if (qv_idx >= vport->num_q_vectors)
4018					qv_idx = 0;
4019
4020				q = tx_qgrp->txqs[j];
4021				q->q_vector = &vport->q_vectors[qv_idx];
4022				q_index = q->q_vector->num_txq;
4023				q->q_vector->tx[q_index] = q;
4024				q->q_vector->num_txq++;
4025
4026				qv_idx++;
4027			}
4028		}
4029	}
4030}
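
/*
 * Illustrative mapping with hypothetical counts: 4 q_vectors and a single Rx
 * queue group holding 8 rxq_sets. The round-robin above yields
 *
 *	rxq 0 -> vector 0, rxq 1 -> vector 1, rxq 2 -> vector 2,
 *	rxq 3 -> vector 3, rxq 4 -> vector 0, ..., rxq 7 -> vector 3
 *
 * so every vector ends up with num_rxq == 2. Buffer queues are distributed
 * per queue group using their own rotating index (bufq_vidx), and in split
 * queue mode it is the per-group completion queue, not the individual Tx
 * queues, that gets attached to a vector.
 */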
4031
4032/**
4033 * idpf_vport_intr_init_vec_idx - Initialize the vector indexes
4034 * @vport: virtual port
4035 *
4036 * Initialize vector indexes with values returned over the mailbox
4037 */
4038static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
4039{
4040	struct idpf_adapter *adapter = vport->adapter;
4041	struct virtchnl2_alloc_vectors *ac;
4042	u16 *vecids, total_vecs;
4043	int i;
4044
4045	ac = adapter->req_vec_chunks;
4046	if (!ac) {
4047		for (i = 0; i < vport->num_q_vectors; i++)
4048			vport->q_vectors[i].v_idx = vport->q_vector_idxs[i];
4049
4050		return 0;
4051	}
4052
4053	total_vecs = idpf_get_reserved_vecs(adapter);
4054	vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL);
4055	if (!vecids)
4056		return -ENOMEM;
4057
4058	idpf_get_vec_ids(adapter, vecids, total_vecs, &ac->vchunks);
4059
4060	for (i = 0; i < vport->num_q_vectors; i++)
4061		vport->q_vectors[i].v_idx = vecids[vport->q_vector_idxs[i]];
4062
4063	kfree(vecids);
4064
4065	return 0;
4066}
4067
4068/**
4069 * idpf_vport_intr_napi_add_all- Register napi handler for all qvectors
4070 * @vport: virtual port structure
4071 */
4072static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
4073{
4074	int (*napi_poll)(struct napi_struct *napi, int budget);
4075	u16 v_idx;
4076
4077	if (idpf_is_queue_model_split(vport->txq_model))
4078		napi_poll = idpf_vport_splitq_napi_poll;
4079	else
4080		napi_poll = idpf_vport_singleq_napi_poll;
4081
4082	for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
4083		struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
4084
4085		netif_napi_add(vport->netdev, &q_vector->napi, napi_poll);
4086
4087		/* only set affinity_mask if the CPU is online */
4088		if (cpu_online(v_idx))
4089			cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
4090	}
4091}
4092
4093/**
4094 * idpf_vport_intr_alloc - Allocate memory for interrupt vectors
4095 * @vport: virtual port
4096 *
4097 * We allocate one q_vector per queue interrupt. If allocation fails we
4098 * return -ENOMEM.
4099 */
4100int idpf_vport_intr_alloc(struct idpf_vport *vport)
4101{
4102	u16 txqs_per_vector, rxqs_per_vector, bufqs_per_vector;
4103	struct idpf_q_vector *q_vector;
4104	int v_idx, err;
4105
4106	vport->q_vectors = kcalloc(vport->num_q_vectors,
4107				   sizeof(struct idpf_q_vector), GFP_KERNEL);
4108	if (!vport->q_vectors)
4109		return -ENOMEM;
4110
4111	txqs_per_vector = DIV_ROUND_UP(vport->num_txq, vport->num_q_vectors);
4112	rxqs_per_vector = DIV_ROUND_UP(vport->num_rxq, vport->num_q_vectors);
4113	bufqs_per_vector = vport->num_bufqs_per_qgrp *
4114			   DIV_ROUND_UP(vport->num_rxq_grp,
4115					vport->num_q_vectors);
4116
4117	for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
4118		q_vector = &vport->q_vectors[v_idx];
4119		q_vector->vport = vport;
4120
4121		q_vector->tx_itr_value = IDPF_ITR_TX_DEF;
4122		q_vector->tx_intr_mode = IDPF_ITR_DYNAMIC;
4123		q_vector->tx_itr_idx = VIRTCHNL2_ITR_IDX_1;
4124
4125		q_vector->rx_itr_value = IDPF_ITR_RX_DEF;
4126		q_vector->rx_intr_mode = IDPF_ITR_DYNAMIC;
4127		q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0;
4128
4129		q_vector->tx = kcalloc(txqs_per_vector,
4130				       sizeof(struct idpf_queue *),
4131				       GFP_KERNEL);
4132		if (!q_vector->tx) {
4133			err = -ENOMEM;
4134			goto error;
4135		}
4136
4137		q_vector->rx = kcalloc(rxqs_per_vector,
4138				       sizeof(struct idpf_queue *),
4139				       GFP_KERNEL);
4140		if (!q_vector->rx) {
4141			err = -ENOMEM;
4142			goto error;
4143		}
4144
4145		if (!idpf_is_queue_model_split(vport->rxq_model))
4146			continue;
4147
4148		q_vector->bufq = kcalloc(bufqs_per_vector,
4149					 sizeof(struct idpf_queue *),
4150					 GFP_KERNEL);
4151		if (!q_vector->bufq) {
4152			err = -ENOMEM;
4153			goto error;
4154		}
4155	}
4156
4157	return 0;
4158
4159error:
4160	idpf_vport_intr_rel(vport);
4161
4162	return err;
4163}
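
/*
 * Illustrative sizing with hypothetical counts: for a vport with 16 Tx
 * queues, 16 Rx queues, 8 Rx queue groups of 2 buffer queues each, and 4
 * q_vectors, the per-vector arrays allocated above hold
 *
 *	txqs_per_vector  = DIV_ROUND_UP(16, 4)    = 4
 *	rxqs_per_vector  = DIV_ROUND_UP(16, 4)    = 4
 *	bufqs_per_vector = 2 * DIV_ROUND_UP(8, 4) = 4
 *
 * entries. These are upper bounds on what idpf_vport_intr_map_vector_to_qs()
 * may attach to one vector; the actual counts land in num_txq, num_rxq and
 * num_bufq.
 */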
4164
4165/**
4166 * idpf_vport_intr_init - Setup all vectors for the given vport
4167 * @vport: virtual port
4168 *
4169 * Returns 0 on success or negative on failure
4170 */
4171int idpf_vport_intr_init(struct idpf_vport *vport)
4172{
4173	char *int_name;
4174	int err;
4175
4176	err = idpf_vport_intr_init_vec_idx(vport);
4177	if (err)
4178		return err;
4179
4180	idpf_vport_intr_map_vector_to_qs(vport);
4181	idpf_vport_intr_napi_add_all(vport);
4182	idpf_vport_intr_napi_ena_all(vport);
4183
4184	err = vport->adapter->dev_ops.reg_ops.intr_reg_init(vport);
4185	if (err)
4186		goto unroll_vectors_alloc;
4187
4188	int_name = kasprintf(GFP_KERNEL, "%s-%s",
4189			     dev_driver_string(&vport->adapter->pdev->dev),
4190			     vport->netdev->name);
4191
4192	err = idpf_vport_intr_req_irq(vport, int_name);
4193	if (err)
4194		goto unroll_vectors_alloc;
4195
4196	idpf_vport_intr_ena_irq_all(vport);
4197
4198	return 0;
4199
4200unroll_vectors_alloc:
4201	idpf_vport_intr_napi_dis_all(vport);
4202	idpf_vport_intr_napi_del_all(vport);
4203
4204	return err;
4205}
4206
4207/**
4208 * idpf_config_rss - Send virtchnl messages to configure RSS
4209 * @vport: virtual port
4210 *
4211 * Return 0 on success, negative on failure
4212 */
4213int idpf_config_rss(struct idpf_vport *vport)
4214{
4215	int err;
4216
4217	err = idpf_send_get_set_rss_key_msg(vport, false);
4218	if (err)
4219		return err;
4220
4221	return idpf_send_get_set_rss_lut_msg(vport, false);
4222}
4223
4224/**
4225 * idpf_fill_dflt_rss_lut - Fill the indirection table with the default values
4226 * @vport: virtual port structure
4227 */
4228static void idpf_fill_dflt_rss_lut(struct idpf_vport *vport)
4229{
4230	struct idpf_adapter *adapter = vport->adapter;
4231	u16 num_active_rxq = vport->num_rxq;
4232	struct idpf_rss_data *rss_data;
4233	int i;
4234
4235	rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
4236
4237	for (i = 0; i < rss_data->rss_lut_size; i++) {
4238		rss_data->rss_lut[i] = i % num_active_rxq;
4239		rss_data->cached_lut[i] = rss_data->rss_lut[i];
4240	}
4241}
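
/*
 * Illustrative default LUT with hypothetical sizes: 4 active Rx queues and
 * an rss_lut_size of 16 produce
 *
 *	rss_lut[] = { 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3 }
 *
 * spreading hash buckets evenly across the queues, with cached_lut holding
 * an identical copy of the table.
 */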
4242
4243/**
4244 * idpf_init_rss - Allocate and initialize RSS resources
4245 * @vport: virtual port
4246 *
4247 * Return 0 on success, negative on failure
4248 */
4249int idpf_init_rss(struct idpf_vport *vport)
4250{
4251	struct idpf_adapter *adapter = vport->adapter;
4252	struct idpf_rss_data *rss_data;
4253	u32 lut_size;
4254
4255	rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
4256
4257	lut_size = rss_data->rss_lut_size * sizeof(u32);
4258	rss_data->rss_lut = kzalloc(lut_size, GFP_KERNEL);
4259	if (!rss_data->rss_lut)
4260		return -ENOMEM;
4261
4262	rss_data->cached_lut = kzalloc(lut_size, GFP_KERNEL);
4263	if (!rss_data->cached_lut) {
4264		kfree(rss_data->rss_lut);
4265		rss_data->rss_lut = NULL;
4266
4267		return -ENOMEM;
4268	}
4269
4270	/* Fill the default RSS lut values */
4271	idpf_fill_dflt_rss_lut(vport);
4272
4273	return idpf_config_rss(vport);
4274}
4275
4276/**
4277 * idpf_deinit_rss - Release RSS resources
4278 * @vport: virtual port
4279 */
4280void idpf_deinit_rss(struct idpf_vport *vport)
4281{
4282	struct idpf_adapter *adapter = vport->adapter;
4283	struct idpf_rss_data *rss_data;
4284
4285	rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
4286	kfree(rss_data->cached_lut);
4287	rss_data->cached_lut = NULL;
4288	kfree(rss_data->rss_lut);
4289	rss_data->rss_lut = NULL;
4290}