   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2018, Intel Corporation. */
   3
   4/* The driver transmit and receive code */
   5
   6#include <linux/prefetch.h>
   7#include <linux/mm.h>
   8#include "ice.h"
   9
  10#define ICE_RX_HDR_SIZE		256
  11
  12/**
  13 * ice_unmap_and_free_tx_buf - Release a Tx buffer
  14 * @ring: the ring that owns the buffer
  15 * @tx_buf: the buffer to free
  16 */
  17static void
  18ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
  19{
  20	if (tx_buf->skb) {
  21		dev_kfree_skb_any(tx_buf->skb);
  22		if (dma_unmap_len(tx_buf, len))
  23			dma_unmap_single(ring->dev,
  24					 dma_unmap_addr(tx_buf, dma),
  25					 dma_unmap_len(tx_buf, len),
  26					 DMA_TO_DEVICE);
  27	} else if (dma_unmap_len(tx_buf, len)) {
  28		dma_unmap_page(ring->dev,
  29			       dma_unmap_addr(tx_buf, dma),
  30			       dma_unmap_len(tx_buf, len),
  31			       DMA_TO_DEVICE);
  32	}
  33
  34	tx_buf->next_to_watch = NULL;
  35	tx_buf->skb = NULL;
  36	dma_unmap_len_set(tx_buf, len, 0);
  37	/* tx_buf must be completely set up in the transmit path */
  38}
  39
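/**
 * txring_txq - Look up the netdev Tx queue that backs a Tx ring
 * @ring: Tx ring to find the netdev queue for; ring->q_index selects it
 */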
  40static struct netdev_queue *txring_txq(const struct ice_ring *ring)
  41{
  42	return netdev_get_tx_queue(ring->netdev, ring->q_index);
  43}
  44
  45/**
  46 * ice_clean_tx_ring - Free any empty Tx buffers
  47 * @tx_ring: ring to be cleaned
  48 */
  49void ice_clean_tx_ring(struct ice_ring *tx_ring)
  50{
  51	unsigned long size;
  52	u16 i;
  53
  54	/* ring already cleared, nothing to do */
  55	if (!tx_ring->tx_buf)
  56		return;
  57
   58	/* Free all the Tx ring sk_buffs */
  59	for (i = 0; i < tx_ring->count; i++)
  60		ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);
  61
  62	size = sizeof(struct ice_tx_buf) * tx_ring->count;
  63	memset(tx_ring->tx_buf, 0, size);
  64
  65	/* Zero out the descriptor ring */
  66	memset(tx_ring->desc, 0, tx_ring->size);
  67
  68	tx_ring->next_to_use = 0;
  69	tx_ring->next_to_clean = 0;
  70
  71	if (!tx_ring->netdev)
  72		return;
  73
  74	/* cleanup Tx queue statistics */
  75	netdev_tx_reset_queue(txring_txq(tx_ring));
  76}
  77
  78/**
  79 * ice_free_tx_ring - Free Tx resources per queue
  80 * @tx_ring: Tx descriptor ring for a specific queue
  81 *
  82 * Free all transmit software resources
  83 */
  84void ice_free_tx_ring(struct ice_ring *tx_ring)
  85{
  86	ice_clean_tx_ring(tx_ring);
  87	devm_kfree(tx_ring->dev, tx_ring->tx_buf);
  88	tx_ring->tx_buf = NULL;
  89
  90	if (tx_ring->desc) {
  91		dmam_free_coherent(tx_ring->dev, tx_ring->size,
  92				   tx_ring->desc, tx_ring->dma);
  93		tx_ring->desc = NULL;
  94	}
  95}
  96
  97/**
  98 * ice_clean_tx_irq - Reclaim resources after transmit completes
  99 * @vsi: the VSI we care about
 100 * @tx_ring: Tx ring to clean
 101 * @napi_budget: Used to determine if we are in netpoll
 102 *
 103 * Returns true if there's any budget left (i.e. the clean is finished)
 104 */
 105static bool ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring,
 106			     int napi_budget)
 107{
 108	unsigned int total_bytes = 0, total_pkts = 0;
 109	unsigned int budget = vsi->work_lmt;
 110	s16 i = tx_ring->next_to_clean;
 111	struct ice_tx_desc *tx_desc;
 112	struct ice_tx_buf *tx_buf;
 113
 114	tx_buf = &tx_ring->tx_buf[i];
 115	tx_desc = ICE_TX_DESC(tx_ring, i);
 116	i -= tx_ring->count;
 117
 118	do {
 119		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
 120
 121		/* if next_to_watch is not set then there is no work pending */
 122		if (!eop_desc)
 123			break;
 124
 125		smp_rmb();	/* prevent any other reads prior to eop_desc */
 126
 127		/* if the descriptor isn't done, no work yet to do */
 128		if (!(eop_desc->cmd_type_offset_bsz &
 129		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
 130			break;
 131
 132		/* clear next_to_watch to prevent false hangs */
 133		tx_buf->next_to_watch = NULL;
 134
 135		/* update the statistics for this packet */
 136		total_bytes += tx_buf->bytecount;
 137		total_pkts += tx_buf->gso_segs;
 138
 139		/* free the skb */
 140		napi_consume_skb(tx_buf->skb, napi_budget);
 141
 142		/* unmap skb header data */
 143		dma_unmap_single(tx_ring->dev,
 144				 dma_unmap_addr(tx_buf, dma),
 145				 dma_unmap_len(tx_buf, len),
 146				 DMA_TO_DEVICE);
 147
 148		/* clear tx_buf data */
 149		tx_buf->skb = NULL;
 150		dma_unmap_len_set(tx_buf, len, 0);
 151
 152		/* unmap remaining buffers */
 153		while (tx_desc != eop_desc) {
 154			tx_buf++;
 155			tx_desc++;
 156			i++;
 157			if (unlikely(!i)) {
 158				i -= tx_ring->count;
 159				tx_buf = tx_ring->tx_buf;
 160				tx_desc = ICE_TX_DESC(tx_ring, 0);
 161			}
 162
 163			/* unmap any remaining paged data */
 164			if (dma_unmap_len(tx_buf, len)) {
 165				dma_unmap_page(tx_ring->dev,
 166					       dma_unmap_addr(tx_buf, dma),
 167					       dma_unmap_len(tx_buf, len),
 168					       DMA_TO_DEVICE);
 169				dma_unmap_len_set(tx_buf, len, 0);
 170			}
 171		}
 172
 173		/* move us one more past the eop_desc for start of next pkt */
 174		tx_buf++;
 175		tx_desc++;
 176		i++;
 177		if (unlikely(!i)) {
 178			i -= tx_ring->count;
 179			tx_buf = tx_ring->tx_buf;
 180			tx_desc = ICE_TX_DESC(tx_ring, 0);
 181		}
 182
 183		prefetch(tx_desc);
 184
 185		/* update budget accounting */
 186		budget--;
 187	} while (likely(budget));
 188
 189	i += tx_ring->count;
 190	tx_ring->next_to_clean = i;
 191	u64_stats_update_begin(&tx_ring->syncp);
 192	tx_ring->stats.bytes += total_bytes;
 193	tx_ring->stats.pkts += total_pkts;
 194	u64_stats_update_end(&tx_ring->syncp);
 195	tx_ring->q_vector->tx.total_bytes += total_bytes;
 196	tx_ring->q_vector->tx.total_pkts += total_pkts;
 197
 198	netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
 199				  total_bytes);
 200
 201#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
 202	if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
 203		     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
 204		/* Make sure that anybody stopping the queue after this
 205		 * sees the new next_to_clean.
 206		 */
 207		smp_mb();
 208		if (__netif_subqueue_stopped(tx_ring->netdev,
 209					     tx_ring->q_index) &&
 210		   !test_bit(__ICE_DOWN, vsi->state)) {
 211			netif_wake_subqueue(tx_ring->netdev,
 212					    tx_ring->q_index);
 213			++tx_ring->tx_stats.restart_q;
 214		}
 215	}
 216
 217	return !!budget;
 218}
 219
 220/**
 221 * ice_setup_tx_ring - Allocate the Tx descriptors
 222 * @tx_ring: the tx ring to set up
 223 *
 224 * Return 0 on success, negative on error
 225 */
 226int ice_setup_tx_ring(struct ice_ring *tx_ring)
 227{
 228	struct device *dev = tx_ring->dev;
 229	int bi_size;
 230
 231	if (!dev)
 232		return -ENOMEM;
 233
 234	/* warn if we are about to overwrite the pointer */
 235	WARN_ON(tx_ring->tx_buf);
 236	bi_size = sizeof(struct ice_tx_buf) * tx_ring->count;
 237	tx_ring->tx_buf = devm_kzalloc(dev, bi_size, GFP_KERNEL);
 238	if (!tx_ring->tx_buf)
 239		return -ENOMEM;
 240
 241	/* round up to nearest 4K */
 242	tx_ring->size = tx_ring->count * sizeof(struct ice_tx_desc);
 243	tx_ring->size = ALIGN(tx_ring->size, 4096);
 244	tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
 245					    GFP_KERNEL);
 246	if (!tx_ring->desc) {
 247		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
 248			tx_ring->size);
 249		goto err;
 250	}
 251
 252	tx_ring->next_to_use = 0;
 253	tx_ring->next_to_clean = 0;
 254	return 0;
 255
 256err:
 257	devm_kfree(dev, tx_ring->tx_buf);
 258	tx_ring->tx_buf = NULL;
 259	return -ENOMEM;
 260}
 261
 262/**
 263 * ice_clean_rx_ring - Free Rx buffers
 264 * @rx_ring: ring to be cleaned
 265 */
 266void ice_clean_rx_ring(struct ice_ring *rx_ring)
 267{
 268	struct device *dev = rx_ring->dev;
 269	unsigned long size;
 270	u16 i;
 271
 272	/* ring already cleared, nothing to do */
 273	if (!rx_ring->rx_buf)
 274		return;
 275
 276	/* Free all the Rx ring sk_buffs */
 277	for (i = 0; i < rx_ring->count; i++) {
 278		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
 279
 280		if (rx_buf->skb) {
 281			dev_kfree_skb(rx_buf->skb);
 282			rx_buf->skb = NULL;
 283		}
 284		if (!rx_buf->page)
 285			continue;
 286
 287		dma_unmap_page(dev, rx_buf->dma, PAGE_SIZE, DMA_FROM_DEVICE);
 288		__free_pages(rx_buf->page, 0);
 289
 290		rx_buf->page = NULL;
 291		rx_buf->page_offset = 0;
 292	}
 293
 294	size = sizeof(struct ice_rx_buf) * rx_ring->count;
 295	memset(rx_ring->rx_buf, 0, size);
 296
 297	/* Zero out the descriptor ring */
 298	memset(rx_ring->desc, 0, rx_ring->size);
 299
 300	rx_ring->next_to_alloc = 0;
 301	rx_ring->next_to_clean = 0;
 302	rx_ring->next_to_use = 0;
 303}
 304
 305/**
 306 * ice_free_rx_ring - Free Rx resources
 307 * @rx_ring: ring to clean the resources from
 308 *
 309 * Free all receive software resources
 310 */
 311void ice_free_rx_ring(struct ice_ring *rx_ring)
 312{
 313	ice_clean_rx_ring(rx_ring);
 314	devm_kfree(rx_ring->dev, rx_ring->rx_buf);
 315	rx_ring->rx_buf = NULL;
 316
 317	if (rx_ring->desc) {
 318		dmam_free_coherent(rx_ring->dev, rx_ring->size,
 319				   rx_ring->desc, rx_ring->dma);
 320		rx_ring->desc = NULL;
 321	}
 322}
 323
 324/**
 325 * ice_setup_rx_ring - Allocate the Rx descriptors
 326 * @rx_ring: the rx ring to set up
 327 *
 328 * Return 0 on success, negative on error
 329 */
 330int ice_setup_rx_ring(struct ice_ring *rx_ring)
 331{
 332	struct device *dev = rx_ring->dev;
 333	int bi_size;
 334
 335	if (!dev)
 336		return -ENOMEM;
 337
 338	/* warn if we are about to overwrite the pointer */
 339	WARN_ON(rx_ring->rx_buf);
 340	bi_size = sizeof(struct ice_rx_buf) * rx_ring->count;
 341	rx_ring->rx_buf = devm_kzalloc(dev, bi_size, GFP_KERNEL);
 342	if (!rx_ring->rx_buf)
 343		return -ENOMEM;
 344
 345	/* round up to nearest 4K */
 346	rx_ring->size = rx_ring->count * sizeof(union ice_32byte_rx_desc);
 347	rx_ring->size = ALIGN(rx_ring->size, 4096);
 348	rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
 349					    GFP_KERNEL);
 350	if (!rx_ring->desc) {
 351		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
 352			rx_ring->size);
 353		goto err;
 354	}
 355
 356	rx_ring->next_to_use = 0;
 357	rx_ring->next_to_clean = 0;
 358	return 0;
 359
 360err:
 361	devm_kfree(dev, rx_ring->rx_buf);
 362	rx_ring->rx_buf = NULL;
 363	return -ENOMEM;
 364}
 365
 366/**
 367 * ice_release_rx_desc - Store the new tail and head values
 368 * @rx_ring: ring to bump
 369 * @val: new head index
 370 */
 371static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
 372{
 373	rx_ring->next_to_use = val;
 374
 375	/* update next to alloc since we have filled the ring */
 376	rx_ring->next_to_alloc = val;
 377
 378	/* Force memory writes to complete before letting h/w
 379	 * know there are new descriptors to fetch.  (Only
 380	 * applicable for weak-ordered memory model archs,
 381	 * such as IA-64).
 382	 */
 383	wmb();
 384	writel(val, rx_ring->tail);
 385}
 386
 387/**
 388 * ice_alloc_mapped_page - recycle or make a new page
 389 * @rx_ring: ring to use
 390 * @bi: rx_buf struct to modify
 391 *
 392 * Returns true if the page was successfully allocated or
 393 * reused.
 394 */
 395static bool ice_alloc_mapped_page(struct ice_ring *rx_ring,
 396				  struct ice_rx_buf *bi)
 397{
 398	struct page *page = bi->page;
 399	dma_addr_t dma;
 400
 401	/* since we are recycling buffers we should seldom need to alloc */
 402	if (likely(page)) {
 403		rx_ring->rx_stats.page_reuse_count++;
 404		return true;
 405	}
 406
 407	/* alloc new page for storage */
 408	page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
 409	if (unlikely(!page)) {
 410		rx_ring->rx_stats.alloc_page_failed++;
 411		return false;
 412	}
 413
 414	/* map page for use */
 415	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
 416
 417	/* if mapping failed free memory back to system since
 418	 * there isn't much point in holding memory we can't use
 419	 */
 420	if (dma_mapping_error(rx_ring->dev, dma)) {
 421		__free_pages(page, 0);
 422		rx_ring->rx_stats.alloc_page_failed++;
 423		return false;
 424	}
 425
 426	bi->dma = dma;
 427	bi->page = page;
 428	bi->page_offset = 0;
 429
 430	return true;
 431}
 432
 433/**
 434 * ice_alloc_rx_bufs - Replace used receive buffers
 435 * @rx_ring: ring to place buffers on
 436 * @cleaned_count: number of buffers to replace
 437 *
 438 * Returns false if all allocations were successful, true if any fail
 439 */
 440bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
 441{
 442	union ice_32b_rx_flex_desc *rx_desc;
 443	u16 ntu = rx_ring->next_to_use;
 444	struct ice_rx_buf *bi;
 445
 446	/* do nothing if no valid netdev defined */
 447	if (!rx_ring->netdev || !cleaned_count)
 448		return false;
 449
 450	/* get the RX descriptor and buffer based on next_to_use */
 451	rx_desc = ICE_RX_DESC(rx_ring, ntu);
 452	bi = &rx_ring->rx_buf[ntu];
 453
 454	do {
 455		if (!ice_alloc_mapped_page(rx_ring, bi))
 456			goto no_bufs;
 457
 458		/* Refresh the desc even if buffer_addrs didn't change
 459		 * because each write-back erases this info.
 460		 */
 461		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
 462
 463		rx_desc++;
 464		bi++;
 465		ntu++;
 466		if (unlikely(ntu == rx_ring->count)) {
 467			rx_desc = ICE_RX_DESC(rx_ring, 0);
 468			bi = rx_ring->rx_buf;
 469			ntu = 0;
 470		}
 471
 472		/* clear the status bits for the next_to_use descriptor */
 473		rx_desc->wb.status_error0 = 0;
 474
 475		cleaned_count--;
 476	} while (cleaned_count);
 477
 478	if (rx_ring->next_to_use != ntu)
 479		ice_release_rx_desc(rx_ring, ntu);
 480
 481	return false;
 482
 483no_bufs:
 484	if (rx_ring->next_to_use != ntu)
 485		ice_release_rx_desc(rx_ring, ntu);
 486
 487	/* make sure to come back via polling to try again after
 488	 * allocation failure
 489	 */
 490	return true;
 491}
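/* Illustrative call pattern (editor's sketch, not part of this file):
 * at queue bring-up the ring is typically filled in one shot with every
 * currently unused descriptor, e.g.
 *
 *	ice_alloc_rx_bufs(rx_ring, ICE_DESC_UNUSED(rx_ring));
 *
 * after which ice_clean_rx_irq() tops the ring back up in batches as it
 * consumes buffers.
 */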
 492
 493/**
 494 * ice_page_is_reserved - check if reuse is possible
 495 * @page: page struct to check
 496 */
 497static bool ice_page_is_reserved(struct page *page)
 498{
 499	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
 500}
 501
 502/**
 503 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff
 504 * @rx_buf: buffer containing page to add
 505 * @rx_desc: descriptor containing length of buffer written by hardware
 506 * @skb: sk_buf to place the data into
 507 *
 508 * This function will add the data contained in rx_buf->page to the skb.
 509 * This is done either through a direct copy if the data in the buffer is
 510 * less than the skb header size, otherwise it will just attach the page as
 511 * a frag to the skb.
 512 *
 513 * The function will then update the page offset if necessary and return
 514 * true if the buffer can be reused by the adapter.
 515 */
 516static bool ice_add_rx_frag(struct ice_rx_buf *rx_buf,
 517			    union ice_32b_rx_flex_desc *rx_desc,
 518			    struct sk_buff *skb)
 519{
 520#if (PAGE_SIZE < 8192)
 521	unsigned int truesize = ICE_RXBUF_2048;
 522#else
 523	unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048;
 524	unsigned int truesize;
  525#endif /* PAGE_SIZE < 8192 */
 526
 527	struct page *page;
 528	unsigned int size;
 529
 530	size = le16_to_cpu(rx_desc->wb.pkt_len) &
 531		ICE_RX_FLX_DESC_PKT_LEN_M;
 532
 533	page = rx_buf->page;
 534
 535#if (PAGE_SIZE >= 8192)
 536	truesize = ALIGN(size, L1_CACHE_BYTES);
  537#endif /* PAGE_SIZE >= 8192 */
 538
 539	/* will the data fit in the skb we allocated? if so, just
 540	 * copy it as it is pretty small anyway
 541	 */
 542	if (size <= ICE_RX_HDR_SIZE && !skb_is_nonlinear(skb)) {
 543		unsigned char *va = page_address(page) + rx_buf->page_offset;
 544
 545		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
 546
 547		/* page is not reserved, we can reuse buffer as-is */
 548		if (likely(!ice_page_is_reserved(page)))
 549			return true;
 550
 551		/* this page cannot be reused so discard it */
 552		__free_pages(page, 0);
 553		return false;
 554	}
 555
 556	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
 557			rx_buf->page_offset, size, truesize);
 558
 559	/* avoid re-using remote pages */
 560	if (unlikely(ice_page_is_reserved(page)))
 561		return false;
 562
 563#if (PAGE_SIZE < 8192)
 564	/* if we are only owner of page we can reuse it */
 565	if (unlikely(page_count(page) != 1))
 566		return false;
 567
 568	/* flip page offset to other buffer */
 569	rx_buf->page_offset ^= truesize;
 570#else
 571	/* move offset up to the next cache line */
 572	rx_buf->page_offset += truesize;
 573
 574	if (rx_buf->page_offset > last_offset)
 575		return false;
  576#endif /* PAGE_SIZE < 8192 */
 577
 578	/* Even if we own the page, we are not allowed to use atomic_set()
 579	 * This would break get_page_unless_zero() users.
 580	 */
 581	get_page(rx_buf->page);
 582
 583	return true;
 584}
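/* Worked example of the page-flip reuse above, assuming 4K pages and
 * 2K buffers (truesize == ICE_RXBUF_2048 == 2048):
 *
 *	page_offset: 0 ^ 2048 == 2048, then 2048 ^ 2048 == 0, ...
 *
 * so successive receives alternate between the two halves of the page,
 * and the flip is only performed while the driver is the sole owner of
 * the page (page_count(page) == 1).
 */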
 585
 586/**
 587 * ice_reuse_rx_page - page flip buffer and store it back on the ring
 588 * @rx_ring: rx descriptor ring to store buffers on
 589 * @old_buf: donor buffer to have page reused
 590 *
 591 * Synchronizes page for reuse by the adapter
 592 */
 593static void ice_reuse_rx_page(struct ice_ring *rx_ring,
 594			      struct ice_rx_buf *old_buf)
 595{
 596	u16 nta = rx_ring->next_to_alloc;
 597	struct ice_rx_buf *new_buf;
 598
 599	new_buf = &rx_ring->rx_buf[nta];
 600
 601	/* update, and store next to alloc */
 602	nta++;
 603	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
 604
 605	/* transfer page from old buffer to new buffer */
 606	*new_buf = *old_buf;
 607}
 608
 609/**
 610 * ice_fetch_rx_buf - Allocate skb and populate it
 611 * @rx_ring: rx descriptor ring to transact packets on
 612 * @rx_desc: descriptor containing info written by hardware
 613 *
 614 * This function allocates an skb on the fly, and populates it with the page
 615 * data from the current receive descriptor, taking care to set up the skb
 616 * correctly, as well as handling calling the page recycle function if
 617 * necessary.
 618 */
 619static struct sk_buff *ice_fetch_rx_buf(struct ice_ring *rx_ring,
 620					union ice_32b_rx_flex_desc *rx_desc)
 621{
 622	struct ice_rx_buf *rx_buf;
 623	struct sk_buff *skb;
 624	struct page *page;
 625
 626	rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
 627	page = rx_buf->page;
 628	prefetchw(page);
 629
 630	skb = rx_buf->skb;
 631
 632	if (likely(!skb)) {
 633		u8 *page_addr = page_address(page) + rx_buf->page_offset;
 634
 635		/* prefetch first cache line of first page */
 636		prefetch(page_addr);
 637#if L1_CACHE_BYTES < 128
 638		prefetch((void *)(page_addr + L1_CACHE_BYTES));
 639#endif /* L1_CACHE_BYTES */
 640
 641		/* allocate a skb to store the frags */
 642		skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
 643				       ICE_RX_HDR_SIZE,
 644				       GFP_ATOMIC | __GFP_NOWARN);
 645		if (unlikely(!skb)) {
 646			rx_ring->rx_stats.alloc_buf_failed++;
 647			return NULL;
 648		}
 649
 650		/* we will be copying header into skb->data in
 651		 * pskb_may_pull so it is in our interest to prefetch
 652		 * it now to avoid a possible cache miss
 653		 */
 654		prefetchw(skb->data);
 655
 656		skb_record_rx_queue(skb, rx_ring->q_index);
 657	} else {
 658		/* we are reusing so sync this buffer for CPU use */
 659		dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
 660					      rx_buf->page_offset,
 661					      ICE_RXBUF_2048,
 662					      DMA_FROM_DEVICE);
 663
 664		rx_buf->skb = NULL;
 665	}
 666
 667	/* pull page into skb */
 668	if (ice_add_rx_frag(rx_buf, rx_desc, skb)) {
 669		/* hand second half of page back to the ring */
 670		ice_reuse_rx_page(rx_ring, rx_buf);
 671		rx_ring->rx_stats.page_reuse_count++;
 672	} else {
 673		/* we are not reusing the buffer so unmap it */
 674		dma_unmap_page(rx_ring->dev, rx_buf->dma, PAGE_SIZE,
 675			       DMA_FROM_DEVICE);
 676	}
 677
 678	/* clear contents of buffer_info */
 679	rx_buf->page = NULL;
 680
 681	return skb;
 682}
 683
 684/**
 685 * ice_pull_tail - ice specific version of skb_pull_tail
 686 * @skb: pointer to current skb being adjusted
 687 *
 688 * This function is an ice specific version of __pskb_pull_tail.  The
 689 * main difference between this version and the original function is that
 690 * this function can make several assumptions about the state of things
 691 * that allow for significant optimizations versus the standard function.
 692 * As a result we can do things like drop a frag and maintain an accurate
 693 * truesize for the skb.
 694 */
 695static void ice_pull_tail(struct sk_buff *skb)
 696{
 697	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
 698	unsigned int pull_len;
 699	unsigned char *va;
 700
 701	/* it is valid to use page_address instead of kmap since we are
  702	 * working with pages allocated out of the lowmem pool per
 703	 * alloc_page(GFP_ATOMIC)
 704	 */
 705	va = skb_frag_address(frag);
 706
 707	/* we need the header to contain the greater of either ETH_HLEN or
 708	 * 60 bytes if the skb->len is less than 60 for skb_pad.
 709	 */
 710	pull_len = eth_get_headlen(va, ICE_RX_HDR_SIZE);
 711
 712	/* align pull length to size of long to optimize memcpy performance */
 713	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
 714
 715	/* update all of the pointers */
 716	skb_frag_size_sub(frag, pull_len);
 717	frag->page_offset += pull_len;
 718	skb->data_len -= pull_len;
 719	skb->tail += pull_len;
 720}
 721
 722/**
 723 * ice_cleanup_headers - Correct empty headers
 724 * @skb: pointer to current skb being fixed
 725 *
 726 * Also address the case where we are pulling data in on pages only
 727 * and as such no data is present in the skb header.
 728 *
 729 * In addition if skb is not at least 60 bytes we need to pad it so that
 730 * it is large enough to qualify as a valid Ethernet frame.
 731 *
 732 * Returns true if an error was encountered and skb was freed.
 733 */
 734static bool ice_cleanup_headers(struct sk_buff *skb)
 735{
 736	/* place header in linear portion of buffer */
 737	if (skb_is_nonlinear(skb))
 738		ice_pull_tail(skb);
 739
 740	/* if eth_skb_pad returns an error the skb was freed */
 741	if (eth_skb_pad(skb))
 742		return true;
 743
 744	return false;
 745}
 746
 747/**
 748 * ice_test_staterr - tests bits in Rx descriptor status and error fields
 749 * @rx_desc: pointer to receive descriptor (in le64 format)
 750 * @stat_err_bits: value to mask
 751 *
 752 * This function does some fast chicanery in order to return the
 753 * value of the mask which is really only used for boolean tests.
  755 * The status_error0 field doesn't need to be shifted because it begins
 755 * at offset zero.
 756 */
 757static bool ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc,
 758			     const u16 stat_err_bits)
 759{
 760	return !!(rx_desc->wb.status_error0 &
 761		  cpu_to_le16(stat_err_bits));
 762}
 763
 764/**
 765 * ice_is_non_eop - process handling of non-EOP buffers
 766 * @rx_ring: Rx ring being processed
 767 * @rx_desc: Rx descriptor for current buffer
 768 * @skb: Current socket buffer containing buffer in progress
 769 *
 770 * This function updates next to clean.  If the buffer is an EOP buffer
 771 * this function exits returning false, otherwise it will place the
 772 * sk_buff in the next buffer to be chained and return true indicating
 773 * that this is in fact a non-EOP buffer.
 774 */
 775static bool ice_is_non_eop(struct ice_ring *rx_ring,
 776			   union ice_32b_rx_flex_desc *rx_desc,
 777			   struct sk_buff *skb)
 778{
 779	u32 ntc = rx_ring->next_to_clean + 1;
 780
 781	/* fetch, update, and store next to clean */
 782	ntc = (ntc < rx_ring->count) ? ntc : 0;
 783	rx_ring->next_to_clean = ntc;
 784
 785	prefetch(ICE_RX_DESC(rx_ring, ntc));
 786
 787	/* if we are the last buffer then there is nothing else to do */
 788#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
 789	if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
 790		return false;
 791
 792	/* place skb in next buffer to be received */
 793	rx_ring->rx_buf[ntc].skb = skb;
 794	rx_ring->rx_stats.non_eop_descs++;
 795
 796	return true;
 797}
 798
 799/**
 800 * ice_ptype_to_htype - get a hash type
 801 * @ptype: the ptype value from the descriptor
 802 *
 803 * Returns a hash type to be used by skb_set_hash
 804 */
 805static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)
 806{
 807	return PKT_HASH_TYPE_NONE;
 808}
 809
 810/**
 811 * ice_rx_hash - set the hash value in the skb
 812 * @rx_ring: descriptor ring
 813 * @rx_desc: specific descriptor
 814 * @skb: pointer to current skb
 815 * @rx_ptype: the ptype value from the descriptor
 816 */
 817static void
 818ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
 819	    struct sk_buff *skb, u8 rx_ptype)
 820{
 821	struct ice_32b_rx_flex_desc_nic *nic_mdid;
 822	u32 hash;
 823
 824	if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
 825		return;
 826
 827	if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
 828		return;
 829
 830	nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
 831	hash = le32_to_cpu(nic_mdid->rss_hash);
 832	skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
 833}
 834
 835/**
 836 * ice_rx_csum - Indicate in skb if checksum is good
 837 * @vsi: the VSI we care about
 838 * @skb: skb currently being received and modified
 839 * @rx_desc: the receive descriptor
 840 * @ptype: the packet type decoded by hardware
 841 *
 842 * skb->protocol must be set before this function is called
 843 */
 844static void ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb,
 845			union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
 846{
 847	struct ice_rx_ptype_decoded decoded;
 848	u32 rx_error, rx_status;
 849	bool ipv4, ipv6;
 850
 851	rx_status = le16_to_cpu(rx_desc->wb.status_error0);
 852	rx_error = rx_status;
 853
 854	decoded = ice_decode_rx_desc_ptype(ptype);
 855
 856	/* Start with CHECKSUM_NONE and by default csum_level = 0 */
 857	skb->ip_summed = CHECKSUM_NONE;
 858	skb_checksum_none_assert(skb);
 859
 860	/* check if Rx checksum is enabled */
 861	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
 862		return;
 863
 864	/* check if HW has decoded the packet and checksum */
 865	if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
 866		return;
 867
 868	if (!(decoded.known && decoded.outer_ip))
 869		return;
 870
 871	ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
 872	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
 873	ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
 874	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);
 875
 876	if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
 877				 BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
 878		goto checksum_fail;
 879	else if (ipv6 && (rx_status &
 880		 (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
 881		goto checksum_fail;
 882
 883	/* check for L4 errors and handle packets that were not able to be
 884	 * checksummed due to arrival speed
 885	 */
 886	if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
 887		goto checksum_fail;
 888
 889	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
 890	switch (decoded.inner_prot) {
 891	case ICE_RX_PTYPE_INNER_PROT_TCP:
 892	case ICE_RX_PTYPE_INNER_PROT_UDP:
 893	case ICE_RX_PTYPE_INNER_PROT_SCTP:
 894		skb->ip_summed = CHECKSUM_UNNECESSARY;
 895	default:
 896		break;
 897	}
 898	return;
 899
 900checksum_fail:
 901	vsi->back->hw_csum_rx_error++;
 902}
 903
 904/**
 905 * ice_process_skb_fields - Populate skb header fields from Rx descriptor
 906 * @rx_ring: rx descriptor ring packet is being transacted on
 907 * @rx_desc: pointer to the EOP Rx descriptor
 908 * @skb: pointer to current skb being populated
 909 * @ptype: the packet type decoded by hardware
 910 *
 911 * This function checks the ring, descriptor, and packet information in
 912 * order to populate the hash, checksum, VLAN, protocol, and
 913 * other fields within the skb.
 914 */
 915static void ice_process_skb_fields(struct ice_ring *rx_ring,
 916				   union ice_32b_rx_flex_desc *rx_desc,
 917				   struct sk_buff *skb, u8 ptype)
 918{
 919	ice_rx_hash(rx_ring, rx_desc, skb, ptype);
 920
 921	/* modifies the skb - consumes the enet header */
 922	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 923
 924	ice_rx_csum(rx_ring->vsi, skb, rx_desc, ptype);
 925}
 926
 927/**
 928 * ice_receive_skb - Send a completed packet up the stack
 929 * @rx_ring: rx ring in play
 930 * @skb: packet to send up
 931 * @vlan_tag: vlan tag for packet
 932 *
  933 * This function sends the completed packet (via skb) up the stack using
  934 * GRO receive functions (with/without VLAN tag)
 935 */
 936static void ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb,
 937			    u16 vlan_tag)
 938{
 939	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
 940	    (vlan_tag & VLAN_VID_MASK)) {
 941		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
 942	}
 943	napi_gro_receive(&rx_ring->q_vector->napi, skb);
 944}
 945
 946/**
 947 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 948 * @rx_ring: rx descriptor ring to transact packets on
 949 * @budget: Total limit on number of packets to process
 950 *
 951 * This function provides a "bounce buffer" approach to Rx interrupt
 952 * processing.  The advantage to this is that on systems that have
 953 * expensive overhead for IOMMU access this provides a means of avoiding
 954 * it by maintaining the mapping of the page to the system.
 955 *
 956 * Returns amount of work completed
 957 */
 958static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
 959{
 960	unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
 961	u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
 962	bool failure = false;
 963
 964	/* start the loop to process RX packets bounded by 'budget' */
 965	while (likely(total_rx_pkts < (unsigned int)budget)) {
 966		union ice_32b_rx_flex_desc *rx_desc;
 967		struct sk_buff *skb;
 968		u16 stat_err_bits;
 969		u16 vlan_tag = 0;
 970		u8 rx_ptype;
 971
 972		/* return some buffers to hardware, one at a time is too slow */
 973		if (cleaned_count >= ICE_RX_BUF_WRITE) {
 974			failure = failure ||
 975				  ice_alloc_rx_bufs(rx_ring, cleaned_count);
 976			cleaned_count = 0;
 977		}
 978
 979		/* get the RX desc from RX ring based on 'next_to_clean' */
 980		rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
 981
 982		/* status_error_len will always be zero for unused descriptors
 983		 * because it's cleared in cleanup, and overlaps with hdr_addr
 984		 * which is always zero because packet split isn't used, if the
 985		 * hardware wrote DD then it will be non-zero
 986		 */
 987		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
 988		if (!ice_test_staterr(rx_desc, stat_err_bits))
 989			break;
 990
 991		/* This memory barrier is needed to keep us from reading
 992		 * any other fields out of the rx_desc until we know the
 993		 * DD bit is set.
 994		 */
 995		dma_rmb();
 996
 997		/* allocate (if needed) and populate skb */
 998		skb = ice_fetch_rx_buf(rx_ring, rx_desc);
 999		if (!skb)
1000			break;
1001
1002		cleaned_count++;
1003
 1004		/* skip if it is a non-EOP desc */
1005		if (ice_is_non_eop(rx_ring, rx_desc, skb))
1006			continue;
1007
1008		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
1009		if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) {
1010			dev_kfree_skb_any(skb);
1011			continue;
1012		}
1013
1014		rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
1015			ICE_RX_FLEX_DESC_PTYPE_M;
1016
1017		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
1018		if (ice_test_staterr(rx_desc, stat_err_bits))
1019			vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);
1020
1021		/* correct empty headers and pad skb if needed (to make valid
 1022		 * Ethernet frame)
1023		 */
1024		if (ice_cleanup_headers(skb)) {
1025			skb = NULL;
1026			continue;
1027		}
1028
1029		/* probably a little skewed due to removing CRC */
1030		total_rx_bytes += skb->len;
1031
1032		/* populate checksum, VLAN, and protocol */
1033		ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
1034
1035		/* send completed skb up the stack */
1036		ice_receive_skb(rx_ring, skb, vlan_tag);
1037
1038		/* update budget accounting */
1039		total_rx_pkts++;
1040	}
1041
1042	/* update queue and vector specific stats */
1043	u64_stats_update_begin(&rx_ring->syncp);
1044	rx_ring->stats.pkts += total_rx_pkts;
1045	rx_ring->stats.bytes += total_rx_bytes;
1046	u64_stats_update_end(&rx_ring->syncp);
1047	rx_ring->q_vector->rx.total_pkts += total_rx_pkts;
1048	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1049
1050	/* guarantee a trip back through this routine if there was a failure */
1051	return failure ? budget : (int)total_rx_pkts;
1052}
1053
1054/**
1055 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
1056 * @napi: napi struct with our devices info in it
1057 * @budget: amount of work driver is allowed to do this pass, in packets
1058 *
1059 * This function will clean all queues associated with a q_vector.
1060 *
1061 * Returns the amount of work done
1062 */
1063int ice_napi_poll(struct napi_struct *napi, int budget)
1064{
1065	struct ice_q_vector *q_vector =
1066				container_of(napi, struct ice_q_vector, napi);
1067	struct ice_vsi *vsi = q_vector->vsi;
1068	struct ice_pf *pf = vsi->back;
1069	bool clean_complete = true;
1070	int budget_per_ring = 0;
1071	struct ice_ring *ring;
1072	int work_done = 0;
1073
1074	/* Since the actual Tx work is minimal, we can give the Tx a larger
1075	 * budget and be more aggressive about cleaning up the Tx descriptors.
1076	 */
1077	ice_for_each_ring(ring, q_vector->tx)
1078		if (!ice_clean_tx_irq(vsi, ring, budget))
1079			clean_complete = false;
1080
1081	/* Handle case where we are called by netpoll with a budget of 0 */
1082	if (budget <= 0)
1083		return budget;
1084
1085	/* We attempt to distribute budget to each Rx queue fairly, but don't
1086	 * allow the budget to go below 1 because that would exit polling early.
1087	 */
1088	if (q_vector->num_ring_rx)
1089		budget_per_ring = max(budget / q_vector->num_ring_rx, 1);
1090
1091	ice_for_each_ring(ring, q_vector->rx) {
1092		int cleaned;
1093
1094		cleaned = ice_clean_rx_irq(ring, budget_per_ring);
1095		work_done += cleaned;
1096		/* if we clean as many as budgeted, we must not be done */
1097		if (cleaned >= budget_per_ring)
1098			clean_complete = false;
1099	}
1100
1101	/* If work not completed, return budget and polling will return */
1102	if (!clean_complete)
1103		return budget;
1104
1105	/* Work is done so exit the polling mode and re-enable the interrupt */
1106	napi_complete_done(napi, work_done);
1107	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
1108		ice_irq_dynamic_ena(&vsi->back->hw, vsi, q_vector);
1109	return 0;
1110}
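/* Example of the Rx budget split above (numbers illustrative): with a
 * NAPI budget of 64 and a q_vector serving four Rx rings,
 * budget_per_ring = max(64 / 4, 1) = 16, so each ring may clean at most
 * 16 packets per poll and one busy ring cannot starve the others.
 */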
1111
1112/* helper function for building cmd/type/offset */
1113static __le64
1114build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
1115{
1116	return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
1117			   (td_cmd    << ICE_TXD_QW1_CMD_S) |
1118			   (td_offset << ICE_TXD_QW1_OFFSET_S) |
1119			   ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
1120			   (td_tag    << ICE_TXD_QW1_L2TAG1_S));
1121}
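/* Example (editor's illustration): a lone 256 byte buffer that ends the
 * packet and requests a completion report would be described by
 *
 *	build_ctob(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS, 0, 256, 0)
 *
 * which packs ICE_TX_DESC_DTYPE_DATA into the low bits, the command
 * flags at ICE_TXD_QW1_CMD_S, and the length at ICE_TXD_QW1_TX_BUF_SZ_S.
 */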
1122
1123/**
1124 * __ice_maybe_stop_tx - 2nd level check for tx stop conditions
1125 * @tx_ring: the ring to be checked
1126 * @size: the size buffer we want to assure is available
1127 *
1128 * Returns -EBUSY if a stop is needed, else 0
1129 */
1130static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
1131{
1132	netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
1133	/* Memory barrier before checking head and tail */
1134	smp_mb();
1135
1136	/* Check again in a case another CPU has just made room available. */
1137	if (likely(ICE_DESC_UNUSED(tx_ring) < size))
1138		return -EBUSY;
1139
1140	/* A reprieve! - use start_subqueue because it doesn't call schedule */
1141	netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
1142	++tx_ring->tx_stats.restart_q;
1143	return 0;
1144}
1145
1146/**
1147 * ice_maybe_stop_tx - 1st level check for tx stop conditions
1148 * @tx_ring: the ring to be checked
1149 * @size:    the size buffer we want to assure is available
1150 *
1151 * Returns 0 if stop is not needed
1152 */
1153static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
1154{
1155	if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
1156		return 0;
1157	return __ice_maybe_stop_tx(tx_ring, size);
1158}
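/* Note on the stop/wake protocol (editor's summary): the smp_mb() in
 * __ice_maybe_stop_tx() orders "queue stopped" before the re-check of
 * ICE_DESC_UNUSED(), and it pairs with the smp_mb() in
 * ice_clean_tx_irq(), which orders the next_to_clean update before the
 * __netif_subqueue_stopped() test; one side therefore always observes
 * the other's write and the queue cannot remain stopped forever.
 */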
1159
1160/**
1161 * ice_tx_map - Build the Tx descriptor
1162 * @tx_ring: ring to send buffer on
1163 * @first: first buffer info buffer to use
1164 * @off: pointer to struct that holds offload parameters
1165 *
1166 * This function loops over the skb data pointed to by *first
1167 * and gets a physical address for each memory location and programs
1168 * it and the length into the transmit descriptor.
1169 */
1170static void
1171ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
1172	   struct ice_tx_offload_params *off)
1173{
1174	u64 td_offset, td_tag, td_cmd;
1175	u16 i = tx_ring->next_to_use;
1176	struct skb_frag_struct *frag;
1177	unsigned int data_len, size;
1178	struct ice_tx_desc *tx_desc;
1179	struct ice_tx_buf *tx_buf;
1180	struct sk_buff *skb;
1181	dma_addr_t dma;
1182
1183	td_tag = off->td_l2tag1;
1184	td_cmd = off->td_cmd;
1185	td_offset = off->td_offset;
1186	skb = first->skb;
1187
1188	data_len = skb->data_len;
1189	size = skb_headlen(skb);
1190
1191	tx_desc = ICE_TX_DESC(tx_ring, i);
1192
1193	if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
1194		td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
1195		td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
1196			  ICE_TX_FLAGS_VLAN_S;
1197	}
1198
1199	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
1200
1201	tx_buf = first;
1202
1203	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
1204		unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1205
1206		if (dma_mapping_error(tx_ring->dev, dma))
1207			goto dma_error;
1208
1209		/* record length, and DMA address */
1210		dma_unmap_len_set(tx_buf, len, size);
1211		dma_unmap_addr_set(tx_buf, dma, dma);
1212
1213		/* align size to end of page */
1214		max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
1215		tx_desc->buf_addr = cpu_to_le64(dma);
1216
1217		/* account for data chunks larger than the hardware
1218		 * can handle
1219		 */
1220		while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
1221			tx_desc->cmd_type_offset_bsz =
1222				build_ctob(td_cmd, td_offset, max_data, td_tag);
1223
1224			tx_desc++;
1225			i++;
1226
1227			if (i == tx_ring->count) {
1228				tx_desc = ICE_TX_DESC(tx_ring, 0);
1229				i = 0;
1230			}
1231
1232			dma += max_data;
1233			size -= max_data;
1234
1235			max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1236			tx_desc->buf_addr = cpu_to_le64(dma);
1237		}
1238
1239		if (likely(!data_len))
1240			break;
1241
1242		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
1243							  size, td_tag);
1244
1245		tx_desc++;
1246		i++;
1247
1248		if (i == tx_ring->count) {
1249			tx_desc = ICE_TX_DESC(tx_ring, 0);
1250			i = 0;
1251		}
1252
1253		size = skb_frag_size(frag);
1254		data_len -= size;
1255
1256		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
1257				       DMA_TO_DEVICE);
1258
1259		tx_buf = &tx_ring->tx_buf[i];
1260	}
1261
1262	/* record bytecount for BQL */
1263	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
1264
1265	/* record SW timestamp if HW timestamp is not available */
1266	skb_tx_timestamp(first->skb);
1267
1268	i++;
1269	if (i == tx_ring->count)
1270		i = 0;
1271
1272	/* write last descriptor with RS and EOP bits */
1273	td_cmd |= (u64)(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS);
1274	tx_desc->cmd_type_offset_bsz =
1275			build_ctob(td_cmd, td_offset, size, td_tag);
1276
1277	/* Force memory writes to complete before letting h/w know there
1278	 * are new descriptors to fetch.
1279	 *
1280	 * We also use this memory barrier to make certain all of the
1281	 * status bits have been updated before next_to_watch is written.
1282	 */
1283	wmb();
1284
1285	/* set next_to_watch value indicating a packet is present */
1286	first->next_to_watch = tx_desc;
1287
1288	tx_ring->next_to_use = i;
1289
1290	ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
1291
1292	/* notify HW of packet */
1293	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
1294		writel(i, tx_ring->tail);
1295
1296		/* we need this if more than one processor can write to our tail
1297		 * at a time, it synchronizes IO on IA64/Altix systems
1298		 */
1299		mmiowb();
1300	}
1301
1302	return;
1303
1304dma_error:
1305	/* clear dma mappings for failed tx_buf map */
1306	for (;;) {
1307		tx_buf = &tx_ring->tx_buf[i];
1308		ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
1309		if (tx_buf == first)
1310			break;
1311		if (i == 0)
1312			i = tx_ring->count;
1313		i--;
1314	}
1315
1316	tx_ring->next_to_use = i;
1317}
1318
1319/**
1320 * ice_tx_csum - Enable Tx checksum offloads
1321 * @first: pointer to the first descriptor
1322 * @off: pointer to struct that holds offload parameters
1323 *
1324 * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise.
1325 */
1326static
1327int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1328{
1329	u32 l4_len = 0, l3_len = 0, l2_len = 0;
1330	struct sk_buff *skb = first->skb;
1331	union {
1332		struct iphdr *v4;
1333		struct ipv6hdr *v6;
1334		unsigned char *hdr;
1335	} ip;
1336	union {
1337		struct tcphdr *tcp;
1338		unsigned char *hdr;
1339	} l4;
1340	__be16 frag_off, protocol;
1341	unsigned char *exthdr;
1342	u32 offset, cmd = 0;
1343	u8 l4_proto = 0;
1344
1345	if (skb->ip_summed != CHECKSUM_PARTIAL)
1346		return 0;
1347
1348	ip.hdr = skb_network_header(skb);
1349	l4.hdr = skb_transport_header(skb);
1350
1351	/* compute outer L2 header size */
1352	l2_len = ip.hdr - skb->data;
1353	offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
1354
1355	if (skb->encapsulation)
1356		return -1;
1357
1358	/* Enable IP checksum offloads */
1359	protocol = vlan_get_protocol(skb);
1360	if (protocol == htons(ETH_P_IP)) {
1361		l4_proto = ip.v4->protocol;
1362		/* the stack computes the IP header already, the only time we
1363		 * need the hardware to recompute it is in the case of TSO.
1364		 */
1365		if (first->tx_flags & ICE_TX_FLAGS_TSO)
1366			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
1367		else
1368			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
1369
1370	} else if (protocol == htons(ETH_P_IPV6)) {
1371		cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
1372		exthdr = ip.hdr + sizeof(*ip.v6);
1373		l4_proto = ip.v6->nexthdr;
1374		if (l4.hdr != exthdr)
1375			ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
1376					 &frag_off);
1377	} else {
1378		return -1;
1379	}
1380
1381	/* compute inner L3 header size */
1382	l3_len = l4.hdr - ip.hdr;
1383	offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;
1384
1385	/* Enable L4 checksum offloads */
1386	switch (l4_proto) {
1387	case IPPROTO_TCP:
1388		/* enable checksum offloads */
1389		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
1390		l4_len = l4.tcp->doff;
1391		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1392		break;
1393	case IPPROTO_UDP:
1394		/* enable UDP checksum offload */
1395		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
1396		l4_len = (sizeof(struct udphdr) >> 2);
1397		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1398		break;
1399	case IPPROTO_SCTP:
1400	default:
1401		if (first->tx_flags & ICE_TX_FLAGS_TSO)
1402			return -1;
1403		skb_checksum_help(skb);
1404		return 0;
1405	}
1406
1407	off->td_cmd |= cmd;
1408	off->td_offset |= offset;
1409	return 1;
1410}
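/* Worked example for a plain TCP/IPv4 frame with no options: MACLEN is
 * 14 / 2 = 7 two-byte words, IPLEN is 20 / 4 = 5 four-byte words, and
 * L4LEN is doff = 5 four-byte words, so the function builds
 *
 *	offset = (7 << ICE_TX_DESC_LEN_MACLEN_S) |
 *		 (5 << ICE_TX_DESC_LEN_IPLEN_S) |
 *		 (5 << ICE_TX_DESC_LEN_L4_LEN_S);
 */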
1411
1412/**
1413 * ice_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
1414 * @tx_ring: ring to send buffer on
1415 * @first: pointer to struct ice_tx_buf
1416 *
 1417 * Checks the skb and sets up the generic transmit flags related to VLAN
 1418 * tagging for the HW, such as VLAN, DCB, etc.
 1419 *
 1420 * Returns a negative error code if the frame should be dropped, otherwise
 1421 * returns 0 to indicate the flags have been set properly.
1422 */
1423static int
1424ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
1425{
1426	struct sk_buff *skb = first->skb;
1427	__be16 protocol = skb->protocol;
1428
1429	if (protocol == htons(ETH_P_8021Q) &&
1430	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
1431		/* when HW VLAN acceleration is turned off by the user the
1432		 * stack sets the protocol to 8021q so that the driver
1433		 * can take any steps required to support the SW only
1434		 * VLAN handling. In our case the driver doesn't need
1435		 * to take any further steps so just set the protocol
1436		 * to the encapsulated ethertype.
1437		 */
1438		skb->protocol = vlan_get_protocol(skb);
1439		goto out;
1440	}
1441
1442	/* if we have a HW VLAN tag being added, default to the HW one */
1443	if (skb_vlan_tag_present(skb)) {
1444		first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
1445		first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
1446	} else if (protocol == htons(ETH_P_8021Q)) {
1447		struct vlan_hdr *vhdr, _vhdr;
1448
1449		/* for SW VLAN, check the next protocol and store the tag */
1450		vhdr = (struct vlan_hdr *)skb_header_pointer(skb, ETH_HLEN,
1451							     sizeof(_vhdr),
1452							     &_vhdr);
1453		if (!vhdr)
1454			return -EINVAL;
1455
1456		first->tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
1457				   ICE_TX_FLAGS_VLAN_S;
1458		first->tx_flags |= ICE_TX_FLAGS_SW_VLAN;
1459	}
1460
1461out:
1462	return 0;
1463}
1464
1465/**
1466 * ice_tso - computes mss and TSO length to prepare for TSO
1467 * @first: pointer to struct ice_tx_buf
1468 * @off: pointer to struct that holds offload parameters
1469 *
1470 * Returns 0 or error (negative) if TSO can't happen, 1 otherwise.
1471 */
1472static
1473int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1474{
1475	struct sk_buff *skb = first->skb;
1476	union {
1477		struct iphdr *v4;
1478		struct ipv6hdr *v6;
1479		unsigned char *hdr;
1480	} ip;
1481	union {
1482		struct tcphdr *tcp;
1483		unsigned char *hdr;
1484	} l4;
1485	u64 cd_mss, cd_tso_len;
1486	u32 paylen, l4_start;
1487	int err;
1488
1489	if (skb->ip_summed != CHECKSUM_PARTIAL)
1490		return 0;
1491
1492	if (!skb_is_gso(skb))
1493		return 0;
1494
1495	err = skb_cow_head(skb, 0);
1496	if (err < 0)
1497		return err;
1498
1499	ip.hdr = skb_network_header(skb);
1500	l4.hdr = skb_transport_header(skb);
1501
1502	/* initialize outer IP header fields */
1503	if (ip.v4->version == 4) {
1504		ip.v4->tot_len = 0;
1505		ip.v4->check = 0;
1506	} else {
1507		ip.v6->payload_len = 0;
1508	}
1509
1510	/* determine offset of transport header */
1511	l4_start = l4.hdr - skb->data;
1512
1513	/* remove payload length from checksum */
1514	paylen = skb->len - l4_start;
1515	csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
1516
1517	/* compute length of segmentation header */
1518	off->header_len = (l4.tcp->doff * 4) + l4_start;
1519
1520	/* update gso_segs and bytecount */
1521	first->gso_segs = skb_shinfo(skb)->gso_segs;
 1522	first->bytecount += (first->gso_segs - 1) * off->header_len;
1523
1524	cd_tso_len = skb->len - off->header_len;
1525	cd_mss = skb_shinfo(skb)->gso_size;
1526
1527	/* record cdesc_qw1 with TSO parameters */
1528	off->cd_qw1 |= ICE_TX_DESC_DTYPE_CTX |
1529			 (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
1530			 (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
1531			 (cd_mss << ICE_TXD_CTX_QW1_MSS_S);
1532	first->tx_flags |= ICE_TX_FLAGS_TSO;
1533	return 1;
1534}
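/* Worked example: a TCP/IPv4 skb with 14 + 20 + 20 bytes of headers
 * (l4_start = 34, header_len = 54), 4344 bytes of payload, and
 * gso_size = 1448 gives gso_segs = 3, cd_tso_len = 4344 and
 * cd_mss = 1448; bytecount grows by (3 - 1) * 54 = 108 so that it
 * matches the bytes actually placed on the wire once the headers are
 * replicated for every segment.
 */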
1535
1536/**
 1537 * ice_txd_use_count - estimate the number of descriptors needed for Tx
1538 * @size: transmit request size in bytes
1539 *
1540 * Due to hardware alignment restrictions (4K alignment), we need to
1541 * assume that we can have no more than 12K of data per descriptor, even
1542 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
1543 * Thus, we need to divide by 12K. But division is slow! Instead,
1544 * we decompose the operation into shifts and one relatively cheap
1545 * multiply operation.
1546 *
1547 * To divide by 12K, we first divide by 4K, then divide by 3:
1548 *     To divide by 4K, shift right by 12 bits
1549 *     To divide by 3, multiply by 85, then divide by 256
1550 *     (Divide by 256 is done by shifting right by 8 bits)
1551 * Finally, we add one to round up. Because 256 isn't an exact multiple of
1552 * 3, we'll underestimate near each multiple of 12K. This is actually more
1553 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
1554 * segment.  For our purposes this is accurate out to 1M which is orders of
1555 * magnitude greater than our largest possible GSO size.
1556 *
1557 * This would then be implemented as:
1558 *     return (((size >> 12) * 85) >> 8) + 1;
1559 *
1560 * Since multiplication and division are commutative, we can reorder
1561 * operations into:
1562 *     return ((size * 85) >> 20) + 1;
1563 */
1564static unsigned int ice_txd_use_count(unsigned int size)
1565{
1566	return ((size * 85) >> 20) + 1;
1567}
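/* Worked examples of the approximation above:
 *	size =  1500: ((1500  * 85) >> 20) + 1 = 0 + 1 = 1 descriptor
 *	size = 24576: ((24576 * 85) >> 20) + 1 = 1 + 1 = 2 descriptors
 *	size = 60000: ((60000 * 85) >> 20) + 1 = 4 + 1 = 5 descriptors
 * which agrees with the 12K-per-descriptor bound (60000 / 12288 = 4.88).
 */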
1568
1569/**
1570 * ice_xmit_desc_count - calculate number of tx descriptors needed
1571 * @skb: send buffer
1572 *
1573 * Returns number of data descriptors needed for this skb.
1574 */
1575static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
1576{
1577	const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
1578	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
1579	unsigned int count = 0, size = skb_headlen(skb);
1580
1581	for (;;) {
1582		count += ice_txd_use_count(size);
1583
1584		if (!nr_frags--)
1585			break;
1586
1587		size = skb_frag_size(frag++);
1588	}
1589
1590	return count;
1591}
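/* Example: an skb with a 1500 byte linear head and two 4K page frags
 * needs 1 + 1 + 1 = 3 data descriptors, since each piece fits within a
 * single descriptor's 12K budget.
 */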
1592
1593/**
1594 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
1595 * @skb: send buffer
1596 *
1597 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
1598 * and so we need to figure out the cases where we need to linearize the skb.
1599 *
1600 * For TSO we need to count the TSO header and segment payload separately.
1601 * As such we need to check cases where we have 7 fragments or more as we
1602 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
1603 * the segment payload in the first descriptor, and another 7 for the
1604 * fragments.
1605 */
1606static bool __ice_chk_linearize(struct sk_buff *skb)
1607{
1608	const struct skb_frag_struct *frag, *stale;
1609	int nr_frags, sum;
1610
1611	/* no need to check if number of frags is less than 7 */
1612	nr_frags = skb_shinfo(skb)->nr_frags;
1613	if (nr_frags < (ICE_MAX_BUF_TXD - 1))
1614		return false;
1615
1616	/* We need to walk through the list and validate that each group
1617	 * of 6 fragments totals at least gso_size.
1618	 */
1619	nr_frags -= ICE_MAX_BUF_TXD - 2;
1620	frag = &skb_shinfo(skb)->frags[0];
1621
1622	/* Initialize size to the negative value of gso_size minus 1.  We
 1623	 * use this as the worst case scenario in which the frag ahead
1624	 * of us only provides one byte which is why we are limited to 6
1625	 * descriptors for a single transmit as the header and previous
1626	 * fragment are already consuming 2 descriptors.
1627	 */
1628	sum = 1 - skb_shinfo(skb)->gso_size;
1629
1630	/* Add size of frags 0 through 4 to create our initial sum */
1631	sum += skb_frag_size(frag++);
1632	sum += skb_frag_size(frag++);
1633	sum += skb_frag_size(frag++);
1634	sum += skb_frag_size(frag++);
1635	sum += skb_frag_size(frag++);
1636
1637	/* Walk through fragments adding latest fragment, testing it, and
1638	 * then removing stale fragments from the sum.
1639	 */
1640	stale = &skb_shinfo(skb)->frags[0];
1641	for (;;) {
1642		sum += skb_frag_size(frag++);
1643
1644		/* if sum is negative we failed to make sufficient progress */
1645		if (sum < 0)
1646			return true;
1647
1648		if (!nr_frags--)
1649			break;
1650
1651		sum -= skb_frag_size(stale++);
1652	}
1653
1654	return false;
1655}
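/* Worked example: gso_size = 16384 with eight 2048 byte frags must be
 * linearized, since any window of six frags covers only 12288 bytes:
 * the running sum starts at 1 - 16384 + 5 * 2048 = -6143 and the first
 * loop iteration only brings it to -4095, which is still negative.
 */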
1656
1657/**
1658 * ice_chk_linearize - Check if there are more than 8 fragments per packet
1659 * @skb:      send buffer
1660 * @count:    number of buffers used
1661 *
1662 * Note: Our HW can't scatter-gather more than 8 fragments to build
1663 * a packet on the wire and so we need to figure out the cases where we
1664 * need to linearize the skb.
1665 */
1666static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
1667{
1668	/* Both TSO and single send will work if count is less than 8 */
1669	if (likely(count < ICE_MAX_BUF_TXD))
1670		return false;
1671
1672	if (skb_is_gso(skb))
1673		return __ice_chk_linearize(skb);
1674
1675	/* we can support up to 8 data buffers for a single send */
1676	return count != ICE_MAX_BUF_TXD;
1677}
1678
1679/**
1680 * ice_xmit_frame_ring - Sends buffer on Tx ring
1681 * @skb: send buffer
1682 * @tx_ring: ring to send buffer on
1683 *
1684 * Returns NETDEV_TX_OK if sent, else an error code
1685 */
1686static netdev_tx_t
1687ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
1688{
1689	struct ice_tx_offload_params offload = { 0 };
1690	struct ice_tx_buf *first;
1691	unsigned int count;
1692	int tso, csum;
1693
1694	count = ice_xmit_desc_count(skb);
1695	if (ice_chk_linearize(skb, count)) {
1696		if (__skb_linearize(skb))
1697			goto out_drop;
1698		count = ice_txd_use_count(skb->len);
1699		tx_ring->tx_stats.tx_linearize++;
1700	}
1701
1702	/* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
1703	 *       + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
1704	 *       + 4 desc gap to avoid the cache line where head is,
1705	 *       + 1 desc for context descriptor,
1706	 * otherwise try next time
1707	 */
1708	if (ice_maybe_stop_tx(tx_ring, count + 4 + 1)) {
1709		tx_ring->tx_stats.tx_busy++;
1710		return NETDEV_TX_BUSY;
1711	}
1712
1713	offload.tx_ring = tx_ring;
1714
1715	/* record the location of the first descriptor for this packet */
1716	first = &tx_ring->tx_buf[tx_ring->next_to_use];
1717	first->skb = skb;
1718	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
1719	first->gso_segs = 1;
1720	first->tx_flags = 0;
1721
1722	/* prepare the VLAN tagging flags for Tx */
1723	if (ice_tx_prepare_vlan_flags(tx_ring, first))
1724		goto out_drop;
1725
1726	/* set up TSO offload */
1727	tso = ice_tso(first, &offload);
1728	if (tso < 0)
1729		goto out_drop;
1730
1731	/* always set up Tx checksum offload */
1732	csum = ice_tx_csum(first, &offload);
1733	if (csum < 0)
1734		goto out_drop;
1735
1736	if (tso || offload.cd_tunnel_params) {
1737		struct ice_tx_ctx_desc *cdesc;
1738		int i = tx_ring->next_to_use;
1739
1740		/* grab the next descriptor */
1741		cdesc = ICE_TX_CTX_DESC(tx_ring, i);
1742		i++;
1743		tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
1744
1745		/* setup context descriptor */
1746		cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
1747		cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
1748		cdesc->rsvd = cpu_to_le16(0);
1749		cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
1750	}
1751
1752	ice_tx_map(tx_ring, first, &offload);
1753	return NETDEV_TX_OK;
1754
1755out_drop:
1756	dev_kfree_skb_any(skb);
1757	return NETDEV_TX_OK;
1758}
1759
1760/**
1761 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
1762 * @skb: send buffer
1763 * @netdev: network interface device structure
1764 *
1765 * Returns NETDEV_TX_OK if sent, else an error code
1766 */
1767netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
1768{
1769	struct ice_netdev_priv *np = netdev_priv(netdev);
1770	struct ice_vsi *vsi = np->vsi;
1771	struct ice_ring *tx_ring;
1772
1773	tx_ring = vsi->tx_rings[skb->queue_mapping];
1774
1775	/* hardware can't handle really short frames, hardware padding works
1776	 * beyond this point
1777	 */
1778	if (skb_put_padto(skb, ICE_MIN_TX_LEN))
1779		return NETDEV_TX_OK;
1780
1781	return ice_xmit_frame_ring(skb, tx_ring);
1782}