// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include <linux/bitfield.h>
#include <linux/prefetch.h>

#include "iavf.h"
#include "iavf_trace.h"
#include "iavf_prototype.h"

static __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
			 u32 td_tag)
{
	return cpu_to_le64(IAVF_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << IAVF_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << IAVF_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << IAVF_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << IAVF_TXD_QW1_L2TAG1_SHIFT));
}

#define IAVF_TXD_CMD (IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_RS)

/**
 * iavf_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/
static void iavf_unmap_and_free_tx_resource(struct iavf_ring *ring,
					    struct iavf_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		if (tx_buffer->tx_flags & IAVF_TX_FLAGS_FD_SB)
			kfree(tx_buffer->raw_buf);
		else
			dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}

	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

/**
 * iavf_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 **/
static void iavf_clean_tx_ring(struct iavf_ring *tx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_bi)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		iavf_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);

	bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * iavf_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void iavf_free_tx_resources(struct iavf_ring *tx_ring)
{
	iavf_clean_tx_ring(tx_ring);
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;

	if (tx_ring->desc) {
		dma_free_coherent(tx_ring->dev, tx_ring->size,
				  tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * iavf_get_tx_pending - how many Tx descriptors not processed
 * @ring: the ring of descriptors
 * @in_sw: is tx_pending being checked in SW or HW
 *
 * Since there is no access to the ring head register
 * in XL710, we need to use our local copies
 **/
static u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw)
{
	u32 head, tail;

	/* underlying hardware might not allow access and/or always return
	 * 0 for the head/tail registers so just use the cached values
	 */
	head = ring->next_to_clean;
	tail = ring->next_to_use;

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}
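
/* Editor's illustrative note (not part of the original source): as a worked
 * example of the wraparound arithmetic above, with a ring of count = 512,
 * next_to_clean (head) = 500 and next_to_use (tail) = 10,
 * iavf_get_tx_pending() returns 10 + 512 - 500 = 22 descriptors still
 * outstanding; when head == tail the ring is idle and 0 is returned.
 */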

/**
 * iavf_force_wb - Issue SW Interrupt so HW does a wb
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to force writeback
 **/
static void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector)
{
	u32 val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
		  IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
		  IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
		  IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK
		  /* allow 00 to be written to the index */;

	wr32(&vsi->back->hw,
	     IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx),
	     val);
}

/**
 * iavf_detect_recover_hung - Function to detect and recover hung_queues
 * @vsi: pointer to vsi struct with tx queues
 *
 * VSI has netdev and netdev has TX queues. This function is to check each of
 * those TX queues if they are hung, trigger recovery by issuing SW interrupt.
 **/
void iavf_detect_recover_hung(struct iavf_vsi *vsi)
{
	struct iavf_ring *tx_ring = NULL;
	struct net_device *netdev;
	unsigned int i;
	int packets;

	if (!vsi)
		return;

	if (test_bit(__IAVF_VSI_DOWN, vsi->state))
		return;

	netdev = vsi->netdev;
	if (!netdev)
		return;

	if (!netif_carrier_ok(netdev))
		return;

	for (i = 0; i < vsi->back->num_active_queues; i++) {
		tx_ring = &vsi->back->tx_rings[i];
		if (tx_ring && tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt_ctr would be negative if there was no
			 * pending work.
			 */
			packets = tx_ring->stats.packets & INT_MAX;
			if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
				iavf_force_wb(vsi, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to iavf_get_tx_pending()
			 */
			smp_rmb();
			tx_ring->tx_stats.prev_pkt_ctr =
				iavf_get_tx_pending(tx_ring, true) ? packets : -1;
		}
	}
}

#define WB_STRIDE 4

/**
 * iavf_clean_tx_irq - Reclaim resources after transmit completes
 * @vsi: the VSI we care about
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static bool iavf_clean_tx_irq(struct iavf_vsi *vsi,
			      struct iavf_ring *tx_ring, int napi_budget)
{
	int i = tx_ring->next_to_clean;
	struct iavf_tx_buffer *tx_buf;
	struct iavf_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = IAVF_DEFAULT_IRQ_WORK;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = IAVF_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		struct iavf_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		iavf_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(IAVF_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_packets += tx_buf->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			iavf_trace(clean_tx_irq_unmap,
				   tx_ring, tx_desc, tx_buf);

			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_bi;
				tx_desc = IAVF_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = IAVF_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_packets += total_packets;

	if (tx_ring->flags & IAVF_TXR_FLAGS_WB_ON_ITR) {
		/* check to see if there are < 4 descriptors
		 * waiting to be written back, then kick the hardware to force
		 * them to be written back in case we stay in NAPI.
		 * In this mode on X722 we do not enable Interrupt.
		 */
		unsigned int j = iavf_get_tx_pending(tx_ring, false);

		if (budget &&
		    ((j / WB_STRIDE) == 0) && (j > 0) &&
		    !test_bit(__IAVF_VSI_DOWN, vsi->state) &&
		    (IAVF_DESC_UNUSED(tx_ring) != tx_ring->count))
			tx_ring->arm_wb = true;
	}

	/* notify netdev of completed buffers */
	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (IAVF_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__IAVF_VSI_DOWN, vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

/**
 * iavf_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to enable writeback
 *
 **/
static void iavf_enable_wb_on_itr(struct iavf_vsi *vsi,
				  struct iavf_q_vector *q_vector)
{
	u16 flags = q_vector->tx.ring[0].flags;
	u32 val;

	if (!(flags & IAVF_TXR_FLAGS_WB_ON_ITR))
		return;

	if (q_vector->arm_wb_state)
		return;

	val = IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
	      IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK; /* set noitr */

	wr32(&vsi->back->hw,
	     IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx), val);
	q_vector->arm_wb_state = true;
}

static bool iavf_container_is_rx(struct iavf_q_vector *q_vector,
				 struct iavf_ring_container *rc)
{
	return &q_vector->rx == rc;
}

#define IAVF_AIM_MULTIPLIER_100G	2560
#define IAVF_AIM_MULTIPLIER_50G		1280
#define IAVF_AIM_MULTIPLIER_40G		1024
#define IAVF_AIM_MULTIPLIER_20G		512
#define IAVF_AIM_MULTIPLIER_10G		256
#define IAVF_AIM_MULTIPLIER_1G		32

static unsigned int iavf_mbps_itr_multiplier(u32 speed_mbps)
{
	switch (speed_mbps) {
	case SPEED_100000:
		return IAVF_AIM_MULTIPLIER_100G;
	case SPEED_50000:
		return IAVF_AIM_MULTIPLIER_50G;
	case SPEED_40000:
		return IAVF_AIM_MULTIPLIER_40G;
	case SPEED_25000:
	case SPEED_20000:
		return IAVF_AIM_MULTIPLIER_20G;
	case SPEED_10000:
	default:
		return IAVF_AIM_MULTIPLIER_10G;
	case SPEED_1000:
	case SPEED_100:
		return IAVF_AIM_MULTIPLIER_1G;
	}
}

static unsigned int
iavf_virtchnl_itr_multiplier(enum virtchnl_link_speed speed_virtchnl)
{
	switch (speed_virtchnl) {
	case VIRTCHNL_LINK_SPEED_40GB:
		return IAVF_AIM_MULTIPLIER_40G;
	case VIRTCHNL_LINK_SPEED_25GB:
	case VIRTCHNL_LINK_SPEED_20GB:
		return IAVF_AIM_MULTIPLIER_20G;
	case VIRTCHNL_LINK_SPEED_10GB:
	default:
		return IAVF_AIM_MULTIPLIER_10G;
	case VIRTCHNL_LINK_SPEED_1GB:
	case VIRTCHNL_LINK_SPEED_100MB:
		return IAVF_AIM_MULTIPLIER_1G;
	}
}

static unsigned int iavf_itr_divisor(struct iavf_adapter *adapter)
{
	if (ADV_LINK_SUPPORT(adapter))
		return IAVF_ITR_ADAPTIVE_MIN_INC *
			iavf_mbps_itr_multiplier(adapter->link_speed_mbps);
	else
		return IAVF_ITR_ADAPTIVE_MIN_INC *
			iavf_virtchnl_itr_multiplier(adapter->link_speed);
}

/**
 * iavf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @rc: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt. The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern. Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void iavf_update_itr(struct iavf_q_vector *q_vector,
			    struct iavf_ring_container *rc)
{
	unsigned int avg_wire_size, packets, bytes, itr;
	unsigned long next_update = jiffies;

	/* If we don't have any rings just leave ourselves set for maximum
	 * possible latency so we take ourselves out of the equation.
	 */
	if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
		return;

	/* For Rx we want to push the delay up and default to low latency.
	 * For Tx we want to pull the delay down and default to high latency.
	 */
	itr = iavf_container_is_rx(q_vector, rc) ?
	      IAVF_ITR_ADAPTIVE_MIN_USECS | IAVF_ITR_ADAPTIVE_LATENCY :
	      IAVF_ITR_ADAPTIVE_MAX_USECS | IAVF_ITR_ADAPTIVE_LATENCY;

	/* If we didn't update within up to 1 - 2 jiffies we can assume
	 * that either packets are coming in so slow there hasn't been
	 * any work, or that there is so much work that NAPI is dealing
	 * with interrupt moderation and we don't need to do anything.
	 */
	if (time_after(next_update, rc->next_update))
		goto clear_counts;

	/* If itr_countdown is set it means we programmed an ITR within
	 * the last 4 interrupt cycles. This has a side effect of us
	 * potentially firing an early interrupt. In order to work around
	 * this we need to throw out any data received for a few
	 * interrupts following the update.
	 */
	if (q_vector->itr_countdown) {
		itr = rc->target_itr;
		goto clear_counts;
	}

	packets = rc->total_packets;
	bytes = rc->total_bytes;

	if (iavf_container_is_rx(q_vector, rc)) {
		/* If Rx there are 1 to 4 packets and bytes are less than
		 * 9000 assume insufficient data to use bulk rate limiting
		 * approach unless Tx is already in bulk rate limiting. We
		 * are likely latency driven.
		 */
		if (packets && packets < 4 && bytes < 9000 &&
		    (q_vector->tx.target_itr & IAVF_ITR_ADAPTIVE_LATENCY)) {
			itr = IAVF_ITR_ADAPTIVE_LATENCY;
			goto adjust_by_size;
		}
	} else if (packets < 4) {
		/* If we have Tx and Rx ITR maxed and Tx ITR is running in
		 * bulk mode and we are receiving 4 or fewer packets just
		 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
		 * that the Rx can relax.
		 */
		if (rc->target_itr == IAVF_ITR_ADAPTIVE_MAX_USECS &&
		    (q_vector->rx.target_itr & IAVF_ITR_MASK) ==
		     IAVF_ITR_ADAPTIVE_MAX_USECS)
			goto clear_counts;
	} else if (packets > 32) {
		/* If we have processed over 32 packets in a single interrupt
		 * for Tx assume we need to switch over to "bulk" mode.
		 */
		rc->target_itr &= ~IAVF_ITR_ADAPTIVE_LATENCY;
	}

	/* We have no packets to actually measure against. This means
	 * either one of the other queues on this vector is active or
	 * we are a Tx queue doing TSO with too high of an interrupt rate.
	 *
	 * Between 4 and 56 we can assume that our current interrupt delay
	 * is only slightly too low. As such we should increase it by a small
	 * fixed amount.
	 */
	if (packets < 56) {
		itr = rc->target_itr + IAVF_ITR_ADAPTIVE_MIN_INC;
		if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) {
			itr &= IAVF_ITR_ADAPTIVE_LATENCY;
			itr += IAVF_ITR_ADAPTIVE_MAX_USECS;
		}
		goto clear_counts;
	}

	if (packets <= 256) {
		itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
		itr &= IAVF_ITR_MASK;

		/* Between 56 and 112 is our "goldilocks" zone where we are
		 * working out "just right". Just report that our current
		 * ITR is good for us.
		 */
		if (packets <= 112)
			goto clear_counts;

		/* If packet count is 128 or greater we are likely looking
		 * at a slight overrun of the delay we want. Try halving
		 * our delay to see if that will cut the number of packets
		 * in half per interrupt.
		 */
		itr /= 2;
		itr &= IAVF_ITR_MASK;
		if (itr < IAVF_ITR_ADAPTIVE_MIN_USECS)
			itr = IAVF_ITR_ADAPTIVE_MIN_USECS;

		goto clear_counts;
	}

	/* The paths below assume we are dealing with a bulk ITR since
	 * number of packets is greater than 256. We are just going to have
	 * to compute a value and try to bring the count under control,
	 * though for smaller packet sizes there isn't much we can do as
	 * NAPI polling will likely be kicking in sooner rather than later.
	 */
	itr = IAVF_ITR_ADAPTIVE_BULK;

adjust_by_size:
	/* If packet counts are 256 or greater we can assume we have a gross
	 * overestimation of what the rate should be. Instead of trying to fine
	 * tune it just use the formula below to try and dial in an exact value
	 * given the current packet size of the frame.
	 */
	avg_wire_size = bytes / packets;

	/* The following is a crude approximation of:
	 *  wmem_default / (size + overhead) = desired_pkts_per_int
	 *  rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
	 *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
	 *
	 * Assuming wmem_default is 212992 and overhead is 640 bytes per
	 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
	 * formula down to
	 *
	 *  (170 * (size + 24)) / (size + 640) = ITR
	 *
	 * We first do some math on the packet size and then finally bitshift
	 * by 8 after rounding up. We also have to account for PCIe link speed
	 * difference as ITR scales based on this.
	 */
	if (avg_wire_size <= 60) {
		/* Start at 250k ints/sec */
		avg_wire_size = 4096;
	} else if (avg_wire_size <= 380) {
		/* 250K ints/sec to 60K ints/sec */
		avg_wire_size *= 40;
		avg_wire_size += 1696;
	} else if (avg_wire_size <= 1084) {
		/* 60K ints/sec to 36K ints/sec */
		avg_wire_size *= 15;
		avg_wire_size += 11452;
	} else if (avg_wire_size <= 1980) {
		/* 36K ints/sec to 30K ints/sec */
		avg_wire_size *= 5;
		avg_wire_size += 22420;
	} else {
		/* plateau at a limit of 30K ints/sec */
		avg_wire_size = 32256;
	}

	/* If we are in low latency mode halve our delay which doubles the
	 * rate to somewhere between 100K to 16K ints/sec
	 */
	if (itr & IAVF_ITR_ADAPTIVE_LATENCY)
		avg_wire_size /= 2;

	/* Resultant value is 256 times larger than it needs to be. This
	 * gives us room to adjust the value as needed to either increase
	 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
	 *
	 * Use addition as we have already recorded the new latency flag
	 * for the ITR value.
	 */
	itr += DIV_ROUND_UP(avg_wire_size,
			    iavf_itr_divisor(q_vector->adapter)) *
	       IAVF_ITR_ADAPTIVE_MIN_INC;

	if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) {
		itr &= IAVF_ITR_ADAPTIVE_LATENCY;
		itr += IAVF_ITR_ADAPTIVE_MAX_USECS;
	}

clear_counts:
	/* write back value */
	rc->target_itr = itr;

	/* next update should occur within next jiffy */
	rc->next_update = next_update + 1;

	rc->total_bytes = 0;
	rc->total_packets = 0;
}
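
/* Editor's illustrative note (not part of the original source): as a worked
 * example of the adjust_by_size mapping above, 300 packets totalling 38400
 * bytes give avg_wire_size = 128, which lands in the 61..380 bucket and is
 * mapped to 128 * 40 + 1696 = 6816. Per the comment in the function, that
 * intermediate value is roughly 256 times too large; the DIV_ROUND_UP() by
 * iavf_itr_divisor() then converts it into ITR units, and because the divisor
 * grows with link speed, faster links end up with a smaller ITR (more
 * interrupts per second) for the same average frame size.
 */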

/**
 * iavf_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the tx ring to set up
 *
 * Return 0 on success, negative on error
 **/
int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_bi);
	bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count;
	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!tx_ring->tx_bi)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct iavf_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			 tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->tx_stats.prev_pkt_ctr = -1;
	return 0;

err:
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;
	return -ENOMEM;
}

/**
 * iavf_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 **/
static void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_bi)
		return;

	if (rx_ring->skb) {
		dev_kfree_skb(rx_ring->skb);
		rx_ring->skb = NULL;
	}

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct iavf_rx_buffer *rx_bi = &rx_ring->rx_bi[i];

		if (!rx_bi->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      rx_bi->dma,
					      rx_bi->page_offset,
					      rx_ring->rx_buf_len,
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
				     iavf_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE,
				     IAVF_RX_DMA_ATTR);

		__page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);

		rx_bi->page = NULL;
		rx_bi->page_offset = 0;
	}

	bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * iavf_free_rx_resources - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void iavf_free_rx_resources(struct iavf_ring *rx_ring)
{
	iavf_clean_rx_ring(rx_ring);
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;

	if (rx_ring->desc) {
		dma_free_coherent(rx_ring->dev, rx_ring->size,
				  rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * iavf_setup_rx_descriptors - Allocate Rx descriptors
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int bi_size;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_bi);
	bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!rx_ring->rx_bi)
		goto err;

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union iavf_32byte_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			 rx_ring->size);
		goto err;
	}

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;
err:
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;
	return -ENOMEM;
}

/**
 * iavf_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
static void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * iavf_rx_offset - Return expected offset into page to access data
 * @rx_ring: Ring we are requesting offset of
 *
 * Returns the offset value for ring into the data buffer.
 */
static unsigned int iavf_rx_offset(struct iavf_ring *rx_ring)
{
	return ring_uses_build_skb(rx_ring) ? IAVF_SKB_PAD : 0;
}

/**
 * iavf_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buffer struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 **/
static bool iavf_alloc_mapped_page(struct iavf_ring *rx_ring,
				   struct iavf_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page)) {
		rx_ring->rx_stats.page_reuse_count++;
		return true;
	}

	/* alloc new page for storage */
	page = dev_alloc_pages(iavf_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
				 iavf_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE,
				 IAVF_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, iavf_rx_pg_order(rx_ring));
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = iavf_rx_offset(rx_ring);

	/* initialize pagecnt_bias to 1 representing we fully own page */
	bi->pagecnt_bias = 1;

	return true;
}

/**
 * iavf_receive_skb - Send a completed packet up the stack
 * @rx_ring: rx ring in play
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void iavf_receive_skb(struct iavf_ring *rx_ring,
			     struct sk_buff *skb, u16 vlan_tag)
{
	struct iavf_q_vector *q_vector = rx_ring->q_vector;

	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (vlan_tag & VLAN_VID_MASK))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
	else if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_STAG_RX) &&
		 vlan_tag & VLAN_VID_MASK)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan_tag);

	napi_gro_receive(&q_vector->napi, skb);
}

/**
 * iavf_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail
 **/
bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)
{
	u16 ntu = rx_ring->next_to_use;
	union iavf_rx_desc *rx_desc;
	struct iavf_rx_buffer *bi;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return false;

	rx_desc = IAVF_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_bi[ntu];

	do {
		if (!iavf_alloc_mapped_page(rx_ring, bi))
			goto no_buffers;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = IAVF_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_bi;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.qword1.status_error_len = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		iavf_release_rx_desc(rx_ring, ntu);

	return false;

no_buffers:
	if (rx_ring->next_to_use != ntu)
		iavf_release_rx_desc(rx_ring, ntu);

	/* make sure to come back via polling to try again after
	 * allocation failure
	 */
	return true;
}

/**
 * iavf_rx_checksum - Indicate in skb if hw indicated a good cksum
 * @vsi: the VSI we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 **/
static void iavf_rx_checksum(struct iavf_vsi *vsi,
			     struct sk_buff *skb,
			     union iavf_rx_desc *rx_desc)
{
	struct iavf_rx_ptype_decoded decoded;
	u32 rx_error, rx_status;
	bool ipv4, ipv6;
	u8 ptype;
	u64 qword;

	qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword);
	rx_error = FIELD_GET(IAVF_RXD_QW1_ERROR_MASK, qword);
	rx_status = FIELD_GET(IAVF_RXD_QW1_STATUS_MASK, qword);
	decoded = decode_rx_desc_ptype(ptype);

	skb->ip_summed = CHECKSUM_NONE;

	skb_checksum_none_assert(skb);

	/* Rx csum enabled and ip headers found? */
	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
		return;

	/* did the hardware decode the packet and checksum? */
	if (!(rx_status & BIT(IAVF_RX_DESC_STATUS_L3L4P_SHIFT)))
		return;

	/* both known and outer_ip must be set for the below code to work */
	if (!(decoded.known && decoded.outer_ip))
		return;

	ipv4 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV4);
	ipv6 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV6);

	if (ipv4 &&
	    (rx_error & (BIT(IAVF_RX_DESC_ERROR_IPE_SHIFT) |
			 BIT(IAVF_RX_DESC_ERROR_EIPE_SHIFT))))
		goto checksum_fail;

	/* likely incorrect csum if alternate IP extension headers found */
	if (ipv6 &&
	    rx_status & BIT(IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT))
		/* don't increment checksum err here, non-fatal err */
		return;

	/* there was some L4 error, count error and punt packet to the stack */
	if (rx_error & BIT(IAVF_RX_DESC_ERROR_L4E_SHIFT))
		goto checksum_fail;

	/* handle packets that were not able to be checksummed due
	 * to arrival speed, in this case the stack can compute
	 * the csum.
	 */
	if (rx_error & BIT(IAVF_RX_DESC_ERROR_PPRS_SHIFT))
		return;

	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
	switch (decoded.inner_prot) {
	case IAVF_RX_PTYPE_INNER_PROT_TCP:
	case IAVF_RX_PTYPE_INNER_PROT_UDP:
	case IAVF_RX_PTYPE_INNER_PROT_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		fallthrough;
	default:
		break;
	}

	return;

checksum_fail:
	vsi->back->hw_csum_rx_error++;
}

/**
 * iavf_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 **/
static int iavf_ptype_to_htype(u8 ptype)
{
	struct iavf_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return PKT_HASH_TYPE_NONE;

	if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
	    decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY4)
		return PKT_HASH_TYPE_L4;
	else if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
		 decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY3)
		return PKT_HASH_TYPE_L3;
	else
		return PKT_HASH_TYPE_L2;
}

/**
 * iavf_rx_hash - set the hash value in the skb
 * @ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: skb currently being received and modified
 * @rx_ptype: Rx packet type
 **/
static void iavf_rx_hash(struct iavf_ring *ring,
			 union iavf_rx_desc *rx_desc,
			 struct sk_buff *skb,
			 u8 rx_ptype)
{
	u32 hash;
	const __le64 rss_mask =
		cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH <<
			    IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT);

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
		hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
		skb_set_hash(skb, hash, iavf_ptype_to_htype(rx_ptype));
	}
}

/**
 * iavf_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 * @rx_ptype: the packet type decoded by hardware
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, protocol, and
 * other fields within the skb.
 **/
static void
iavf_process_skb_fields(struct iavf_ring *rx_ring,
			union iavf_rx_desc *rx_desc, struct sk_buff *skb,
			u8 rx_ptype)
{
	iavf_rx_hash(rx_ring, rx_desc, skb, rx_ptype);

	iavf_rx_checksum(rx_ring->vsi, skb, rx_desc);

	skb_record_rx_queue(skb, rx_ring->queue_index);

	/* modifies the skb - consumes the enet header */
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}

/**
 * iavf_cleanup_headers - Correct empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being fixed
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool iavf_cleanup_headers(struct iavf_ring *rx_ring, struct sk_buff *skb)
{
	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

/**
 * iavf_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
static void iavf_reuse_rx_page(struct iavf_ring *rx_ring,
			       struct iavf_rx_buffer *old_buff)
{
	struct iavf_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_bi[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	new_buff->dma = old_buff->dma;
	new_buff->page = old_buff->page;
	new_buff->page_offset = old_buff->page_offset;
	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}

/**
 * iavf_can_reuse_rx_page - Determine if this page can be reused by
 * the adapter for another receive
 *
 * @rx_buffer: buffer containing the page
 *
 * If page is reusable, rx_buffer->page_offset is adjusted to point to
 * an unused region in the page.
 *
 * For small pages, @truesize will be a constant value, half the size
 * of the memory at page. We'll attempt to alternate between high and
 * low halves of the page, with one half ready for use by the hardware
 * and the other half being consumed by the stack. We use the page
 * ref count to determine whether the stack has finished consuming the
 * portion of this page that was passed up with a previous packet. If
 * the page ref count is >1, we'll assume the "other" half page is
 * still busy, and this page cannot be reused.
 *
 * For larger pages, @truesize will be the actual space used by the
 * received packet (adjusted upward to an even multiple of the cache
 * line size). This will advance through the page by the amount
 * actually consumed by the received packets while there is still
 * space for a buffer. Each region of larger pages will be used at
 * most once, after which the page will not be reused.
 *
 * In either case, if the page is reusable its refcount is increased.
 **/
static bool iavf_can_reuse_rx_page(struct iavf_rx_buffer *rx_buffer)
{
	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
	struct page *page = rx_buffer->page;

	/* Is any reuse possible? */
	if (!dev_page_is_reusable(page))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((page_count(page) - pagecnt_bias) > 1))
		return false;
#else
#define IAVF_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IAVF_RXBUFFER_2048)
	if (rx_buffer->page_offset > IAVF_LAST_OFFSET)
		return false;
#endif

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(!pagecnt_bias)) {
		page_ref_add(page, USHRT_MAX);
		rx_buffer->pagecnt_bias = USHRT_MAX;
	}

	return true;
}
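
/* Editor's illustrative note (not part of the original source): on systems
 * with 4K pages, each Rx page is split into two 2K halves. iavf_add_rx_frag()
 * and iavf_construct_skb() below flip rx_buffer->page_offset between 0 and
 * 2048 with "page_offset ^= truesize", so one half is handed up to the stack
 * while the other half is recycled back to hardware, provided the check
 * above sees that the stack has already released its reference.
 */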

/**
 * iavf_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: packet length from rx_desc
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * It will just attach the page as a frag to the skb.
 *
 * The function will then update the page offset.
 **/
static void iavf_add_rx_frag(struct iavf_ring *rx_ring,
			     struct iavf_rx_buffer *rx_buffer,
			     struct sk_buff *skb,
			     unsigned int size)
{
#if (PAGE_SIZE < 8192)
	unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(size + iavf_rx_offset(rx_ring));
#endif

	if (!size)
		return;

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
			rx_buffer->page_offset, size, truesize);

	/* page is being used so we must update the page offset */
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif
}

/**
 * iavf_get_rx_buffer - Fetch Rx buffer and synchronize data for use
 * @rx_ring: rx descriptor ring to transact packets on
 * @size: size of buffer to add to skb
 *
 * This function will pull an Rx buffer from the ring and synchronize it
 * for use by the CPU.
 */
static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring,
						 const unsigned int size)
{
	struct iavf_rx_buffer *rx_buffer;

	rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
	prefetchw(rx_buffer->page);
	if (!size)
		return rx_buffer;

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      size,
				      DMA_FROM_DEVICE);

	/* We have pulled a buffer for use, so decrement pagecnt_bias */
	rx_buffer->pagecnt_bias--;

	return rx_buffer;
}

/**
 * iavf_construct_skb - Allocate skb and populate it
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: rx buffer to pull data from
 * @size: size of buffer to add to skb
 *
 * This function allocates an skb. It then populates it with the page
 * data from the current receive descriptor, taking care to set up the
 * skb correctly.
 */
static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,
					  struct iavf_rx_buffer *rx_buffer,
					  unsigned int size)
{
	void *va;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(size);
#endif
	unsigned int headlen;
	struct sk_buff *skb;

	if (!rx_buffer)
		return NULL;
	/* prefetch first cache line of first page */
	va = page_address(rx_buffer->page) + rx_buffer->page_offset;
	net_prefetch(va);

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
			       IAVF_RX_HDR_SIZE,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > IAVF_RX_HDR_SIZE)
		headlen = eth_get_headlen(skb->dev, va, IAVF_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));

	/* update all of the pointers */
	size -= headlen;
	if (size) {
		skb_add_rx_frag(skb, 0, rx_buffer->page,
				rx_buffer->page_offset + headlen,
				size, truesize);

		/* buffer is used by skb, update page_offset */
#if (PAGE_SIZE < 8192)
		rx_buffer->page_offset ^= truesize;
#else
		rx_buffer->page_offset += truesize;
#endif
	} else {
		/* buffer is unused, reset bias back to rx_buffer */
		rx_buffer->pagecnt_bias++;
	}

	return skb;
}

/**
 * iavf_build_skb - Build skb around an existing buffer
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buffer: Rx buffer to pull data from
 * @size: size of buffer to add to skb
 *
 * This function builds an skb around an existing Rx buffer, taking care
 * to set up the skb correctly and avoid any memcpy overhead.
 */
static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
				      struct iavf_rx_buffer *rx_buffer,
				      unsigned int size)
{
	void *va;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(IAVF_SKB_PAD + size);
#endif
	struct sk_buff *skb;

	if (!rx_buffer || !size)
		return NULL;
	/* prefetch first cache line of first page */
	va = page_address(rx_buffer->page) + rx_buffer->page_offset;
	net_prefetch(va);

	/* build an skb around the page buffer */
	skb = napi_build_skb(va - IAVF_SKB_PAD, truesize);
	if (unlikely(!skb))
		return NULL;

	/* update pointers within the skb to store the data */
	skb_reserve(skb, IAVF_SKB_PAD);
	__skb_put(skb, size);

	/* buffer is used by skb, update page_offset */
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif

	return skb;
}

/**
 * iavf_put_rx_buffer - Clean up used buffer and either recycle or free
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: rx buffer to pull data from
 *
 * This function will clean up the contents of the rx_buffer. It will
 * either recycle the buffer or unmap it and free the associated resources.
 */
static void iavf_put_rx_buffer(struct iavf_ring *rx_ring,
			       struct iavf_rx_buffer *rx_buffer)
{
	if (!rx_buffer)
		return;

	if (iavf_can_reuse_rx_page(rx_buffer)) {
		/* hand second half of page back to the ring */
		iavf_reuse_rx_page(rx_ring, rx_buffer);
		rx_ring->rx_stats.page_reuse_count++;
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
				     iavf_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE, IAVF_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buffer->page,
					rx_buffer->pagecnt_bias);
	}

	/* clear contents of buffer_info */
	rx_buffer->page = NULL;
}

/**
 * iavf_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * This function updates next to clean. If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
			    union iavf_rx_desc *rx_desc,
			    struct sk_buff *skb)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(IAVF_RX_DESC(rx_ring, ntc));

	/* if we are the last buffer then there is nothing else to do */
#define IAVF_RXD_EOF BIT(IAVF_RX_DESC_STATUS_EOF_SHIFT)
	if (likely(iavf_test_staterr(rx_desc, IAVF_RXD_EOF)))
		return false;

	rx_ring->rx_stats.non_eop_descs++;

	return true;
}

/**
 * iavf_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @rx_ring: rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing. The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 **/
static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	struct sk_buff *skb = rx_ring->skb;
	u16 cleaned_count = IAVF_DESC_UNUSED(rx_ring);
	bool failure = false;

	while (likely(total_rx_packets < (unsigned int)budget)) {
		struct iavf_rx_buffer *rx_buffer;
		union iavf_rx_desc *rx_desc;
		unsigned int size;
		u16 vlan_tag = 0;
		u8 rx_ptype;
		u64 qword;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IAVF_RX_BUFFER_WRITE) {
			failure = failure ||
				  iavf_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IAVF_RX_DESC(rx_ring, rx_ring->next_to_clean);

		/* status_error_len will always be zero for unused descriptors
		 * because it's cleared in cleanup, and overlaps with hdr_addr
		 * which is always zero because packet split isn't used, if the
		 * hardware wrote DD then the length will be non-zero
		 */
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we have
		 * verified the descriptor has been written back.
		 */
		dma_rmb();
#define IAVF_RXD_DD BIT(IAVF_RX_DESC_STATUS_DD_SHIFT)
		if (!iavf_test_staterr(rx_desc, IAVF_RXD_DD))
			break;

		size = FIELD_GET(IAVF_RXD_QW1_LENGTH_PBUF_MASK, qword);

		iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb);
		rx_buffer = iavf_get_rx_buffer(rx_ring, size);

		/* retrieve a buffer from the ring */
		if (skb)
			iavf_add_rx_frag(rx_ring, rx_buffer, skb, size);
		else if (ring_uses_build_skb(rx_ring))
			skb = iavf_build_skb(rx_ring, rx_buffer, size);
		else
			skb = iavf_construct_skb(rx_ring, rx_buffer, size);

		/* exit if we failed to retrieve a buffer */
		if (!skb) {
			rx_ring->rx_stats.alloc_buff_failed++;
			if (rx_buffer && size)
				rx_buffer->pagecnt_bias++;
			break;
		}

		iavf_put_rx_buffer(rx_ring, rx_buffer);
		cleaned_count++;

		if (iavf_is_non_eop(rx_ring, rx_desc, skb))
			continue;

		/* ERR_MASK will only have valid bits if EOP set, and
		 * what we are doing here is actually checking
		 * IAVF_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
		 * the error field
		 */
		if (unlikely(iavf_test_staterr(rx_desc, BIT(IAVF_RXD_QW1_ERROR_SHIFT)))) {
			dev_kfree_skb_any(skb);
			skb = NULL;
			continue;
		}

		if (iavf_cleanup_headers(rx_ring, skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
		rx_ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword);

		/* populate checksum, VLAN, and protocol */
		iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);

		if (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT) &&
		    rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1)
			vlan_tag = le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1);
		if (rx_desc->wb.qword2.ext_status &
		    cpu_to_le16(BIT(IAVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) &&
		    rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2)
			vlan_tag = le16_to_cpu(rx_desc->wb.qword2.l2tag2_2);

		iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
		iavf_receive_skb(rx_ring, skb, vlan_tag);
		skb = NULL;

		/* update budget accounting */
		total_rx_packets++;
	}

	rx_ring->skb = skb;

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	rx_ring->q_vector->rx.total_packets += total_rx_packets;
	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;

	/* guarantee a trip back through this routine if there was a failure */
	return failure ? budget : (int)total_rx_packets;
}

static inline u32 iavf_buildreg_itr(const int type, u16 itr)
{
	u32 val;

	/* We don't bother with setting the CLEARPBA bit as the data sheet
	 * points out doing so is "meaningless since it was already
	 * auto-cleared". The auto-clearing happens when the interrupt is
	 * asserted.
	 *
	 * Hardware errata 28 also indicates that writing to a
	 * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear
	 * an event in the PBA anyway so we need to rely on the automask
	 * to hold pending events for us until the interrupt is re-enabled
	 *
	 * The itr value is reported in microseconds, and the register
	 * value is recorded in 2 microsecond units. For this reason we
	 * only need to shift by the interval shift - 1 instead of the
	 * full value.
	 */
	itr &= IAVF_ITR_MASK;

	val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
	      (type << IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
	      (itr << (IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT - 1));

	return val;
}

/* a small macro to shorten up some long lines */
#define INTREG IAVF_VFINT_DYN_CTLN1

/* The act of updating the ITR will cause it to immediately trigger. In order
 * to prevent this from throwing off adaptive update statistics we defer the
 * update so that it can only happen so often. So after either Tx or Rx are
 * updated we make the adaptive scheme wait until either the ITR completely
 * expires via the next_update expiration or we have been through at least
 * 3 interrupts.
 */
#define ITR_COUNTDOWN_START 3

/**
 * iavf_update_enable_itr - Update itr and re-enable MSIX interrupt
 * @vsi: the VSI we care about
 * @q_vector: q_vector for which itr is being updated and interrupt enabled
 *
 **/
static void iavf_update_enable_itr(struct iavf_vsi *vsi,
				   struct iavf_q_vector *q_vector)
{
	struct iavf_hw *hw = &vsi->back->hw;
	u32 intval;

	/* These will do nothing if dynamic updates are not enabled */
	iavf_update_itr(q_vector, &q_vector->tx);
	iavf_update_itr(q_vector, &q_vector->rx);

	/* This block of logic allows us to get away with only updating
	 * one ITR value with each interrupt. The idea is to perform a
	 * pseudo-lazy update with the following criteria.
	 *
	 * 1. Rx is given higher priority than Tx if both are in same state
	 * 2. If we must reduce an ITR that is given highest priority.
	 * 3. We then give priority to increasing ITR based on amount.
	 */
	if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
		/* Rx ITR needs to be reduced, this is highest priority */
		intval = iavf_buildreg_itr(IAVF_RX_ITR,
					   q_vector->rx.target_itr);
		q_vector->rx.current_itr = q_vector->rx.target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
		   ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
		    (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
		/* Tx ITR needs to be reduced, this is second priority
		 * Tx ITR needs to be increased more than Rx, fourth priority
		 */
		intval = iavf_buildreg_itr(IAVF_TX_ITR,
					   q_vector->tx.target_itr);
		q_vector->tx.current_itr = q_vector->tx.target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
		/* Rx ITR needs to be increased, third priority */
		intval = iavf_buildreg_itr(IAVF_RX_ITR,
					   q_vector->rx.target_itr);
		q_vector->rx.current_itr = q_vector->rx.target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else {
		/* No ITR update, lowest priority */
		intval = iavf_buildreg_itr(IAVF_ITR_NONE, 0);
		if (q_vector->itr_countdown)
			q_vector->itr_countdown--;
	}

	if (!test_bit(__IAVF_VSI_DOWN, vsi->state))
		wr32(hw, INTREG(q_vector->reg_idx), intval);
}

1713/**
1714 * iavf_napi_poll - NAPI polling Rx/Tx cleanup routine
1715 * @napi: napi struct with our devices info in it
1716 * @budget: amount of work driver is allowed to do this pass, in packets
1717 *
1718 * This function will clean all queues associated with a q_vector.
1719 *
1720 * Returns the amount of work done
1721 **/
1722int iavf_napi_poll(struct napi_struct *napi, int budget)
1723{
1724 struct iavf_q_vector *q_vector =
1725 container_of(napi, struct iavf_q_vector, napi);
1726 struct iavf_vsi *vsi = q_vector->vsi;
1727 struct iavf_ring *ring;
1728 bool clean_complete = true;
1729 bool arm_wb = false;
1730 int budget_per_ring;
1731 int work_done = 0;
1732
1733 if (test_bit(__IAVF_VSI_DOWN, vsi->state)) {
1734 napi_complete(napi);
1735 return 0;
1736 }
1737
1738 /* Since the actual Tx work is minimal, we can give the Tx a larger
1739 * budget and be more aggressive about cleaning up the Tx descriptors.
1740 */
1741 iavf_for_each_ring(ring, q_vector->tx) {
1742 if (!iavf_clean_tx_irq(vsi, ring, budget)) {
1743 clean_complete = false;
1744 continue;
1745 }
1746 arm_wb |= ring->arm_wb;
1747 ring->arm_wb = false;
1748 }
1749
1750 /* Handle case where we are called by netpoll with a budget of 0 */
1751 if (budget <= 0)
1752 goto tx_only;
1753
1754 /* We attempt to distribute budget to each Rx queue fairly, but don't
1755 * allow the budget to go below 1 because that would exit polling early.
1756 */
1757 budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
1758
1759 iavf_for_each_ring(ring, q_vector->rx) {
1760 int cleaned = iavf_clean_rx_irq(ring, budget_per_ring);
1761
1762 work_done += cleaned;
1763 /* if we clean as many as budgeted, we must not be done */
1764 if (cleaned >= budget_per_ring)
1765 clean_complete = false;
1766 }
1767
1768 /* If work not completed, return budget and polling will return */
1769 if (!clean_complete) {
1770 int cpu_id = smp_processor_id();
1771
1772 /* It is possible that the interrupt affinity has changed but,
1773 * if the cpu is pegged at 100%, polling will never exit while
1774 * traffic continues and the interrupt will be stuck on this
1775 * cpu. We check to make sure affinity is correct before we
1776 * continue to poll, otherwise we must stop polling so the
1777 * interrupt can move to the correct cpu.
1778 */
1779 if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
1780 /* Tell napi that we are done polling */
1781 napi_complete_done(napi, work_done);
1782
1783 /* Force an interrupt */
1784 iavf_force_wb(vsi, q_vector);
1785
1786 /* Return budget-1 so that polling stops */
1787 return budget - 1;
1788 }
1789tx_only:
1790 if (arm_wb) {
1791 q_vector->tx.ring[0].tx_stats.tx_force_wb++;
1792 iavf_enable_wb_on_itr(vsi, q_vector);
1793 }
1794 return budget;
1795 }
1796
1797	if (q_vector->tx.ring[0].flags & IAVF_TXR_FLAGS_WB_ON_ITR)
1798 q_vector->arm_wb_state = false;
1799
1800 /* Exit the polling mode, but don't re-enable interrupts if stack might
1801 * poll us due to busy-polling
1802 */
1803 if (likely(napi_complete_done(napi, work_done)))
1804 iavf_update_enable_itr(vsi, q_vector);
1805
1806 return min_t(int, work_done, budget - 1);
1807}
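
/*
 * Illustrative sketch, not part of the driver: how the Rx budget split in
 * iavf_napi_poll() behaves for a hypothetical configuration using the default
 * NAPI weight of 64 and a vector servicing two ring pairs:
 *
 *	budget          = 64;
 *	num_ringpairs   = 2;
 *	budget_per_ring = max(budget / num_ringpairs, 1);	// 32 per Rx ring
 *
 * If any ring consumes its full share, clean_complete stays false and the
 * poll returns the whole budget, so the NAPI core keeps polling instead of
 * re-enabling the vector's interrupt via iavf_update_enable_itr().
 */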
1808
1809/**
1810 * iavf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
1811 * @skb: send buffer
1812 * @tx_ring: ring to send buffer on
1813 * @flags: the tx flags to be set
1814 *
1815 * Checks the skb and sets up the corresponding generic transmit flags
1816 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
1817 *
1818 * The function is void: if no VLAN tag is present, or the requested tag
1819 * location is not supported by the ring, *@flags is left unchanged.
1820 **/
1821static void iavf_tx_prepare_vlan_flags(struct sk_buff *skb,
1822 struct iavf_ring *tx_ring, u32 *flags)
1823{
1824 u32 tx_flags = 0;
1825
1827 /* stack will only request hardware VLAN insertion offload for protocols
1828 * that the driver supports and has enabled
1829 */
1830 if (!skb_vlan_tag_present(skb))
1831 return;
1832
1833 tx_flags |= skb_vlan_tag_get(skb) << IAVF_TX_FLAGS_VLAN_SHIFT;
1834 if (tx_ring->flags & IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2) {
1835 tx_flags |= IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
1836 } else if (tx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
1837 tx_flags |= IAVF_TX_FLAGS_HW_VLAN;
1838 } else {
1839 dev_dbg(tx_ring->dev, "Unsupported Tx VLAN tag location requested\n");
1840 return;
1841 }
1842
1843 *flags = tx_flags;
1844}
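
/*
 * Illustrative only: for an skb carrying VLAN tag 100 on a ring that inserts
 * the tag through L2TAG1, the helper above leaves *flags looking roughly like
 *
 *	tx_flags = (100 << IAVF_TX_FLAGS_VLAN_SHIFT) | IAVF_TX_FLAGS_HW_VLAN;
 *
 * iavf_tx_map() later recovers the tag with
 * FIELD_GET(IAVF_TX_FLAGS_VLAN_MASK, tx_flags) and sets IL2TAG1 in the
 * descriptor command field so the hardware inserts it on transmit.
 */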
1845
1846/**
1847 * iavf_tso - set up the tso context descriptor
1848 * @first: pointer to first Tx buffer for xmit
1849 * @hdr_len: ptr to the size of the packet header
1850 * @cd_type_cmd_tso_mss: Quad Word 1
1851 *
1852 * Returns 0 if no TSO can happen, 1 if tso is going, or error
1853 **/
1854static int iavf_tso(struct iavf_tx_buffer *first, u8 *hdr_len,
1855 u64 *cd_type_cmd_tso_mss)
1856{
1857 struct sk_buff *skb = first->skb;
1858 u64 cd_cmd, cd_tso_len, cd_mss;
1859 union {
1860 struct iphdr *v4;
1861 struct ipv6hdr *v6;
1862 unsigned char *hdr;
1863 } ip;
1864 union {
1865 struct tcphdr *tcp;
1866 struct udphdr *udp;
1867 unsigned char *hdr;
1868 } l4;
1869 u32 paylen, l4_offset;
1870 u16 gso_segs, gso_size;
1871 int err;
1872
1873 if (skb->ip_summed != CHECKSUM_PARTIAL)
1874 return 0;
1875
1876 if (!skb_is_gso(skb))
1877 return 0;
1878
1879 err = skb_cow_head(skb, 0);
1880 if (err < 0)
1881 return err;
1882
1883 ip.hdr = skb_network_header(skb);
1884 l4.hdr = skb_transport_header(skb);
1885
1886 /* initialize outer IP header fields */
1887 if (ip.v4->version == 4) {
1888 ip.v4->tot_len = 0;
1889 ip.v4->check = 0;
1890 } else {
1891 ip.v6->payload_len = 0;
1892 }
1893
1894 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
1895 SKB_GSO_GRE_CSUM |
1896 SKB_GSO_IPXIP4 |
1897 SKB_GSO_IPXIP6 |
1898 SKB_GSO_UDP_TUNNEL |
1899 SKB_GSO_UDP_TUNNEL_CSUM)) {
1900 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
1901 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
1902 l4.udp->len = 0;
1903
1904 /* determine offset of outer transport header */
1905 l4_offset = l4.hdr - skb->data;
1906
1907 /* remove payload length from outer checksum */
1908 paylen = skb->len - l4_offset;
1909 csum_replace_by_diff(&l4.udp->check,
1910 (__force __wsum)htonl(paylen));
1911 }
1912
1913 /* reset pointers to inner headers */
1914 ip.hdr = skb_inner_network_header(skb);
1915 l4.hdr = skb_inner_transport_header(skb);
1916
1917 /* initialize inner IP header fields */
1918 if (ip.v4->version == 4) {
1919 ip.v4->tot_len = 0;
1920 ip.v4->check = 0;
1921 } else {
1922 ip.v6->payload_len = 0;
1923 }
1924 }
1925
1926 /* determine offset of inner transport header */
1927 l4_offset = l4.hdr - skb->data;
1928 /* remove payload length from inner checksum */
1929 paylen = skb->len - l4_offset;
1930
1931 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
1932 csum_replace_by_diff(&l4.udp->check,
1933 (__force __wsum)htonl(paylen));
1934 /* compute length of UDP segmentation header */
1935 *hdr_len = (u8)sizeof(l4.udp) + l4_offset;
1936 } else {
1937 csum_replace_by_diff(&l4.tcp->check,
1938 (__force __wsum)htonl(paylen));
1939 /* compute length of TCP segmentation header */
1940 *hdr_len = (u8)((l4.tcp->doff * 4) + l4_offset);
1941 }
1942
1943 /* pull values out of skb_shinfo */
1944 gso_size = skb_shinfo(skb)->gso_size;
1945 gso_segs = skb_shinfo(skb)->gso_segs;
1946
1947 /* update GSO size and bytecount with header size */
1948 first->gso_segs = gso_segs;
1949 first->bytecount += (first->gso_segs - 1) * *hdr_len;
1950
1951 /* find the field values */
1952 cd_cmd = IAVF_TX_CTX_DESC_TSO;
1953 cd_tso_len = skb->len - *hdr_len;
1954 cd_mss = gso_size;
1955 *cd_type_cmd_tso_mss |= (cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
1956 (cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
1957 (cd_mss << IAVF_TXD_CTX_QW1_MSS_SHIFT);
1958 return 1;
1959}
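
/*
 * Worked example (illustrative, hypothetical numbers): a 64KiB GSO TCP skb
 * with a 54 byte Ethernet/IPv4/TCP header and gso_size 1448 is summarised
 * for the context descriptor as
 *
 *	cd_cmd     = IAVF_TX_CTX_DESC_TSO;
 *	cd_tso_len = skb->len - hdr_len;	// payload bytes to segment
 *	cd_mss     = 1448;			// bytes per wire segment
 *	qw1 |= (cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
 *	       (cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
 *	       (cd_mss << IAVF_TXD_CTX_QW1_MSS_SHIFT);
 *
 * first->bytecount also grows by (gso_segs - 1) * hdr_len because the
 * hardware replicates the header for every segment it emits.
 */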
1960
1961/**
1962 * iavf_tx_enable_csum - Enable Tx checksum offloads
1963 * @skb: send buffer
1964 * @tx_flags: pointer to Tx flags currently set
1965 * @td_cmd: Tx descriptor command bits to set
1966 * @td_offset: Tx descriptor header offsets to set
1967 * @tx_ring: Tx descriptor ring
1968 * @cd_tunneling: ptr to context desc bits
1969 **/
1970static int iavf_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
1971 u32 *td_cmd, u32 *td_offset,
1972 struct iavf_ring *tx_ring,
1973 u32 *cd_tunneling)
1974{
1975 union {
1976 struct iphdr *v4;
1977 struct ipv6hdr *v6;
1978 unsigned char *hdr;
1979 } ip;
1980 union {
1981 struct tcphdr *tcp;
1982 struct udphdr *udp;
1983 unsigned char *hdr;
1984 } l4;
1985 unsigned char *exthdr;
1986 u32 offset, cmd = 0;
1987 __be16 frag_off;
1988 u8 l4_proto = 0;
1989
1990 if (skb->ip_summed != CHECKSUM_PARTIAL)
1991 return 0;
1992
1993 ip.hdr = skb_network_header(skb);
1994 l4.hdr = skb_transport_header(skb);
1995
1996 /* compute outer L2 header size */
1997 offset = ((ip.hdr - skb->data) / 2) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
1998
1999 if (skb->encapsulation) {
2000 u32 tunnel = 0;
2001 /* define outer network header type */
2002 if (*tx_flags & IAVF_TX_FLAGS_IPV4) {
2003 tunnel |= (*tx_flags & IAVF_TX_FLAGS_TSO) ?
2004 IAVF_TX_CTX_EXT_IP_IPV4 :
2005 IAVF_TX_CTX_EXT_IP_IPV4_NO_CSUM;
2006
2007 l4_proto = ip.v4->protocol;
2008 } else if (*tx_flags & IAVF_TX_FLAGS_IPV6) {
2009 tunnel |= IAVF_TX_CTX_EXT_IP_IPV6;
2010
2011 exthdr = ip.hdr + sizeof(*ip.v6);
2012 l4_proto = ip.v6->nexthdr;
2013 if (l4.hdr != exthdr)
2014 ipv6_skip_exthdr(skb, exthdr - skb->data,
2015 &l4_proto, &frag_off);
2016 }
2017
2018 /* define outer transport */
2019 switch (l4_proto) {
2020 case IPPROTO_UDP:
2021 tunnel |= IAVF_TXD_CTX_UDP_TUNNELING;
2022 *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
2023 break;
2024 case IPPROTO_GRE:
2025 tunnel |= IAVF_TXD_CTX_GRE_TUNNELING;
2026 *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
2027 break;
2028 case IPPROTO_IPIP:
2029 case IPPROTO_IPV6:
2030 *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
2031 l4.hdr = skb_inner_network_header(skb);
2032 break;
2033 default:
2034 if (*tx_flags & IAVF_TX_FLAGS_TSO)
2035 return -1;
2036
2037 skb_checksum_help(skb);
2038 return 0;
2039 }
2040
2041 /* compute outer L3 header size */
2042 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
2043 IAVF_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
2044
2045 /* switch IP header pointer from outer to inner header */
2046 ip.hdr = skb_inner_network_header(skb);
2047
2048 /* compute tunnel header size */
2049 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
2050 IAVF_TXD_CTX_QW0_NATLEN_SHIFT;
2051
2052 /* indicate if we need to offload outer UDP header */
2053 if ((*tx_flags & IAVF_TX_FLAGS_TSO) &&
2054 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2055 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
2056 tunnel |= IAVF_TXD_CTX_QW0_L4T_CS_MASK;
2057
2058 /* record tunnel offload values */
2059 *cd_tunneling |= tunnel;
2060
2061 /* switch L4 header pointer from outer to inner */
2062 l4.hdr = skb_inner_transport_header(skb);
2063 l4_proto = 0;
2064
2065 /* reset type as we transition from outer to inner headers */
2066 *tx_flags &= ~(IAVF_TX_FLAGS_IPV4 | IAVF_TX_FLAGS_IPV6);
2067 if (ip.v4->version == 4)
2068 *tx_flags |= IAVF_TX_FLAGS_IPV4;
2069 if (ip.v6->version == 6)
2070 *tx_flags |= IAVF_TX_FLAGS_IPV6;
2071 }
2072
2073 /* Enable IP checksum offloads */
2074 if (*tx_flags & IAVF_TX_FLAGS_IPV4) {
2075 l4_proto = ip.v4->protocol;
2076 /* the stack computes the IP header already, the only time we
2077 * need the hardware to recompute it is in the case of TSO.
2078 */
2079 cmd |= (*tx_flags & IAVF_TX_FLAGS_TSO) ?
2080 IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM :
2081 IAVF_TX_DESC_CMD_IIPT_IPV4;
2082 } else if (*tx_flags & IAVF_TX_FLAGS_IPV6) {
2083 cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
2084
2085 exthdr = ip.hdr + sizeof(*ip.v6);
2086 l4_proto = ip.v6->nexthdr;
2087 if (l4.hdr != exthdr)
2088 ipv6_skip_exthdr(skb, exthdr - skb->data,
2089 &l4_proto, &frag_off);
2090 }
2091
2092 /* compute inner L3 header size */
2093 offset |= ((l4.hdr - ip.hdr) / 4) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
2094
2095 /* Enable L4 checksum offloads */
2096 switch (l4_proto) {
2097 case IPPROTO_TCP:
2098 /* enable checksum offloads */
2099 cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
2100 offset |= l4.tcp->doff << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2101 break;
2102 case IPPROTO_SCTP:
2103 /* enable SCTP checksum offload */
2104 cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
2105 offset |= (sizeof(struct sctphdr) >> 2) <<
2106 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2107 break;
2108 case IPPROTO_UDP:
2109 /* enable UDP checksum offload */
2110 cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
2111 offset |= (sizeof(struct udphdr) >> 2) <<
2112 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2113 break;
2114 default:
2115 if (*tx_flags & IAVF_TX_FLAGS_TSO)
2116 return -1;
2117 skb_checksum_help(skb);
2118 return 0;
2119 }
2120
2121 *td_cmd |= cmd;
2122 *td_offset |= offset;
2123
2124 return 1;
2125}
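
/*
 * Illustrative only: for an untagged, non-tunnelled TCP/IPv4 frame without
 * TSO the helper above ends up with (lengths in the units the descriptor
 * expects):
 *
 *	cmd    = IAVF_TX_DESC_CMD_IIPT_IPV4 | IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
 *	offset = (14 / 2) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT |	// MAC, 2B words
 *		 (20 / 4) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT |		// IP, 4B words
 *		 tcp->doff << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;	// TCP, 4B words
 *
 * With TSO the only change on this path is IIPT_IPV4_CSUM, so the hardware
 * also recomputes the IPv4 header checksum it rewrites for each segment.
 */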
2126
2127/**
2128 * iavf_create_tx_ctx - Build the Tx context descriptor
2129 * @tx_ring: ring to create the descriptor on
2130 * @cd_type_cmd_tso_mss: Quad Word 1
2131 * @cd_tunneling: Quad Word 0 - bits 0-31
2132 * @cd_l2tag2: Quad Word 0 - bits 32-63
2133 **/
2134static void iavf_create_tx_ctx(struct iavf_ring *tx_ring,
2135 const u64 cd_type_cmd_tso_mss,
2136 const u32 cd_tunneling, const u32 cd_l2tag2)
2137{
2138 struct iavf_tx_context_desc *context_desc;
2139 int i = tx_ring->next_to_use;
2140
2141 if ((cd_type_cmd_tso_mss == IAVF_TX_DESC_DTYPE_CONTEXT) &&
2142 !cd_tunneling && !cd_l2tag2)
2143 return;
2144
2145 /* grab the next descriptor */
2146 context_desc = IAVF_TX_CTXTDESC(tx_ring, i);
2147
2148 i++;
2149 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2150
2151 /* cpu_to_le32 and assign to struct fields */
2152 context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
2153 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
2154 context_desc->rsvd = cpu_to_le16(0);
2155 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
2156}
2157
2158/**
2159 * __iavf_chk_linearize - Check if there are more than 8 buffers per packet
2160 * @skb: send buffer
2161 *
2162 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
2163 * and so we need to figure out the cases where we need to linearize the skb.
2164 *
2165 * For TSO we need to count the TSO header and segment payload separately.
2166 * As such we need to check cases where we have 7 fragments or more as we
2167 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
2168 * the segment payload in the first descriptor, and another 7 for the
2169 * fragments.
2170 **/
2171bool __iavf_chk_linearize(struct sk_buff *skb)
2172{
2173 const skb_frag_t *frag, *stale;
2174 int nr_frags, sum;
2175
2176 /* no need to check if number of frags is less than 7 */
2177 nr_frags = skb_shinfo(skb)->nr_frags;
2178 if (nr_frags < (IAVF_MAX_BUFFER_TXD - 1))
2179 return false;
2180
2181 /* We need to walk through the list and validate that each group
2182 * of 6 fragments totals at least gso_size.
2183 */
2184 nr_frags -= IAVF_MAX_BUFFER_TXD - 2;
2185 frag = &skb_shinfo(skb)->frags[0];
2186
2187 /* Initialize size to the negative value of gso_size minus 1. We
2188 * use this as the worst case scenario in which the frag ahead
2189 * of us only provides one byte which is why we are limited to 6
2190 * descriptors for a single transmit as the header and previous
2191 * fragment are already consuming 2 descriptors.
2192 */
2193 sum = 1 - skb_shinfo(skb)->gso_size;
2194
2195 /* Add size of frags 0 through 4 to create our initial sum */
2196 sum += skb_frag_size(frag++);
2197 sum += skb_frag_size(frag++);
2198 sum += skb_frag_size(frag++);
2199 sum += skb_frag_size(frag++);
2200 sum += skb_frag_size(frag++);
2201
2202 /* Walk through fragments adding latest fragment, testing it, and
2203 * then removing stale fragments from the sum.
2204 */
2205 for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
2206 int stale_size = skb_frag_size(stale);
2207
2208 sum += skb_frag_size(frag++);
2209
2210 /* The stale fragment may present us with a smaller
2211 * descriptor than the actual fragment size. To account
2212 * for that we need to remove all the data on the front and
2213 * figure out what the remainder would be in the last
2214 * descriptor associated with the fragment.
2215 */
2216 if (stale_size > IAVF_MAX_DATA_PER_TXD) {
2217 int align_pad = -(skb_frag_off(stale)) &
2218 (IAVF_MAX_READ_REQ_SIZE - 1);
2219
2220 sum -= align_pad;
2221 stale_size -= align_pad;
2222
2223 do {
2224 sum -= IAVF_MAX_DATA_PER_TXD_ALIGNED;
2225 stale_size -= IAVF_MAX_DATA_PER_TXD_ALIGNED;
2226 } while (stale_size > IAVF_MAX_DATA_PER_TXD);
2227 }
2228
2229 /* if sum is negative we failed to make sufficient progress */
2230 if (sum < 0)
2231 return true;
2232
2233 if (!nr_frags--)
2234 break;
2235
2236 sum -= stale_size;
2237 }
2238
2239 return false;
2240}
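
/*
 * Worked example for the check above (hypothetical sizes): a TSO skb with
 * gso_size 1448 built from 16 fragments of 100 bytes starts the running sum
 * at 1 - 1448 = -1447; the first five fragments only bring it to -947, and
 * adding a sixth still leaves it negative, so the function returns true and
 * the caller linearizes rather than asking the hardware to chain more than
 * IAVF_MAX_BUFFER_TXD buffers for one segment.  With MSS-sized (1448 byte)
 * fragments the sum is already positive after the initial five additions and
 * the walk completes without ever going negative.
 */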
2241
2242/**
2243 * __iavf_maybe_stop_tx - 2nd level check for tx stop conditions
2244 * @tx_ring: the ring to be checked
2245 * @size: the size buffer we want to assure is available
2246 *
2247 * Returns -EBUSY if a stop is needed, else 0
2248 **/
2249int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size)
2250{
2251 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
2252 /* Memory barrier before checking head and tail */
2253 smp_mb();
2254
2255 /* Check again in a case another CPU has just made room available. */
2256 if (likely(IAVF_DESC_UNUSED(tx_ring) < size))
2257 return -EBUSY;
2258
2259 /* A reprieve! - use start_queue because it doesn't call schedule */
2260 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
2261 ++tx_ring->tx_stats.restart_queue;
2262 return 0;
2263}
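
/*
 * For context, a minimal sketch of the fast-path wrapper used by callers such
 * as iavf_tx_map(); the real helper lives in iavf_txrx.h, so treat this as an
 * assumption about its shape rather than a copy of it:
 *
 *	static inline int iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size)
 *	{
 *		if (likely(IAVF_DESC_UNUSED(tx_ring) >= size))
 *			return 0;
 *		return __iavf_maybe_stop_tx(tx_ring, size);
 *	}
 *
 * Only the slow path above pays for the subqueue stop and the smp_mb()
 * pairing with the cleanup side.
 */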
2264
2265/**
2266 * iavf_tx_map - Build the Tx descriptor
2267 * @tx_ring: ring to send buffer on
2268 * @skb: send buffer
2269 * @first: first buffer info buffer to use
2270 * @tx_flags: collected send information
2271 * @hdr_len: size of the packet header
2272 * @td_cmd: the command field in the descriptor
2273 * @td_offset: offset for checksum or crc
2274 **/
2275static void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
2276 struct iavf_tx_buffer *first, u32 tx_flags,
2277 const u8 hdr_len, u32 td_cmd, u32 td_offset)
2278{
2279 unsigned int data_len = skb->data_len;
2280 unsigned int size = skb_headlen(skb);
2281 skb_frag_t *frag;
2282 struct iavf_tx_buffer *tx_bi;
2283 struct iavf_tx_desc *tx_desc;
2284 u16 i = tx_ring->next_to_use;
2285 u32 td_tag = 0;
2286 dma_addr_t dma;
2287
2288 if (tx_flags & IAVF_TX_FLAGS_HW_VLAN) {
2289 td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
2290 td_tag = FIELD_GET(IAVF_TX_FLAGS_VLAN_MASK, tx_flags);
2291 }
2292
2293 first->tx_flags = tx_flags;
2294
2295 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
2296
2297 tx_desc = IAVF_TX_DESC(tx_ring, i);
2298 tx_bi = first;
2299
2300 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
2301 unsigned int max_data = IAVF_MAX_DATA_PER_TXD_ALIGNED;
2302
2303 if (dma_mapping_error(tx_ring->dev, dma))
2304 goto dma_error;
2305
2306 /* record length, and DMA address */
2307 dma_unmap_len_set(tx_bi, len, size);
2308 dma_unmap_addr_set(tx_bi, dma, dma);
2309
2310 /* align size to end of page */
2311 max_data += -dma & (IAVF_MAX_READ_REQ_SIZE - 1);
2312 tx_desc->buffer_addr = cpu_to_le64(dma);
2313
2314 while (unlikely(size > IAVF_MAX_DATA_PER_TXD)) {
2315 tx_desc->cmd_type_offset_bsz =
2316 build_ctob(td_cmd, td_offset,
2317 max_data, td_tag);
2318
2319 tx_desc++;
2320 i++;
2321
2322 if (i == tx_ring->count) {
2323 tx_desc = IAVF_TX_DESC(tx_ring, 0);
2324 i = 0;
2325 }
2326
2327 dma += max_data;
2328 size -= max_data;
2329
2330 max_data = IAVF_MAX_DATA_PER_TXD_ALIGNED;
2331 tx_desc->buffer_addr = cpu_to_le64(dma);
2332 }
2333
2334 if (likely(!data_len))
2335 break;
2336
2337 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
2338 size, td_tag);
2339
2340 tx_desc++;
2341 i++;
2342
2343 if (i == tx_ring->count) {
2344 tx_desc = IAVF_TX_DESC(tx_ring, 0);
2345 i = 0;
2346 }
2347
2348 size = skb_frag_size(frag);
2349 data_len -= size;
2350
2351 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
2352 DMA_TO_DEVICE);
2353
2354 tx_bi = &tx_ring->tx_bi[i];
2355 }
2356
2357 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
2358
2359 i++;
2360 if (i == tx_ring->count)
2361 i = 0;
2362
2363 tx_ring->next_to_use = i;
2364
2365 iavf_maybe_stop_tx(tx_ring, DESC_NEEDED);
2366
2367 /* write last descriptor with RS and EOP bits */
2368 td_cmd |= IAVF_TXD_CMD;
2369 tx_desc->cmd_type_offset_bsz =
2370 build_ctob(td_cmd, td_offset, size, td_tag);
2371
2372 skb_tx_timestamp(skb);
2373
2374 /* Force memory writes to complete before letting h/w know there
2375 * are new descriptors to fetch.
2376 *
2377 * We also use this memory barrier to make certain all of the
2378 * status bits have been updated before next_to_watch is written.
2379 */
2380 wmb();
2381
2382 /* set next_to_watch value indicating a packet is present */
2383 first->next_to_watch = tx_desc;
2384
2385 /* notify HW of packet */
2386 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
2387 writel(i, tx_ring->tail);
2388 }
2389
2390 return;
2391
2392dma_error:
2393 dev_info(tx_ring->dev, "TX DMA map failed\n");
2394
2395 /* clear dma mappings for failed tx_bi map */
2396 for (;;) {
2397 tx_bi = &tx_ring->tx_bi[i];
2398 iavf_unmap_and_free_tx_resource(tx_ring, tx_bi);
2399 if (tx_bi == first)
2400 break;
2401 if (i == 0)
2402 i = tx_ring->count;
2403 i--;
2404 }
2405
2406 tx_ring->next_to_use = i;
2407}
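
/*
 * Illustrative note: the last descriptor written by iavf_tx_map() carries
 * IAVF_TXD_CMD, i.e. IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_RS, roughly
 *
 *	td_cmd |= IAVF_TXD_CMD;
 *	tx_desc->cmd_type_offset_bsz =
 *		build_ctob(td_cmd, td_offset, size, td_tag);
 *
 * Setting RS only on the final descriptor is what later lets
 * iavf_clean_tx_irq() poll DESC_DONE on first->next_to_watch and reclaim the
 * whole chain in one pass; the tail doorbell is written only when the stack
 * reports no further frames (netdev_xmit_more()) or the queue has stopped.
 */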
2408
2409/**
2410 * iavf_xmit_frame_ring - Sends buffer on Tx ring
2411 * @skb: send buffer
2412 * @tx_ring: ring to send buffer on
2413 *
2414 * Returns NETDEV_TX_OK if sent, else an error code
2415 **/
2416static netdev_tx_t iavf_xmit_frame_ring(struct sk_buff *skb,
2417 struct iavf_ring *tx_ring)
2418{
2419 u64 cd_type_cmd_tso_mss = IAVF_TX_DESC_DTYPE_CONTEXT;
2420 u32 cd_tunneling = 0, cd_l2tag2 = 0;
2421 struct iavf_tx_buffer *first;
2422 u32 td_offset = 0;
2423 u32 tx_flags = 0;
2424 __be16 protocol;
2425 u32 td_cmd = 0;
2426 u8 hdr_len = 0;
2427 int tso, count;
2428
2429 /* prefetch the data, we'll need it later */
2430 prefetch(skb->data);
2431
2432 iavf_trace(xmit_frame_ring, skb, tx_ring);
2433
2434 count = iavf_xmit_descriptor_count(skb);
2435 if (iavf_chk_linearize(skb, count)) {
2436 if (__skb_linearize(skb)) {
2437 dev_kfree_skb_any(skb);
2438 return NETDEV_TX_OK;
2439 }
2440 count = iavf_txd_use_count(skb->len);
2441 tx_ring->tx_stats.tx_linearize++;
2442 }
2443
2444 /* need: 1 descriptor per page * PAGE_SIZE/IAVF_MAX_DATA_PER_TXD,
2445 * + 1 desc for skb_head_len/IAVF_MAX_DATA_PER_TXD,
2446 * + 4 desc gap to avoid the cache line where head is,
2447 * + 1 desc for context descriptor,
2448 * otherwise try next time
2449 */
2450 if (iavf_maybe_stop_tx(tx_ring, count + 4 + 1)) {
2451 tx_ring->tx_stats.tx_busy++;
2452 return NETDEV_TX_BUSY;
2453 }
2454
2455 /* record the location of the first descriptor for this packet */
2456 first = &tx_ring->tx_bi[tx_ring->next_to_use];
2457 first->skb = skb;
2458 first->bytecount = skb->len;
2459 first->gso_segs = 1;
2460
2461 /* prepare the xmit flags */
2462 iavf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags);
2463 if (tx_flags & IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN) {
2464 cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2 <<
2465 IAVF_TXD_CTX_QW1_CMD_SHIFT;
2466 cd_l2tag2 = FIELD_GET(IAVF_TX_FLAGS_VLAN_MASK, tx_flags);
2467 }
2468
2469 /* obtain protocol of skb */
2470 protocol = vlan_get_protocol(skb);
2471
2472 /* setup IPv4/IPv6 offloads */
2473 if (protocol == htons(ETH_P_IP))
2474 tx_flags |= IAVF_TX_FLAGS_IPV4;
2475 else if (protocol == htons(ETH_P_IPV6))
2476 tx_flags |= IAVF_TX_FLAGS_IPV6;
2477
2478 tso = iavf_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
2479
2480 if (tso < 0)
2481 goto out_drop;
2482 else if (tso)
2483 tx_flags |= IAVF_TX_FLAGS_TSO;
2484
2485 /* Always offload the checksum, since it's in the data descriptor */
2486 tso = iavf_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
2487 tx_ring, &cd_tunneling);
2488 if (tso < 0)
2489 goto out_drop;
2490
2491 /* always enable CRC insertion offload */
2492 td_cmd |= IAVF_TX_DESC_CMD_ICRC;
2493
2494 iavf_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
2495 cd_tunneling, cd_l2tag2);
2496
2497 iavf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
2498 td_cmd, td_offset);
2499
2500 return NETDEV_TX_OK;
2501
2502out_drop:
2503 iavf_trace(xmit_frame_ring_drop, first->skb, tx_ring);
2504 dev_kfree_skb_any(first->skb);
2505 first->skb = NULL;
2506 return NETDEV_TX_OK;
2507}
2508
2509/**
2510 * iavf_xmit_frame - Selects the correct VSI and Tx queue to send buffer
2511 * @skb: send buffer
2512 * @netdev: network interface device structure
2513 *
2514 * Returns NETDEV_TX_OK if sent, else an error code
2515 **/
2516netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2517{
2518 struct iavf_adapter *adapter = netdev_priv(netdev);
2519 struct iavf_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping];
2520
2521 /* hardware can't handle really short frames, hardware padding works
2522 * beyond this point
2523 */
2524 if (unlikely(skb->len < IAVF_MIN_TX_LEN)) {
2525 if (skb_pad(skb, IAVF_MIN_TX_LEN - skb->len))
2526 return NETDEV_TX_OK;
2527 skb->len = IAVF_MIN_TX_LEN;
2528 skb_set_tail_pointer(skb, IAVF_MIN_TX_LEN);
2529 }
2530
2531 return iavf_xmit_frame_ring(skb, tx_ring);
2532}
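
/*
 * Hypothetical usage sketch (the actual wiring lives outside this file, in
 * iavf_main.c): iavf_xmit_frame() is the driver's ndo_start_xmit hook,
 * roughly
 *
 *	static const struct net_device_ops iavf_netdev_ops = {
 *		.ndo_start_xmit	= iavf_xmit_frame,
 *		// ...
 *	};
 *
 * so the stack picks the Tx queue and the handler above only maps
 * skb->queue_mapping onto the adapter's tx_rings[] array.
 */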
126
127/**
128 * iavf_detect_recover_hung - Function to detect and recover hung queues
129 * @vsi: pointer to vsi struct with tx queues
130 *
131 * The VSI's netdev owns the Tx queues. Check each Tx queue owned by this VSI
132 * and, if it appears hung, trigger recovery by issuing a SW interrupt.
133 **/
134void iavf_detect_recover_hung(struct iavf_vsi *vsi)
135{
136 struct iavf_ring *tx_ring = NULL;
137 struct net_device *netdev;
138 unsigned int i;
139 int packets;
140
141 if (!vsi)
142 return;
143
144 if (test_bit(__IAVF_VSI_DOWN, vsi->state))
145 return;
146
147 netdev = vsi->netdev;
148 if (!netdev)
149 return;
150
151 if (!netif_carrier_ok(netdev))
152 return;
153
154 for (i = 0; i < vsi->back->num_active_queues; i++) {
155 tx_ring = &vsi->back->tx_rings[i];
156 if (tx_ring && tx_ring->desc) {
157 /* If packet counter has not changed the queue is
158 * likely stalled, so force an interrupt for this
159 * queue.
160 *
161 * prev_pkt_ctr would be negative if there was no
162 * pending work.
163 */
164 packets = tx_ring->stats.packets & INT_MAX;
165 if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
166 iavf_force_wb(vsi, tx_ring->q_vector);
167 continue;
168 }
169
170 /* Memory barrier between read of packet count and call
171 * to iavf_get_tx_pending()
172 */
173 smp_rmb();
174 tx_ring->tx_stats.prev_pkt_ctr =
175 iavf_get_tx_pending(tx_ring, true) ? packets : -1;
176 }
177 }
178}
179
180#define WB_STRIDE 4
181
182/**
183 * iavf_clean_tx_irq - Reclaim resources after transmit completes
184 * @vsi: the VSI we care about
185 * @tx_ring: Tx ring to clean
186 * @napi_budget: Used to determine if we are in netpoll
187 *
188 * Returns true if there's any budget left (e.g. the clean is finished)
189 **/
190static bool iavf_clean_tx_irq(struct iavf_vsi *vsi,
191 struct iavf_ring *tx_ring, int napi_budget)
192{
193 int i = tx_ring->next_to_clean;
194 struct iavf_tx_buffer *tx_buf;
195 struct iavf_tx_desc *tx_desc;
196 unsigned int total_bytes = 0, total_packets = 0;
197 unsigned int budget = vsi->work_limit;
198
199 tx_buf = &tx_ring->tx_bi[i];
200 tx_desc = IAVF_TX_DESC(tx_ring, i);
201 i -= tx_ring->count;
202
203 do {
204 struct iavf_tx_desc *eop_desc = tx_buf->next_to_watch;
205
206 /* if next_to_watch is not set then there is no work pending */
207 if (!eop_desc)
208 break;
209
210 /* prevent any other reads prior to eop_desc */
211 smp_rmb();
212
213 iavf_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
214 /* if the descriptor isn't done, no work yet to do */
215 if (!(eop_desc->cmd_type_offset_bsz &
216 cpu_to_le64(IAVF_TX_DESC_DTYPE_DESC_DONE)))
217 break;
218
219 /* clear next_to_watch to prevent false hangs */
220 tx_buf->next_to_watch = NULL;
221
222 /* update the statistics for this packet */
223 total_bytes += tx_buf->bytecount;
224 total_packets += tx_buf->gso_segs;
225
226 /* free the skb */
227 napi_consume_skb(tx_buf->skb, napi_budget);
228
229 /* unmap skb header data */
230 dma_unmap_single(tx_ring->dev,
231 dma_unmap_addr(tx_buf, dma),
232 dma_unmap_len(tx_buf, len),
233 DMA_TO_DEVICE);
234
235 /* clear tx_buffer data */
236 tx_buf->skb = NULL;
237 dma_unmap_len_set(tx_buf, len, 0);
238
239 /* unmap remaining buffers */
240 while (tx_desc != eop_desc) {
241 iavf_trace(clean_tx_irq_unmap,
242 tx_ring, tx_desc, tx_buf);
243
244 tx_buf++;
245 tx_desc++;
246 i++;
247 if (unlikely(!i)) {
248 i -= tx_ring->count;
249 tx_buf = tx_ring->tx_bi;
250 tx_desc = IAVF_TX_DESC(tx_ring, 0);
251 }
252
253 /* unmap any remaining paged data */
254 if (dma_unmap_len(tx_buf, len)) {
255 dma_unmap_page(tx_ring->dev,
256 dma_unmap_addr(tx_buf, dma),
257 dma_unmap_len(tx_buf, len),
258 DMA_TO_DEVICE);
259 dma_unmap_len_set(tx_buf, len, 0);
260 }
261 }
262
263 /* move us one more past the eop_desc for start of next pkt */
264 tx_buf++;
265 tx_desc++;
266 i++;
267 if (unlikely(!i)) {
268 i -= tx_ring->count;
269 tx_buf = tx_ring->tx_bi;
270 tx_desc = IAVF_TX_DESC(tx_ring, 0);
271 }
272
273 prefetch(tx_desc);
274
275 /* update budget accounting */
276 budget--;
277 } while (likely(budget));
278
279 i += tx_ring->count;
280 tx_ring->next_to_clean = i;
281 u64_stats_update_begin(&tx_ring->syncp);
282 tx_ring->stats.bytes += total_bytes;
283 tx_ring->stats.packets += total_packets;
284 u64_stats_update_end(&tx_ring->syncp);
285 tx_ring->q_vector->tx.total_bytes += total_bytes;
286 tx_ring->q_vector->tx.total_packets += total_packets;
287
288 if (tx_ring->flags & IAVF_TXR_FLAGS_WB_ON_ITR) {
289 /* check to see if there are < 4 descriptors
290 * waiting to be written back, then kick the hardware to force
291 * them to be written back in case we stay in NAPI.
292 * In this mode on X722 we do not enable Interrupt.
293 */
294 unsigned int j = iavf_get_tx_pending(tx_ring, false);
295
296 if (budget &&
297 ((j / WB_STRIDE) == 0) && (j > 0) &&
298 !test_bit(__IAVF_VSI_DOWN, vsi->state) &&
299 (IAVF_DESC_UNUSED(tx_ring) != tx_ring->count))
300 tx_ring->arm_wb = true;
301 }
302
303 /* notify netdev of completed buffers */
304 netdev_tx_completed_queue(txring_txq(tx_ring),
305 total_packets, total_bytes);
306
307#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
308 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
309 (IAVF_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
310 /* Make sure that anybody stopping the queue after this
311 * sees the new next_to_clean.
312 */
313 smp_mb();
314 if (__netif_subqueue_stopped(tx_ring->netdev,
315 tx_ring->queue_index) &&
316 !test_bit(__IAVF_VSI_DOWN, vsi->state)) {
317 netif_wake_subqueue(tx_ring->netdev,
318 tx_ring->queue_index);
319 ++tx_ring->tx_stats.restart_queue;
320 }
321 }
322
323 return !!budget;
324}
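
/*
 * Illustrative note on the wake threshold above: assuming DESC_NEEDED in
 * iavf_txrx.h is on the order of MAX_SKB_FRAGS plus a small reserve (an
 * assumption, not a quote of the header), the queue is only restarted once
 *
 *	IAVF_DESC_UNUSED(tx_ring) >= 2 * DESC_NEEDED
 *
 * so a freshly woken queue can always accept at least one worst-case,
 * maximally fragmented frame without immediately stopping again.
 */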
325
326/**
327 * iavf_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
328 * @vsi: the VSI we care about
329 * @q_vector: the vector on which to enable writeback
330 *
331 **/
332static void iavf_enable_wb_on_itr(struct iavf_vsi *vsi,
333 struct iavf_q_vector *q_vector)
334{
335 u16 flags = q_vector->tx.ring[0].flags;
336 u32 val;
337
338 if (!(flags & IAVF_TXR_FLAGS_WB_ON_ITR))
339 return;
340
341 if (q_vector->arm_wb_state)
342 return;
343
344 val = IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
345 IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK; /* set noitr */
346
347 wr32(&vsi->back->hw,
348 IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx), val);
349 q_vector->arm_wb_state = true;
350}
351
352/**
353 * iavf_force_wb - Issue SW Interrupt so HW does a wb
354 * @vsi: the VSI we care about
355 * @q_vector: the vector on which to force writeback
356 *
357 **/
358void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector)
359{
360 u32 val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
361 IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
362 IAVF_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
363 IAVF_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK
364 /* allow 00 to be written to the index */;
365
366 wr32(&vsi->back->hw,
367 IAVF_VFINT_DYN_CTLN1(q_vector->reg_idx),
368 val);
369}
370
371static inline bool iavf_container_is_rx(struct iavf_q_vector *q_vector,
372 struct iavf_ring_container *rc)
373{
374 return &q_vector->rx == rc;
375}
376
377static inline unsigned int iavf_itr_divisor(struct iavf_q_vector *q_vector)
378{
379 unsigned int divisor;
380
381 switch (q_vector->adapter->link_speed) {
382 case VIRTCHNL_LINK_SPEED_40GB:
383 divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 1024;
384 break;
385 case VIRTCHNL_LINK_SPEED_25GB:
386 case VIRTCHNL_LINK_SPEED_20GB:
387 divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 512;
388 break;
389 default:
390 case VIRTCHNL_LINK_SPEED_10GB:
391 divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 256;
392 break;
393 case VIRTCHNL_LINK_SPEED_1GB:
394 case VIRTCHNL_LINK_SPEED_100MB:
395 divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 32;
396 break;
397 }
398
399 return divisor;
400}
401
402/**
403 * iavf_update_itr - update the dynamic ITR value based on statistics
404 * @q_vector: structure containing interrupt and ring information
405 * @rc: structure containing ring performance data
406 *
407 * Stores a new ITR value based on packets and byte
408 * counts during the last interrupt. The advantage of per interrupt
409 * computation is faster updates and more accurate ITR for the current
410 * traffic pattern. Constants in this function were computed
411 * based on theoretical maximum wire speed and thresholds were set based
412 * on testing data as well as attempting to minimize response time
413 * while increasing bulk throughput.
414 **/
415static void iavf_update_itr(struct iavf_q_vector *q_vector,
416 struct iavf_ring_container *rc)
417{
418 unsigned int avg_wire_size, packets, bytes, itr;
419 unsigned long next_update = jiffies;
420
421 /* If we don't have any rings just leave ourselves set for maximum
422 * possible latency so we take ourselves out of the equation.
423 */
424 if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
425 return;
426
427 /* For Rx we want to push the delay up and default to low latency.
428 * for Tx we want to pull the delay down and default to high latency.
429 */
430 itr = iavf_container_is_rx(q_vector, rc) ?
431 IAVF_ITR_ADAPTIVE_MIN_USECS | IAVF_ITR_ADAPTIVE_LATENCY :
432 IAVF_ITR_ADAPTIVE_MAX_USECS | IAVF_ITR_ADAPTIVE_LATENCY;
433
434 /* If we didn't update within up to 1 - 2 jiffies we can assume
435 * that either packets are coming in so slow there hasn't been
436 * any work, or that there is so much work that NAPI is dealing
437 * with interrupt moderation and we don't need to do anything.
438 */
439 if (time_after(next_update, rc->next_update))
440 goto clear_counts;
441
442 /* If itr_countdown is set it means we programmed an ITR within
443 * the last 4 interrupt cycles. This has a side effect of us
444 * potentially firing an early interrupt. In order to work around
445 * this we need to throw out any data received for a few
446 * interrupts following the update.
447 */
448 if (q_vector->itr_countdown) {
449 itr = rc->target_itr;
450 goto clear_counts;
451 }
452
453 packets = rc->total_packets;
454 bytes = rc->total_bytes;
455
456 if (iavf_container_is_rx(q_vector, rc)) {
457 /* If Rx there are 1 to 4 packets and bytes are less than
458 * 9000 assume insufficient data to use bulk rate limiting
459 * approach unless Tx is already in bulk rate limiting. We
460 * are likely latency driven.
461 */
462 if (packets && packets < 4 && bytes < 9000 &&
463 (q_vector->tx.target_itr & IAVF_ITR_ADAPTIVE_LATENCY)) {
464 itr = IAVF_ITR_ADAPTIVE_LATENCY;
465 goto adjust_by_size;
466 }
467 } else if (packets < 4) {
468 /* If we have Tx and Rx ITR maxed and Tx ITR is running in
469 * bulk mode and we are receiving 4 or fewer packets just
470 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
471 * that the Rx can relax.
472 */
473 if (rc->target_itr == IAVF_ITR_ADAPTIVE_MAX_USECS &&
474 (q_vector->rx.target_itr & IAVF_ITR_MASK) ==
475 IAVF_ITR_ADAPTIVE_MAX_USECS)
476 goto clear_counts;
477 } else if (packets > 32) {
478 /* If we have processed over 32 packets in a single interrupt
479 * for Tx assume we need to switch over to "bulk" mode.
480 */
481 rc->target_itr &= ~IAVF_ITR_ADAPTIVE_LATENCY;
482 }
483
484 /* We have no packets to actually measure against. This means
485 * either one of the other queues on this vector is active or
486 * we are a Tx queue doing TSO with too high of an interrupt rate.
487 *
488 * Between 4 and 56 we can assume that our current interrupt delay
489 * is only slightly too low. As such we should increase it by a small
490 * fixed amount.
491 */
492 if (packets < 56) {
493 itr = rc->target_itr + IAVF_ITR_ADAPTIVE_MIN_INC;
494 if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) {
495 itr &= IAVF_ITR_ADAPTIVE_LATENCY;
496 itr += IAVF_ITR_ADAPTIVE_MAX_USECS;
497 }
498 goto clear_counts;
499 }
500
501 if (packets <= 256) {
502 itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
503 itr &= IAVF_ITR_MASK;
504
505 /* Between 56 and 112 is our "goldilocks" zone where we are
506 * working out "just right". Just report that our current
507 * ITR is good for us.
508 */
509 if (packets <= 112)
510 goto clear_counts;
511
512 /* If packet count is 128 or greater we are likely looking
513 * at a slight overrun of the delay we want. Try halving
514 * our delay to see if that will cut the number of packets
515 * in half per interrupt.
516 */
517 itr /= 2;
518 itr &= IAVF_ITR_MASK;
519 if (itr < IAVF_ITR_ADAPTIVE_MIN_USECS)
520 itr = IAVF_ITR_ADAPTIVE_MIN_USECS;
521
522 goto clear_counts;
523 }
524
525 /* The paths below assume we are dealing with a bulk ITR since
526 * number of packets is greater than 256. We are just going to have
527 * to compute a value and try to bring the count under control,
528 * though for smaller packet sizes there isn't much we can do as
529 * NAPI polling will likely be kicking in sooner rather than later.
530 */
531 itr = IAVF_ITR_ADAPTIVE_BULK;
532
533adjust_by_size:
534 /* If packet counts are 256 or greater we can assume we have a gross
535 * overestimation of what the rate should be. Instead of trying to fine
536 * tune it just use the formula below to try and dial in an exact value
537 * give the current packet size of the frame.
538 */
539 avg_wire_size = bytes / packets;
540
541 /* The following is a crude approximation of:
542 * wmem_default / (size + overhead) = desired_pkts_per_int
543 * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
544 * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
545 *
546 * Assuming wmem_default is 212992 and overhead is 640 bytes per
547 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
548 * formula down to
549 *
550 * (170 * (size + 24)) / (size + 640) = ITR
551 *
552 * We first do some math on the packet size and then finally bitshift
553 * by 8 after rounding up. We also have to account for PCIe link speed
554 * difference as ITR scales based on this.
555 */
556 if (avg_wire_size <= 60) {
557 /* Start at 250k ints/sec */
558 avg_wire_size = 4096;
559 } else if (avg_wire_size <= 380) {
560 /* 250K ints/sec to 60K ints/sec */
561 avg_wire_size *= 40;
562 avg_wire_size += 1696;
563 } else if (avg_wire_size <= 1084) {
564 /* 60K ints/sec to 36K ints/sec */
565 avg_wire_size *= 15;
566 avg_wire_size += 11452;
567 } else if (avg_wire_size <= 1980) {
568 /* 36K ints/sec to 30K ints/sec */
569 avg_wire_size *= 5;
570 avg_wire_size += 22420;
571 } else {
572 /* plateau at a limit of 30K ints/sec */
573 avg_wire_size = 32256;
574 }
575
576 /* If we are in low latency mode halve our delay which doubles the
577 * rate to somewhere between 100K to 16K ints/sec
578 */
579 if (itr & IAVF_ITR_ADAPTIVE_LATENCY)
580 avg_wire_size /= 2;
581
582 /* Resultant value is 256 times larger than it needs to be. This
583 * gives us room to adjust the value as needed to either increase
584 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
585 *
586 * Use addition as we have already recorded the new latency flag
587 * for the ITR value.
588 */
589 itr += DIV_ROUND_UP(avg_wire_size, iavf_itr_divisor(q_vector)) *
590 IAVF_ITR_ADAPTIVE_MIN_INC;
591
592 if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) {
593 itr &= IAVF_ITR_ADAPTIVE_LATENCY;
594 itr += IAVF_ITR_ADAPTIVE_MAX_USECS;
595 }
596
597clear_counts:
598 /* write back value */
599 rc->target_itr = itr;
600
601 /* next update should occur within next jiffy */
602 rc->next_update = next_update + 1;
603
604 rc->total_bytes = 0;
605 rc->total_packets = 0;
606}
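
/*
 * Worked example for the adjust_by_size scaling above (illustrative
 * arithmetic only): for an average wire size of 1500 bytes the exact formula
 * quoted in the comment gives
 *
 *	(170 * (1500 + 24)) / (1500 + 640) ~= 121
 *
 * while the piecewise approximation computes 1500 * 5 + 22420 = 29920, i.e.
 * roughly 117 after the implicit divide by 256, which matches the "256 times
 * larger than it needs to be" note ahead of the final division by
 * iavf_itr_divisor().
 */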
607
608/**
609 * iavf_setup_tx_descriptors - Allocate the Tx descriptors
610 * @tx_ring: the tx ring to set up
611 *
612 * Return 0 on success, negative on error
613 **/
614int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring)
615{
616 struct device *dev = tx_ring->dev;
617 int bi_size;
618
619 if (!dev)
620 return -ENOMEM;
621
622 /* warn if we are about to overwrite the pointer */
623 WARN_ON(tx_ring->tx_bi);
624 bi_size = sizeof(struct iavf_tx_buffer) * tx_ring->count;
625 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
626 if (!tx_ring->tx_bi)
627 goto err;
628
629 /* round up to nearest 4K */
630 tx_ring->size = tx_ring->count * sizeof(struct iavf_tx_desc);
631 tx_ring->size = ALIGN(tx_ring->size, 4096);
632 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
633 &tx_ring->dma, GFP_KERNEL);
634 if (!tx_ring->desc) {
635 dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
636 tx_ring->size);
637 goto err;
638 }
639
640 tx_ring->next_to_use = 0;
641 tx_ring->next_to_clean = 0;
642 tx_ring->tx_stats.prev_pkt_ctr = -1;
643 return 0;
644
645err:
646 kfree(tx_ring->tx_bi);
647 tx_ring->tx_bi = NULL;
648 return -ENOMEM;
649}
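
/*
 * Sizing example (illustrative): with a hypothetical 512 entry ring and
 * 16 byte Tx descriptors the DMA area is 512 * 16 = 8192 bytes, which
 * ALIGN(..., 4096) leaves unchanged; a 520 entry ring (8320 bytes) would be
 * rounded up to 12288 bytes instead.
 */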
650
651/**
652 * iavf_clean_rx_ring - Free Rx buffers
653 * @rx_ring: ring to be cleaned
654 **/
655void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
656{
657 unsigned long bi_size;
658 u16 i;
659
660 /* ring already cleared, nothing to do */
661 if (!rx_ring->rx_bi)
662 return;
663
664 if (rx_ring->skb) {
665 dev_kfree_skb(rx_ring->skb);
666 rx_ring->skb = NULL;
667 }
668
669 /* Free all the Rx ring sk_buffs */
670 for (i = 0; i < rx_ring->count; i++) {
671 struct iavf_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
672
673 if (!rx_bi->page)
674 continue;
675
676 /* Invalidate cache lines that may have been written to by
677 * device so that we avoid corrupting memory.
678 */
679 dma_sync_single_range_for_cpu(rx_ring->dev,
680 rx_bi->dma,
681 rx_bi->page_offset,
682 rx_ring->rx_buf_len,
683 DMA_FROM_DEVICE);
684
685 /* free resources associated with mapping */
686 dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
687 iavf_rx_pg_size(rx_ring),
688 DMA_FROM_DEVICE,
689 IAVF_RX_DMA_ATTR);
690
691 __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
692
693 rx_bi->page = NULL;
694 rx_bi->page_offset = 0;
695 }
696
697 bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
698 memset(rx_ring->rx_bi, 0, bi_size);
699
700 /* Zero out the descriptor ring */
701 memset(rx_ring->desc, 0, rx_ring->size);
702
703 rx_ring->next_to_alloc = 0;
704 rx_ring->next_to_clean = 0;
705 rx_ring->next_to_use = 0;
706}
707
708/**
709 * iavf_free_rx_resources - Free Rx resources
710 * @rx_ring: ring to clean the resources from
711 *
712 * Free all receive software resources
713 **/
714void iavf_free_rx_resources(struct iavf_ring *rx_ring)
715{
716 iavf_clean_rx_ring(rx_ring);
717 kfree(rx_ring->rx_bi);
718 rx_ring->rx_bi = NULL;
719
720 if (rx_ring->desc) {
721 dma_free_coherent(rx_ring->dev, rx_ring->size,
722 rx_ring->desc, rx_ring->dma);
723 rx_ring->desc = NULL;
724 }
725}
726
727/**
728 * iavf_setup_rx_descriptors - Allocate Rx descriptors
729 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
730 *
731 * Returns 0 on success, negative on failure
732 **/
733int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)
734{
735 struct device *dev = rx_ring->dev;
736 int bi_size;
737
738 /* warn if we are about to overwrite the pointer */
739 WARN_ON(rx_ring->rx_bi);
740 bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
741 rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
742 if (!rx_ring->rx_bi)
743 goto err;
744
745 u64_stats_init(&rx_ring->syncp);
746
747 /* Round up to nearest 4K */
748 rx_ring->size = rx_ring->count * sizeof(union iavf_32byte_rx_desc);
749 rx_ring->size = ALIGN(rx_ring->size, 4096);
750 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
751 &rx_ring->dma, GFP_KERNEL);
752
753 if (!rx_ring->desc) {
754 dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
755 rx_ring->size);
756 goto err;
757 }
758
759 rx_ring->next_to_alloc = 0;
760 rx_ring->next_to_clean = 0;
761 rx_ring->next_to_use = 0;
762
763 return 0;
764err:
765 kfree(rx_ring->rx_bi);
766 rx_ring->rx_bi = NULL;
767 return -ENOMEM;
768}
769
770/**
771 * iavf_release_rx_desc - Store the new tail and head values
772 * @rx_ring: ring to bump
773 * @val: new head index
774 **/
775static inline void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val)
776{
777 rx_ring->next_to_use = val;
778
779 /* update next to alloc since we have filled the ring */
780 rx_ring->next_to_alloc = val;
781
782 /* Force memory writes to complete before letting h/w
783 * know there are new descriptors to fetch. (Only
784 * applicable for weak-ordered memory model archs,
785 * such as IA-64).
786 */
787 wmb();
788 writel(val, rx_ring->tail);
789}
790
791/**
792 * iavf_rx_offset - Return expected offset into page to access data
793 * @rx_ring: Ring we are requesting offset of
794 *
795 * Returns the offset value for ring into the data buffer.
796 */
797static inline unsigned int iavf_rx_offset(struct iavf_ring *rx_ring)
798{
799 return ring_uses_build_skb(rx_ring) ? IAVF_SKB_PAD : 0;
800}
801
802/**
803 * iavf_alloc_mapped_page - recycle or make a new page
804 * @rx_ring: ring to use
805 * @bi: rx_buffer struct to modify
806 *
807 * Returns true if the page was successfully allocated or
808 * reused.
809 **/
810static bool iavf_alloc_mapped_page(struct iavf_ring *rx_ring,
811 struct iavf_rx_buffer *bi)
812{
813 struct page *page = bi->page;
814 dma_addr_t dma;
815
816 /* since we are recycling buffers we should seldom need to alloc */
817 if (likely(page)) {
818 rx_ring->rx_stats.page_reuse_count++;
819 return true;
820 }
821
822 /* alloc new page for storage */
823 page = dev_alloc_pages(iavf_rx_pg_order(rx_ring));
824 if (unlikely(!page)) {
825 rx_ring->rx_stats.alloc_page_failed++;
826 return false;
827 }
828
829 /* map page for use */
830 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
831 iavf_rx_pg_size(rx_ring),
832 DMA_FROM_DEVICE,
833 IAVF_RX_DMA_ATTR);
834
835 /* if mapping failed free memory back to system since
836 * there isn't much point in holding memory we can't use
837 */
838 if (dma_mapping_error(rx_ring->dev, dma)) {
839 __free_pages(page, iavf_rx_pg_order(rx_ring));
840 rx_ring->rx_stats.alloc_page_failed++;
841 return false;
842 }
843
844 bi->dma = dma;
845 bi->page = page;
846 bi->page_offset = iavf_rx_offset(rx_ring);
847
848 /* initialize pagecnt_bias to 1 representing we fully own page */
849 bi->pagecnt_bias = 1;
850
851 return true;
852}
853
854/**
855 * iavf_receive_skb - Send a completed packet up the stack
856 * @rx_ring: rx ring in play
857 * @skb: packet to send up
858 * @vlan_tag: vlan tag for packet
859 **/
860static void iavf_receive_skb(struct iavf_ring *rx_ring,
861 struct sk_buff *skb, u16 vlan_tag)
862{
863 struct iavf_q_vector *q_vector = rx_ring->q_vector;
864
865 if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
866 (vlan_tag & VLAN_VID_MASK))
867 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
868
869 napi_gro_receive(&q_vector->napi, skb);
870}
871
872/**
873 * iavf_alloc_rx_buffers - Replace used receive buffers
874 * @rx_ring: ring to place buffers on
875 * @cleaned_count: number of buffers to replace
876 *
877 * Returns false if all allocations were successful, true if any fail
878 **/
879bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)
880{
881 u16 ntu = rx_ring->next_to_use;
882 union iavf_rx_desc *rx_desc;
883 struct iavf_rx_buffer *bi;
884
885 /* do nothing if no valid netdev defined */
886 if (!rx_ring->netdev || !cleaned_count)
887 return false;
888
889 rx_desc = IAVF_RX_DESC(rx_ring, ntu);
890 bi = &rx_ring->rx_bi[ntu];
891
892 do {
893 if (!iavf_alloc_mapped_page(rx_ring, bi))
894 goto no_buffers;
895
896 /* sync the buffer for use by the device */
897 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
898 bi->page_offset,
899 rx_ring->rx_buf_len,
900 DMA_FROM_DEVICE);
901
902 /* Refresh the desc even if buffer_addrs didn't change
903 * because each write-back erases this info.
904 */
905 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
906
907 rx_desc++;
908 bi++;
909 ntu++;
910 if (unlikely(ntu == rx_ring->count)) {
911 rx_desc = IAVF_RX_DESC(rx_ring, 0);
912 bi = rx_ring->rx_bi;
913 ntu = 0;
914 }
915
916 /* clear the status bits for the next_to_use descriptor */
917 rx_desc->wb.qword1.status_error_len = 0;
918
919 cleaned_count--;
920 } while (cleaned_count);
921
922 if (rx_ring->next_to_use != ntu)
923 iavf_release_rx_desc(rx_ring, ntu);
924
925 return false;
926
927no_buffers:
928 if (rx_ring->next_to_use != ntu)
929 iavf_release_rx_desc(rx_ring, ntu);
930
931 /* make sure to come back via polling to try again after
932 * allocation failure
933 */
934 return true;
935}
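
/*
 * Typical refill pattern, sketched under the assumption that the Rx clean
 * loop (outside this excerpt) batches allocations by cleaned descriptor
 * count; the names follow the driver's conventions but are not a quote:
 *
 *	if (cleaned_count >= IAVF_RX_BUFFER_WRITE) {
 *		failure |= iavf_alloc_rx_buffers(rx_ring, cleaned_count);
 *		cleaned_count = 0;
 *	}
 *
 * Returning true on allocation failure lets that loop report the ring as not
 * done, so NAPI polls again and retries the refill instead of stalling.
 */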
936
937/**
938 * iavf_rx_checksum - Indicate in skb if hw indicated a good cksum
939 * @vsi: the VSI we care about
940 * @skb: skb currently being received and modified
941 * @rx_desc: the receive descriptor
942 **/
943static inline void iavf_rx_checksum(struct iavf_vsi *vsi,
944 struct sk_buff *skb,
945 union iavf_rx_desc *rx_desc)
946{
947 struct iavf_rx_ptype_decoded decoded;
948 u32 rx_error, rx_status;
949 bool ipv4, ipv6;
950 u8 ptype;
951 u64 qword;
952
953 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
954 ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >> IAVF_RXD_QW1_PTYPE_SHIFT;
955 rx_error = (qword & IAVF_RXD_QW1_ERROR_MASK) >>
956 IAVF_RXD_QW1_ERROR_SHIFT;
957 rx_status = (qword & IAVF_RXD_QW1_STATUS_MASK) >>
958 IAVF_RXD_QW1_STATUS_SHIFT;
959 decoded = decode_rx_desc_ptype(ptype);
960
961 skb->ip_summed = CHECKSUM_NONE;
962
963 skb_checksum_none_assert(skb);
964
965 /* Rx csum enabled and ip headers found? */
966 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
967 return;
968
969 /* did the hardware decode the packet and checksum? */
970 if (!(rx_status & BIT(IAVF_RX_DESC_STATUS_L3L4P_SHIFT)))
971 return;
972
973 /* both known and outer_ip must be set for the below code to work */
974 if (!(decoded.known && decoded.outer_ip))
975 return;
976
977 ipv4 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) &&
978 (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV4);
979 ipv6 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) &&
980 (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV6);
981
982 if (ipv4 &&
983 (rx_error & (BIT(IAVF_RX_DESC_ERROR_IPE_SHIFT) |
984 BIT(IAVF_RX_DESC_ERROR_EIPE_SHIFT))))
985 goto checksum_fail;
986
987 /* likely incorrect csum if alternate IP extension headers found */
988 if (ipv6 &&
989 rx_status & BIT(IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT))
990 /* don't increment checksum err here, non-fatal err */
991 return;
992
993 /* there was some L4 error, count error and punt packet to the stack */
994 if (rx_error & BIT(IAVF_RX_DESC_ERROR_L4E_SHIFT))
995 goto checksum_fail;
996
997 /* handle packets that were not able to be checksummed due
998 * to arrival speed, in this case the stack can compute
999 * the csum.
1000 */
1001 if (rx_error & BIT(IAVF_RX_DESC_ERROR_PPRS_SHIFT))
1002 return;
1003
1004 /* Only report checksum unnecessary for TCP, UDP, or SCTP */
1005 switch (decoded.inner_prot) {
1006 case IAVF_RX_PTYPE_INNER_PROT_TCP:
1007 case IAVF_RX_PTYPE_INNER_PROT_UDP:
1008 case IAVF_RX_PTYPE_INNER_PROT_SCTP:
1009 skb->ip_summed = CHECKSUM_UNNECESSARY;
1010 fallthrough;
1011 default:
1012 break;
1013 }
1014
1015 return;
1016
1017checksum_fail:
1018 vsi->back->hw_csum_rx_error++;
1019}
1020
1021/**
1022 * iavf_ptype_to_htype - get a hash type
1023 * @ptype: the ptype value from the descriptor
1024 *
1025 * Returns a hash type to be used by skb_set_hash
1026 **/
1027static inline int iavf_ptype_to_htype(u8 ptype)
1028{
1029 struct iavf_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1030
1031 if (!decoded.known)
1032 return PKT_HASH_TYPE_NONE;
1033
1034 if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
1035 decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1036 return PKT_HASH_TYPE_L4;
1037 else if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
1038 decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1039 return PKT_HASH_TYPE_L3;
1040 else
1041 return PKT_HASH_TYPE_L2;
1042}
1043
1044/**
1045 * iavf_rx_hash - set the hash value in the skb
1046 * @ring: descriptor ring
1047 * @rx_desc: specific descriptor
1048 * @skb: skb currently being received and modified
1049 * @rx_ptype: Rx packet type
1050 **/
1051static inline void iavf_rx_hash(struct iavf_ring *ring,
1052 union iavf_rx_desc *rx_desc,
1053 struct sk_buff *skb,
1054 u8 rx_ptype)
1055{
1056 u32 hash;
1057 const __le64 rss_mask =
1058 cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH <<
1059 IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT);
1060
1061	if (!(ring->netdev->features & NETIF_F_RXHASH))
1062 return;
1063
1064 if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
1065 hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1066 skb_set_hash(skb, hash, iavf_ptype_to_htype(rx_ptype));
1067 }
1068}
1069
1070/**
1071 * iavf_process_skb_fields - Populate skb header fields from Rx descriptor
1072 * @rx_ring: rx descriptor ring packet is being transacted on
1073 * @rx_desc: pointer to the EOP Rx descriptor
1074 * @skb: pointer to current skb being populated
1075 * @rx_ptype: the packet type decoded by hardware
1076 *
1077 * This function checks the ring, descriptor, and packet information in
1078 * order to populate the hash, checksum, VLAN, protocol, and
1079 * other fields within the skb.
1080 **/
1081static inline
1082void iavf_process_skb_fields(struct iavf_ring *rx_ring,
1083 union iavf_rx_desc *rx_desc, struct sk_buff *skb,
1084 u8 rx_ptype)
1085{
1086 iavf_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
1087
1088 iavf_rx_checksum(rx_ring->vsi, skb, rx_desc);
1089
1090 skb_record_rx_queue(skb, rx_ring->queue_index);
1091
1092 /* modifies the skb - consumes the enet header */
1093 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1094}
1095
1096/**
1097 * iavf_cleanup_headers - Correct empty headers
1098 * @rx_ring: rx descriptor ring packet is being transacted on
1099 * @skb: pointer to current skb being fixed
1100 *
1101 * Also address the case where we are pulling data in on pages only
1102 * and as such no data is present in the skb header.
1103 *
1104 * In addition if skb is not at least 60 bytes we need to pad it so that
1105 * it is large enough to qualify as a valid Ethernet frame.
1106 *
1107 * Returns true if an error was encountered and skb was freed.
1108 **/
1109static bool iavf_cleanup_headers(struct iavf_ring *rx_ring, struct sk_buff *skb)
1110{
1111 /* if eth_skb_pad returns an error the skb was freed */
1112 if (eth_skb_pad(skb))
1113 return true;
1114
1115 return false;
1116}
1117
1118/**
1119 * iavf_reuse_rx_page - page flip buffer and store it back on the ring
1120 * @rx_ring: rx descriptor ring to store buffers on
1121 * @old_buff: donor buffer to have page reused
1122 *
1123 * Synchronizes page for reuse by the adapter
1124 **/
1125static void iavf_reuse_rx_page(struct iavf_ring *rx_ring,
1126 struct iavf_rx_buffer *old_buff)
1127{
1128 struct iavf_rx_buffer *new_buff;
1129 u16 nta = rx_ring->next_to_alloc;
1130
1131 new_buff = &rx_ring->rx_bi[nta];
1132
1133 /* update, and store next to alloc */
1134 nta++;
1135 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1136
1137 /* transfer page from old buffer to new buffer */
1138 new_buff->dma = old_buff->dma;
1139 new_buff->page = old_buff->page;
1140 new_buff->page_offset = old_buff->page_offset;
1141 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
1142}
1143
1144/**
1145 * iavf_page_is_reusable - check if any reuse is possible
1146 * @page: page struct to check
1147 *
1148 * A page is not reusable if it was allocated under low memory
1149 * conditions, or it's not in the same NUMA node as this CPU.
1150 */
1151static inline bool iavf_page_is_reusable(struct page *page)
1152{
1153 return (page_to_nid(page) == numa_mem_id()) &&
1154 !page_is_pfmemalloc(page);
1155}
1156
1157/**
1158 * iavf_can_reuse_rx_page - Determine if this page can be reused by
1159 * the adapter for another receive
1160 *
1161 * @rx_buffer: buffer containing the page
1162 *
1163 * If page is reusable, rx_buffer->page_offset is adjusted to point to
1164 * an unused region in the page.
1165 *
1166 * For small pages, @truesize will be a constant value, half the size
1167 * of the memory at page. We'll attempt to alternate between high and
1168 * low halves of the page, with one half ready for use by the hardware
1169 * and the other half being consumed by the stack. We use the page
1170 * ref count to determine whether the stack has finished consuming the
1171 * portion of this page that was passed up with a previous packet. If
1172 * the page ref count is >1, we'll assume the "other" half page is
1173 * still busy, and this page cannot be reused.
1174 *
1175 * For larger pages, @truesize will be the actual space used by the
1176 * received packet (adjusted upward to an even multiple of the cache
1177 * line size). This will advance through the page by the amount
1178 * actually consumed by the received packets while there is still
1179 * space for a buffer. Each region of larger pages will be used at
1180 * most once, after which the page will not be reused.
1181 *
1182 * In either case, if the page is reusable its refcount is increased.
1183 **/
1184static bool iavf_can_reuse_rx_page(struct iavf_rx_buffer *rx_buffer)
1185{
1186 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
1187 struct page *page = rx_buffer->page;
1188
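 /* pagecnt_bias is the number of page references the driver still
 * holds; page_count() minus the bias is what the stack has yet to
 * release
 */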
1189 /* Is any reuse possible? */
1190 if (unlikely(!iavf_page_is_reusable(page)))
1191 return false;
1192
1193#if (PAGE_SIZE < 8192)
1194 /* if we are only owner of page we can reuse it */
1195 if (unlikely((page_count(page) - pagecnt_bias) > 1))
1196 return false;
1197#else
1198#define IAVF_LAST_OFFSET \
1199 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IAVF_RXBUFFER_2048)
1200 if (rx_buffer->page_offset > IAVF_LAST_OFFSET)
1201 return false;
1202#endif
1203
1204 /* If we have drained the page fragment pool we need to update
1205 * the pagecnt_bias and page count so that we fully restock the
1206 * number of references the driver holds.
1207 */
1208 if (unlikely(!pagecnt_bias)) {
1209 page_ref_add(page, USHRT_MAX);
1210 rx_buffer->pagecnt_bias = USHRT_MAX;
1211 }
1212
1213 return true;
1214}
1215
1216/**
1217 * iavf_add_rx_frag - Add contents of Rx buffer to sk_buff
1218 * @rx_ring: rx descriptor ring to transact packets on
1219 * @rx_buffer: buffer containing page to add
1220 * @skb: sk_buff to place the data into
1221 * @size: packet length from rx_desc
1222 *
1223 * This function will add the data contained in rx_buffer->page to the skb.
1224 * It will just attach the page as a frag to the skb.
1225 *
1226 * The function will then update the page offset.
1227 **/
1228static void iavf_add_rx_frag(struct iavf_ring *rx_ring,
1229 struct iavf_rx_buffer *rx_buffer,
1230 struct sk_buff *skb,
1231 unsigned int size)
1232{
1233#if (PAGE_SIZE < 8192)
1234 unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
1235#else
1236 unsigned int truesize = SKB_DATA_ALIGN(size + iavf_rx_offset(rx_ring));
1237#endif
1238
1239 if (!size)
1240 return;
1241
1242 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
1243 rx_buffer->page_offset, size, truesize);
1244
1245 /* page is being used so we must update the page offset */
1246#if (PAGE_SIZE < 8192)
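 /* truesize is half the page here, so the XOR flips page_offset
 * between the lower and upper halves of the page (e.g. toggling by
 * 2048 with 4K pages)
 */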
1247 rx_buffer->page_offset ^= truesize;
1248#else
1249 rx_buffer->page_offset += truesize;
1250#endif
1251}
1252
1253/**
1254 * iavf_get_rx_buffer - Fetch Rx buffer and synchronize data for use
1255 * @rx_ring: rx descriptor ring to transact packets on
1256 * @size: size of buffer to add to skb
1257 *
1258 * This function will pull an Rx buffer from the ring and synchronize it
1259 * for use by the CPU.
1260 */
1261static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring,
1262 const unsigned int size)
1263{
1264 struct iavf_rx_buffer *rx_buffer;
1265
1266 if (!size)
1267 return NULL;
1268
1269 rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
1270 prefetchw(rx_buffer->page);
1271
1272 /* we are reusing so sync this buffer for CPU use */
1273 dma_sync_single_range_for_cpu(rx_ring->dev,
1274 rx_buffer->dma,
1275 rx_buffer->page_offset,
1276 size,
1277 DMA_FROM_DEVICE);
1278
1279 /* We have pulled a buffer for use, so decrement pagecnt_bias */
1280 rx_buffer->pagecnt_bias--;
1281
1282 return rx_buffer;
1283}
1284
1285/**
1286 * iavf_construct_skb - Allocate skb and populate it
1287 * @rx_ring: rx descriptor ring to transact packets on
1288 * @rx_buffer: rx buffer to pull data from
1289 * @size: size of buffer to add to skb
1290 *
1291 * This function allocates an skb. It then populates it with the page
1292 * data from the current receive descriptor, taking care to set up the
1293 * skb correctly.
1294 */
1295static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,
1296 struct iavf_rx_buffer *rx_buffer,
1297 unsigned int size)
1298{
1299 void *va;
1300#if (PAGE_SIZE < 8192)
1301 unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
1302#else
1303 unsigned int truesize = SKB_DATA_ALIGN(size);
1304#endif
1305 unsigned int headlen;
1306 struct sk_buff *skb;
1307
1308 if (!rx_buffer)
1309 return NULL;
1310 /* prefetch first cache line of first page */
1311 va = page_address(rx_buffer->page) + rx_buffer->page_offset;
1312 prefetch(va);
1313#if L1_CACHE_BYTES < 128
1314 prefetch(va + L1_CACHE_BYTES);
1315#endif
1316
1317 /* allocate a skb to store the frags */
1318 skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
1319 IAVF_RX_HDR_SIZE,
1320 GFP_ATOMIC | __GFP_NOWARN);
1321 if (unlikely(!skb))
1322 return NULL;
1323
1324 /* Determine how much of the packet to copy into the skb head */
1325 headlen = size;
1326 if (headlen > IAVF_RX_HDR_SIZE)
1327 headlen = eth_get_headlen(skb->dev, va, IAVF_RX_HDR_SIZE);
1328
1329 /* align pull length to size of long to optimize memcpy performance */
1330 memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
1331
1332 /* update all of the pointers */
1333 size -= headlen;
1334 if (size) {
1335 skb_add_rx_frag(skb, 0, rx_buffer->page,
1336 rx_buffer->page_offset + headlen,
1337 size, truesize);
1338
1339 /* buffer is used by skb, update page_offset */
1340#if (PAGE_SIZE < 8192)
1341 rx_buffer->page_offset ^= truesize;
1342#else
1343 rx_buffer->page_offset += truesize;
1344#endif
1345 } else {
1346 /* buffer is unused, reset bias back to rx_buffer */
1347 rx_buffer->pagecnt_bias++;
1348 }
1349
1350 return skb;
1351}
1352
1353/**
1354 * iavf_build_skb - Build skb around an existing buffer
1355 * @rx_ring: Rx descriptor ring to transact packets on
1356 * @rx_buffer: Rx buffer to pull data from
1357 * @size: size of buffer to add to skb
1358 *
1359 * This function builds an skb around an existing Rx buffer, taking care
1360 * to set up the skb correctly and avoid any memcpy overhead.
1361 */
1362static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
1363 struct iavf_rx_buffer *rx_buffer,
1364 unsigned int size)
1365{
1366 void *va;
1367#if (PAGE_SIZE < 8192)
1368 unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
1369#else
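 /* for large pages, truesize must cover the headroom, the received
 * data, and the skb_shared_info that build_skb() places at the end
 * of the buffer
 */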
1370 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
1371 SKB_DATA_ALIGN(IAVF_SKB_PAD + size);
1372#endif
1373 struct sk_buff *skb;
1374
1375 if (!rx_buffer)
1376 return NULL;
1377 /* prefetch first cache line of first page */
1378 va = page_address(rx_buffer->page) + rx_buffer->page_offset;
1379 prefetch(va);
1380#if L1_CACHE_BYTES < 128
1381 prefetch(va + L1_CACHE_BYTES);
1382#endif
1383 /* build an skb around the page buffer */
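 /* va points at the packet data, IAVF_SKB_PAD bytes into the buffer,
 * so back up to the true start of the buffer for build_skb()
 */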
1384 skb = build_skb(va - IAVF_SKB_PAD, truesize);
1385 if (unlikely(!skb))
1386 return NULL;
1387
1388 /* update pointers within the skb to store the data */
1389 skb_reserve(skb, IAVF_SKB_PAD);
1390 __skb_put(skb, size);
1391
1392 /* buffer is used by skb, update page_offset */
1393#if (PAGE_SIZE < 8192)
1394 rx_buffer->page_offset ^= truesize;
1395#else
1396 rx_buffer->page_offset += truesize;
1397#endif
1398
1399 return skb;
1400}
1401
1402/**
1403 * iavf_put_rx_buffer - Clean up used buffer and either recycle or free
1404 * @rx_ring: rx descriptor ring to transact packets on
1405 * @rx_buffer: rx buffer to pull data from
1406 *
1407 * This function will clean up the contents of the rx_buffer. It will
1408 * either recycle the buffer or unmap it and free the associated resources.
1409 */
1410static void iavf_put_rx_buffer(struct iavf_ring *rx_ring,
1411 struct iavf_rx_buffer *rx_buffer)
1412{
1413 if (!rx_buffer)
1414 return;
1415
1416 if (iavf_can_reuse_rx_page(rx_buffer)) {
1417 /* hand second half of page back to the ring */
1418 iavf_reuse_rx_page(rx_ring, rx_buffer);
1419 rx_ring->rx_stats.page_reuse_count++;
1420 } else {
1421 /* we are not reusing the buffer so unmap it */
1422 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
1423 iavf_rx_pg_size(rx_ring),
1424 DMA_FROM_DEVICE, IAVF_RX_DMA_ATTR);
1425 __page_frag_cache_drain(rx_buffer->page,
1426 rx_buffer->pagecnt_bias);
1427 }
1428
1429 /* clear contents of buffer_info */
1430 rx_buffer->page = NULL;
1431}
1432
1433/**
1434 * iavf_is_non_eop - process handling of non-EOP buffers
1435 * @rx_ring: Rx ring being processed
1436 * @rx_desc: Rx descriptor for current buffer
1437 * @skb: Current socket buffer containing buffer in progress
1438 *
1439 * This function updates next to clean. If the buffer is an EOP buffer
1440 * this function exits returning false, otherwise it will place the
1441 * sk_buff in the next buffer to be chained and return true indicating
1442 * that this is in fact a non-EOP buffer.
1443 **/
1444static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
1445 union iavf_rx_desc *rx_desc,
1446 struct sk_buff *skb)
1447{
1448 u32 ntc = rx_ring->next_to_clean + 1;
1449
1450 /* fetch, update, and store next to clean */
1451 ntc = (ntc < rx_ring->count) ? ntc : 0;
1452 rx_ring->next_to_clean = ntc;
1453
1454 prefetch(IAVF_RX_DESC(rx_ring, ntc));
1455
1456 /* if we are the last buffer then there is nothing else to do */
1457#define IAVF_RXD_EOF BIT(IAVF_RX_DESC_STATUS_EOF_SHIFT)
1458 if (likely(iavf_test_staterr(rx_desc, IAVF_RXD_EOF)))
1459 return false;
1460
1461 rx_ring->rx_stats.non_eop_descs++;
1462
1463 return true;
1464}
1465
1466/**
1467 * iavf_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
1468 * @rx_ring: rx descriptor ring to transact packets on
1469 * @budget: Total limit on number of packets to process
1470 *
1471 * This function provides a "bounce buffer" approach to Rx interrupt
1472 * processing. The advantage to this is that on systems that have
1473 * expensive overhead for IOMMU access this provides a means of avoiding
1474 * it by maintaining the mapping of the page to the system.
1475 *
1476 * Returns amount of work completed
1477 **/
1478static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
1479{
1480 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1481 struct sk_buff *skb = rx_ring->skb;
1482 u16 cleaned_count = IAVF_DESC_UNUSED(rx_ring);
1483 bool failure = false;
1484
1485 while (likely(total_rx_packets < (unsigned int)budget)) {
1486 struct iavf_rx_buffer *rx_buffer;
1487 union iavf_rx_desc *rx_desc;
1488 unsigned int size;
1489 u16 vlan_tag;
1490 u8 rx_ptype;
1491 u64 qword;
1492
1493 /* return some buffers to hardware, one at a time is too slow */
1494 if (cleaned_count >= IAVF_RX_BUFFER_WRITE) {
1495 failure = failure ||
1496 iavf_alloc_rx_buffers(rx_ring, cleaned_count);
1497 cleaned_count = 0;
1498 }
1499
1500 rx_desc = IAVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
1501
1502 /* status_error_len will always be zero for unused descriptors
1503 * because it's cleared in cleanup, and overlaps with hdr_addr
1504 * which is always zero because packet split isn't used, if the
1505 * hardware wrote DD then the length will be non-zero
1506 */
1507 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1508
1509 /* This memory barrier is needed to keep us from reading
1510 * any other fields out of the rx_desc until we have
1511 * verified the descriptor has been written back.
1512 */
1513 dma_rmb();
1514#define IAVF_RXD_DD BIT(IAVF_RX_DESC_STATUS_DD_SHIFT)
1515 if (!iavf_test_staterr(rx_desc, IAVF_RXD_DD))
1516 break;
1517
1518 size = (qword & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
1519 IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;
1520
1521 iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb);
1522 rx_buffer = iavf_get_rx_buffer(rx_ring, size);
1523
1524 /* build a new skb or add the buffer to the skb in progress */
1525 if (skb)
1526 iavf_add_rx_frag(rx_ring, rx_buffer, skb, size);
1527 else if (ring_uses_build_skb(rx_ring))
1528 skb = iavf_build_skb(rx_ring, rx_buffer, size);
1529 else
1530 skb = iavf_construct_skb(rx_ring, rx_buffer, size);
1531
1532 /* exit if we failed to retrieve a buffer */
1533 if (!skb) {
1534 rx_ring->rx_stats.alloc_buff_failed++;
1535 if (rx_buffer)
1536 rx_buffer->pagecnt_bias++;
1537 break;
1538 }
1539
1540 iavf_put_rx_buffer(rx_ring, rx_buffer);
1541 cleaned_count++;
1542
1543 if (iavf_is_non_eop(rx_ring, rx_desc, skb))
1544 continue;
1545
1546 /* ERR_MASK will only have valid bits if EOP set, and
1547 * what we are doing here is actually checking
1548 * IAVF_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
1549 * the error field
1550 */
1551 if (unlikely(iavf_test_staterr(rx_desc, BIT(IAVF_RXD_QW1_ERROR_SHIFT)))) {
1552 dev_kfree_skb_any(skb);
1553 skb = NULL;
1554 continue;
1555 }
1556
1557 if (iavf_cleanup_headers(rx_ring, skb)) {
1558 skb = NULL;
1559 continue;
1560 }
1561
1562 /* probably a little skewed due to removing CRC */
1563 total_rx_bytes += skb->len;
1564
1565 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1566 rx_ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >>
1567 IAVF_RXD_QW1_PTYPE_SHIFT;
1568
1569 /* populate checksum, VLAN, and protocol */
1570 iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
1571
1573 vlan_tag = (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
1574 le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
1575
1576 iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
1577 iavf_receive_skb(rx_ring, skb, vlan_tag);
1578 skb = NULL;
1579
1580 /* update budget accounting */
1581 total_rx_packets++;
1582 }
1583
1584 rx_ring->skb = skb;
1585
1586 u64_stats_update_begin(&rx_ring->syncp);
1587 rx_ring->stats.packets += total_rx_packets;
1588 rx_ring->stats.bytes += total_rx_bytes;
1589 u64_stats_update_end(&rx_ring->syncp);
1590 rx_ring->q_vector->rx.total_packets += total_rx_packets;
1591 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1592
1593 /* guarantee a trip back through this routine if there was a failure */
1594 return failure ? budget : (int)total_rx_packets;
1595}
1596
1597static inline u32 iavf_buildreg_itr(const int type, u16 itr)
1598{
1599 u32 val;
1600
1601 /* We don't bother with setting the CLEARPBA bit as the data sheet
1602 * points out doing so is "meaningless since it was already
1603 * auto-cleared". The auto-clearing happens when the interrupt is
1604 * asserted.
1605 *
1606 * Hardware errata 28 also indicates that writing to a
1607 * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear
1608 * an event in the PBA anyway so we need to rely on the automask
1609 * to hold pending events for us until the interrupt is re-enabled
1610 *
1611 * The itr value is reported in microseconds, and the register
1612 * value is recorded in 2 microsecond units. For this reason we
1613 * only need to shift by the interval shift - 1 instead of the
1614 * full value.
1615 */
1616 itr &= IAVF_ITR_MASK;
1617
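 /* shifting by (INTERVAL_SHIFT - 1) effectively writes itr/2, i.e. the
 * interval expressed in 2 microsecond units, into the interval field
 */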
1618 val = IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
1619 (type << IAVF_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
1620 (itr << (IAVF_VFINT_DYN_CTLN1_INTERVAL_SHIFT - 1));
1621
1622 return val;
1623}
1624
1625/* a small macro to shorten up some long lines */
1626#define INTREG IAVF_VFINT_DYN_CTLN1
1627
1628/* The act of updating the ITR will cause it to immediately trigger. In order
1629 * to prevent this from throwing off adaptive update statistics we defer the
1630 * update so that it can only happen so often. So after either Tx or Rx are
1631 * updated we make the adaptive scheme wait until either the ITR completely
1632 * expires via the next_update expiration or we have been through at least
1633 * 3 interrupts.
1634 */
1635#define ITR_COUNTDOWN_START 3
1636
1637/**
1638 * iavf_update_enable_itr - Update itr and re-enable MSIX interrupt
1639 * @vsi: the VSI we care about
1640 * @q_vector: q_vector for which itr is being updated and interrupt enabled
1641 *
1642 **/
1643static inline void iavf_update_enable_itr(struct iavf_vsi *vsi,
1644 struct iavf_q_vector *q_vector)
1645{
1646 struct iavf_hw *hw = &vsi->back->hw;
1647 u32 intval;
1648
1649 /* These will do nothing if dynamic updates are not enabled */
1650 iavf_update_itr(q_vector, &q_vector->tx);
1651 iavf_update_itr(q_vector, &q_vector->rx);
1652
1653 /* This block of logic allows us to get away with only updating
1654 * one ITR value with each interrupt. The idea is to perform a
1655 * pseudo-lazy update with the following criteria.
1656 *
1657 * 1. Rx is given higher priority than Tx if both are in same state
1658 * 2. If we must reduce an ITR, that reduction is given highest priority.
1659 * 3. We then give priority to increasing ITR based on amount.
1660 */
1661 if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
1662 /* Rx ITR needs to be reduced, this is highest priority */
1663 intval = iavf_buildreg_itr(IAVF_RX_ITR,
1664 q_vector->rx.target_itr);
1665 q_vector->rx.current_itr = q_vector->rx.target_itr;
1666 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1667 } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
1668 ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
1669 (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
1670 /* Tx ITR needs to be reduced, this is second priority
1671 * Tx ITR needs to be increased more than Rx, fourth priority
1672 */
1673 intval = iavf_buildreg_itr(IAVF_TX_ITR,
1674 q_vector->tx.target_itr);
1675 q_vector->tx.current_itr = q_vector->tx.target_itr;
1676 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1677 } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
1678 /* Rx ITR needs to be increased, third priority */
1679 intval = iavf_buildreg_itr(IAVF_RX_ITR,
1680 q_vector->rx.target_itr);
1681 q_vector->rx.current_itr = q_vector->rx.target_itr;
1682 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1683 } else {
1684 /* No ITR update, lowest priority */
1685 intval = iavf_buildreg_itr(IAVF_ITR_NONE, 0);
1686 if (q_vector->itr_countdown)
1687 q_vector->itr_countdown--;
1688 }
1689
1690 if (!test_bit(__IAVF_VSI_DOWN, vsi->state))
1691 wr32(hw, INTREG(q_vector->reg_idx), intval);
1692}
1693
1694/**
1695 * iavf_napi_poll - NAPI polling Rx/Tx cleanup routine
1696 * @napi: napi struct with our devices info in it
1697 * @budget: amount of work driver is allowed to do this pass, in packets
1698 *
1699 * This function will clean all queues associated with a q_vector.
1700 *
1701 * Returns the amount of work done
1702 **/
1703int iavf_napi_poll(struct napi_struct *napi, int budget)
1704{
1705 struct iavf_q_vector *q_vector =
1706 container_of(napi, struct iavf_q_vector, napi);
1707 struct iavf_vsi *vsi = q_vector->vsi;
1708 struct iavf_ring *ring;
1709 bool clean_complete = true;
1710 bool arm_wb = false;
1711 int budget_per_ring;
1712 int work_done = 0;
1713
1714 if (test_bit(__IAVF_VSI_DOWN, vsi->state)) {
1715 napi_complete(napi);
1716 return 0;
1717 }
1718
1719 /* Since the actual Tx work is minimal, we can give the Tx a larger
1720 * budget and be more aggressive about cleaning up the Tx descriptors.
1721 */
1722 iavf_for_each_ring(ring, q_vector->tx) {
1723 if (!iavf_clean_tx_irq(vsi, ring, budget)) {
1724 clean_complete = false;
1725 continue;
1726 }
1727 arm_wb |= ring->arm_wb;
1728 ring->arm_wb = false;
1729 }
1730
1731 /* Handle case where we are called by netpoll with a budget of 0 */
1732 if (budget <= 0)
1733 goto tx_only;
1734
1735 /* We attempt to distribute budget to each Rx queue fairly, but don't
1736 * allow the budget to go below 1 because that would exit polling early.
1737 */
1738 budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
1739
1740 iavf_for_each_ring(ring, q_vector->rx) {
1741 int cleaned = iavf_clean_rx_irq(ring, budget_per_ring);
1742
1743 work_done += cleaned;
1744 /* if we clean as many as budgeted, we must not be done */
1745 if (cleaned >= budget_per_ring)
1746 clean_complete = false;
1747 }
1748
1749 /* If work not completed, return budget and polling will return */
1750 if (!clean_complete) {
1751 int cpu_id = smp_processor_id();
1752
1753 /* It is possible that the interrupt affinity has changed but,
1754 * if the cpu is pegged at 100%, polling will never exit while
1755 * traffic continues and the interrupt will be stuck on this
1756 * cpu. We check to make sure affinity is correct before we
1757 * continue to poll, otherwise we must stop polling so the
1758 * interrupt can move to the correct cpu.
1759 */
1760 if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
1761 /* Tell napi that we are done polling */
1762 napi_complete_done(napi, work_done);
1763
1764 /* Force an interrupt */
1765 iavf_force_wb(vsi, q_vector);
1766
1767 /* Return budget-1 so that polling stops */
1768 return budget - 1;
1769 }
1770tx_only:
1771 if (arm_wb) {
1772 q_vector->tx.ring[0].tx_stats.tx_force_wb++;
1773 iavf_enable_wb_on_itr(vsi, q_vector);
1774 }
1775 return budget;
1776 }
1777
1778 if (vsi->back->flags & IAVF_TXR_FLAGS_WB_ON_ITR)
1779 q_vector->arm_wb_state = false;
1780
1781 /* Exit the polling mode, but don't re-enable interrupts if stack might
1782 * poll us due to busy-polling
1783 */
1784 if (likely(napi_complete_done(napi, work_done)))
1785 iavf_update_enable_itr(vsi, q_vector);
1786
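 /* returning less than the full budget tells the NAPI core we are done */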
1787 return min(work_done, budget - 1);
1788}
1789
1790/**
1791 * iavf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
1792 * @skb: send buffer
1793 * @tx_ring: ring to send buffer on
1794 * @flags: the tx flags to be set
1795 *
1796 * Checks the skb and sets up the corresponding generic transmit flags
1797 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
1798 *
1799 * Returns an error code to indicate the frame should be dropped upon error,
1800 * otherwise returns 0 to indicate the flags have been set properly.
1801 **/
1802static inline int iavf_tx_prepare_vlan_flags(struct sk_buff *skb,
1803 struct iavf_ring *tx_ring,
1804 u32 *flags)
1805{
1806 __be16 protocol = skb->protocol;
1807 u32 tx_flags = 0;
1808
1809 if (protocol == htons(ETH_P_8021Q) &&
1810 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
1811 /* When HW VLAN acceleration is turned off by the user the
1812 * stack sets the protocol to 8021q so that the driver
1813 * can take any steps required to support the SW only
1814 * VLAN handling. In our case the driver doesn't need
1815 * to take any further steps so just set the protocol
1816 * to the encapsulated ethertype.
1817 */
1818 skb->protocol = vlan_get_protocol(skb);
1819 goto out;
1820 }
1821
1822 /* if we have a HW VLAN tag being added, default to the HW one */
1823 if (skb_vlan_tag_present(skb)) {
1824 tx_flags |= skb_vlan_tag_get(skb) << IAVF_TX_FLAGS_VLAN_SHIFT;
1825 tx_flags |= IAVF_TX_FLAGS_HW_VLAN;
1826 /* else if it is a SW VLAN, check the next protocol and store the tag */
1827 } else if (protocol == htons(ETH_P_8021Q)) {
1828 struct vlan_hdr *vhdr, _vhdr;
1829
1830 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
1831 if (!vhdr)
1832 return -EINVAL;
1833
1834 protocol = vhdr->h_vlan_encapsulated_proto;
1835 tx_flags |= ntohs(vhdr->h_vlan_TCI) << IAVF_TX_FLAGS_VLAN_SHIFT;
1836 tx_flags |= IAVF_TX_FLAGS_SW_VLAN;
1837 }
1838
1839out:
1840 *flags = tx_flags;
1841 return 0;
1842}
1843
1844/**
1845 * iavf_tso - set up the tso context descriptor
1846 * @first: pointer to first Tx buffer for xmit
1847 * @hdr_len: ptr to the size of the packet header
1848 * @cd_type_cmd_tso_mss: Quad Word 1
1849 *
1850 * Returns 0 if no TSO can happen, 1 if tso is going, or error
1851 **/
1852static int iavf_tso(struct iavf_tx_buffer *first, u8 *hdr_len,
1853 u64 *cd_type_cmd_tso_mss)
1854{
1855 struct sk_buff *skb = first->skb;
1856 u64 cd_cmd, cd_tso_len, cd_mss;
1857 union {
1858 struct iphdr *v4;
1859 struct ipv6hdr *v6;
1860 unsigned char *hdr;
1861 } ip;
1862 union {
1863 struct tcphdr *tcp;
1864 struct udphdr *udp;
1865 unsigned char *hdr;
1866 } l4;
1867 u32 paylen, l4_offset;
1868 u16 gso_segs, gso_size;
1869 int err;
1870
1871 if (skb->ip_summed != CHECKSUM_PARTIAL)
1872 return 0;
1873
1874 if (!skb_is_gso(skb))
1875 return 0;
1876
1877 err = skb_cow_head(skb, 0);
1878 if (err < 0)
1879 return err;
1880
1881 ip.hdr = skb_network_header(skb);
1882 l4.hdr = skb_transport_header(skb);
1883
1884 /* initialize outer IP header fields */
1885 if (ip.v4->version == 4) {
1886 ip.v4->tot_len = 0;
1887 ip.v4->check = 0;
1888 } else {
1889 ip.v6->payload_len = 0;
1890 }
1891
1892 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
1893 SKB_GSO_GRE_CSUM |
1894 SKB_GSO_IPXIP4 |
1895 SKB_GSO_IPXIP6 |
1896 SKB_GSO_UDP_TUNNEL |
1897 SKB_GSO_UDP_TUNNEL_CSUM)) {
1898 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
1899 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
1900 l4.udp->len = 0;
1901
1902 /* determine offset of outer transport header */
1903 l4_offset = l4.hdr - skb->data;
1904
1905 /* remove payload length from outer checksum */
1906 paylen = skb->len - l4_offset;
1907 csum_replace_by_diff(&l4.udp->check,
1908 (__force __wsum)htonl(paylen));
1909 }
1910
1911 /* reset pointers to inner headers */
1912 ip.hdr = skb_inner_network_header(skb);
1913 l4.hdr = skb_inner_transport_header(skb);
1914
1915 /* initialize inner IP header fields */
1916 if (ip.v4->version == 4) {
1917 ip.v4->tot_len = 0;
1918 ip.v4->check = 0;
1919 } else {
1920 ip.v6->payload_len = 0;
1921 }
1922 }
1923
1924 /* determine offset of inner transport header */
1925 l4_offset = l4.hdr - skb->data;
1926
1927 /* remove payload length from inner checksum */
1928 paylen = skb->len - l4_offset;
1929 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
1930
1931 /* compute length of segmentation header */
1932 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
1933
1934 /* pull values out of skb_shinfo */
1935 gso_size = skb_shinfo(skb)->gso_size;
1936 gso_segs = skb_shinfo(skb)->gso_segs;
1937
1938 /* update GSO size and bytecount with header size */
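 /* on the wire each segment carries its own copy of the headers, so
 * bytecount grows by (gso_segs - 1) * hdr_len beyond skb->len
 */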
1939 first->gso_segs = gso_segs;
1940 first->bytecount += (first->gso_segs - 1) * *hdr_len;
1941
1942 /* find the field values */
1943 cd_cmd = IAVF_TX_CTX_DESC_TSO;
1944 cd_tso_len = skb->len - *hdr_len;
1945 cd_mss = gso_size;
1946 *cd_type_cmd_tso_mss |= (cd_cmd << IAVF_TXD_CTX_QW1_CMD_SHIFT) |
1947 (cd_tso_len << IAVF_TXD_CTX_QW1_TSO_LEN_SHIFT) |
1948 (cd_mss << IAVF_TXD_CTX_QW1_MSS_SHIFT);
1949 return 1;
1950}
1951
1952/**
1953 * iavf_tx_enable_csum - Enable Tx checksum offloads
1954 * @skb: send buffer
1955 * @tx_flags: pointer to Tx flags currently set
1956 * @td_cmd: Tx descriptor command bits to set
1957 * @td_offset: Tx descriptor header offsets to set
1958 * @tx_ring: Tx descriptor ring
1959 * @cd_tunneling: ptr to context desc bits
1960 **/
1961static int iavf_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
1962 u32 *td_cmd, u32 *td_offset,
1963 struct iavf_ring *tx_ring,
1964 u32 *cd_tunneling)
1965{
1966 union {
1967 struct iphdr *v4;
1968 struct ipv6hdr *v6;
1969 unsigned char *hdr;
1970 } ip;
1971 union {
1972 struct tcphdr *tcp;
1973 struct udphdr *udp;
1974 unsigned char *hdr;
1975 } l4;
1976 unsigned char *exthdr;
1977 u32 offset, cmd = 0;
1978 __be16 frag_off;
1979 u8 l4_proto = 0;
1980
1981 if (skb->ip_summed != CHECKSUM_PARTIAL)
1982 return 0;
1983
1984 ip.hdr = skb_network_header(skb);
1985 l4.hdr = skb_transport_header(skb);
1986
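 /* the descriptor length fields are in word units: MACLEN in 2-byte
 * words, IPLEN and L4LEN in 4-byte words, hence the divides below
 */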
1987 /* compute outer L2 header size */
1988 offset = ((ip.hdr - skb->data) / 2) << IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
1989
1990 if (skb->encapsulation) {
1991 u32 tunnel = 0;
1992 /* define outer network header type */
1993 if (*tx_flags & IAVF_TX_FLAGS_IPV4) {
1994 tunnel |= (*tx_flags & IAVF_TX_FLAGS_TSO) ?
1995 IAVF_TX_CTX_EXT_IP_IPV4 :
1996 IAVF_TX_CTX_EXT_IP_IPV4_NO_CSUM;
1997
1998 l4_proto = ip.v4->protocol;
1999 } else if (*tx_flags & IAVF_TX_FLAGS_IPV6) {
2000 tunnel |= IAVF_TX_CTX_EXT_IP_IPV6;
2001
2002 exthdr = ip.hdr + sizeof(*ip.v6);
2003 l4_proto = ip.v6->nexthdr;
2004 if (l4.hdr != exthdr)
2005 ipv6_skip_exthdr(skb, exthdr - skb->data,
2006 &l4_proto, &frag_off);
2007 }
2008
2009 /* define outer transport */
2010 switch (l4_proto) {
2011 case IPPROTO_UDP:
2012 tunnel |= IAVF_TXD_CTX_UDP_TUNNELING;
2013 *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
2014 break;
2015 case IPPROTO_GRE:
2016 tunnel |= IAVF_TXD_CTX_GRE_TUNNELING;
2017 *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
2018 break;
2019 case IPPROTO_IPIP:
2020 case IPPROTO_IPV6:
2021 *tx_flags |= IAVF_TX_FLAGS_VXLAN_TUNNEL;
2022 l4.hdr = skb_inner_network_header(skb);
2023 break;
2024 default:
2025 if (*tx_flags & IAVF_TX_FLAGS_TSO)
2026 return -1;
2027
2028 skb_checksum_help(skb);
2029 return 0;
2030 }
2031
2032 /* compute outer L3 header size */
2033 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
2034 IAVF_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
2035
2036 /* switch IP header pointer from outer to inner header */
2037 ip.hdr = skb_inner_network_header(skb);
2038
2039 /* compute tunnel header size */
2040 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
2041 IAVF_TXD_CTX_QW0_NATLEN_SHIFT;
2042
2043 /* indicate if we need to offload outer UDP header */
2044 if ((*tx_flags & IAVF_TX_FLAGS_TSO) &&
2045 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2046 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
2047 tunnel |= IAVF_TXD_CTX_QW0_L4T_CS_MASK;
2048
2049 /* record tunnel offload values */
2050 *cd_tunneling |= tunnel;
2051
2052 /* switch L4 header pointer from outer to inner */
2053 l4.hdr = skb_inner_transport_header(skb);
2054 l4_proto = 0;
2055
2056 /* reset type as we transition from outer to inner headers */
2057 *tx_flags &= ~(IAVF_TX_FLAGS_IPV4 | IAVF_TX_FLAGS_IPV6);
2058 if (ip.v4->version == 4)
2059 *tx_flags |= IAVF_TX_FLAGS_IPV4;
2060 if (ip.v6->version == 6)
2061 *tx_flags |= IAVF_TX_FLAGS_IPV6;
2062 }
2063
2064 /* Enable IP checksum offloads */
2065 if (*tx_flags & IAVF_TX_FLAGS_IPV4) {
2066 l4_proto = ip.v4->protocol;
2067 /* the stack computes the IP header already, the only time we
2068 * need the hardware to recompute it is in the case of TSO.
2069 */
2070 cmd |= (*tx_flags & IAVF_TX_FLAGS_TSO) ?
2071 IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM :
2072 IAVF_TX_DESC_CMD_IIPT_IPV4;
2073 } else if (*tx_flags & IAVF_TX_FLAGS_IPV6) {
2074 cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
2075
2076 exthdr = ip.hdr + sizeof(*ip.v6);
2077 l4_proto = ip.v6->nexthdr;
2078 if (l4.hdr != exthdr)
2079 ipv6_skip_exthdr(skb, exthdr - skb->data,
2080 &l4_proto, &frag_off);
2081 }
2082
2083 /* compute inner L3 header size */
2084 offset |= ((l4.hdr - ip.hdr) / 4) << IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
2085
2086 /* Enable L4 checksum offloads */
2087 switch (l4_proto) {
2088 case IPPROTO_TCP:
2089 /* enable checksum offloads */
2090 cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
2091 offset |= l4.tcp->doff << IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2092 break;
2093 case IPPROTO_SCTP:
2094 /* enable SCTP checksum offload */
2095 cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
2096 offset |= (sizeof(struct sctphdr) >> 2) <<
2097 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2098 break;
2099 case IPPROTO_UDP:
2100 /* enable UDP checksum offload */
2101 cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
2102 offset |= (sizeof(struct udphdr) >> 2) <<
2103 IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2104 break;
2105 default:
2106 if (*tx_flags & IAVF_TX_FLAGS_TSO)
2107 return -1;
2108 skb_checksum_help(skb);
2109 return 0;
2110 }
2111
2112 *td_cmd |= cmd;
2113 *td_offset |= offset;
2114
2115 return 1;
2116}
2117
2118/**
2119 * iavf_create_tx_ctx - Build the Tx context descriptor
2120 * @tx_ring: ring to create the descriptor on
2121 * @cd_type_cmd_tso_mss: Quad Word 1
2122 * @cd_tunneling: Quad Word 0 - bits 0-31
2123 * @cd_l2tag2: Quad Word 0 - bits 32-63
2124 **/
2125static void iavf_create_tx_ctx(struct iavf_ring *tx_ring,
2126 const u64 cd_type_cmd_tso_mss,
2127 const u32 cd_tunneling, const u32 cd_l2tag2)
2128{
2129 struct iavf_tx_context_desc *context_desc;
2130 int i = tx_ring->next_to_use;
2131
2132 if ((cd_type_cmd_tso_mss == IAVF_TX_DESC_DTYPE_CONTEXT) &&
2133 !cd_tunneling && !cd_l2tag2)
2134 return;
2135
2136 /* grab the next descriptor */
2137 context_desc = IAVF_TX_CTXTDESC(tx_ring, i);
2138
2139 i++;
2140 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2141
2142 /* cpu_to_le32 and assign to struct fields */
2143 context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
2144 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
2145 context_desc->rsvd = cpu_to_le16(0);
2146 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
2147}
2148
2149/**
2150 * __iavf_chk_linearize - Check if there are more than 8 buffers per packet
2151 * @skb: send buffer
2152 *
2153 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
2154 * and so we need to figure out the cases where we need to linearize the skb.
2155 *
2156 * For TSO we need to count the TSO header and segment payload separately.
2157 * As such we need to check cases where we have 7 fragments or more as we
2158 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
2159 * the segment payload in the first descriptor, and another 7 for the
2160 * fragments.
2161 **/
2162bool __iavf_chk_linearize(struct sk_buff *skb)
2163{
2164 const skb_frag_t *frag, *stale;
2165 int nr_frags, sum;
2166
2167 /* no need to check if number of frags is less than 7 */
2168 nr_frags = skb_shinfo(skb)->nr_frags;
2169 if (nr_frags < (IAVF_MAX_BUFFER_TXD - 1))
2170 return false;
2171
2172 /* We need to walk through the list and validate that each group
2173 * of 6 fragments totals at least gso_size.
2174 */
2175 nr_frags -= IAVF_MAX_BUFFER_TXD - 2;
2176 frag = &skb_shinfo(skb)->frags[0];
2177
2178 /* Initialize size to the negative value of gso_size minus 1. We
2179 * use this as the worst case scenario in which the frag ahead
2180 * of us only provides one byte which is why we are limited to 6
2181 * descriptors for a single transmit as the header and previous
2182 * fragment are already consuming 2 descriptors.
2183 */
2184 sum = 1 - skb_shinfo(skb)->gso_size;
2185
2186 /* Add size of frags 0 through 4 to create our initial sum */
2187 sum += skb_frag_size(frag++);
2188 sum += skb_frag_size(frag++);
2189 sum += skb_frag_size(frag++);
2190 sum += skb_frag_size(frag++);
2191 sum += skb_frag_size(frag++);
2192
2193 /* Walk through fragments adding latest fragment, testing it, and
2194 * then removing stale fragments from the sum.
2195 */
2196 for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
2197 int stale_size = skb_frag_size(stale);
2198
2199 sum += skb_frag_size(frag++);
2200
2201 /* The stale fragment may present us with a smaller
2202 * descriptor than the actual fragment size. To account
2203 * for that we need to remove all the data on the front and
2204 * figure out what the remainder would be in the last
2205 * descriptor associated with the fragment.
2206 */
2207 if (stale_size > IAVF_MAX_DATA_PER_TXD) {
2208 int align_pad = -(skb_frag_off(stale)) &
2209 (IAVF_MAX_READ_REQ_SIZE - 1);
2210
2211 sum -= align_pad;
2212 stale_size -= align_pad;
2213
2214 do {
2215 sum -= IAVF_MAX_DATA_PER_TXD_ALIGNED;
2216 stale_size -= IAVF_MAX_DATA_PER_TXD_ALIGNED;
2217 } while (stale_size > IAVF_MAX_DATA_PER_TXD);
2218 }
2219
2220 /* if sum is negative we failed to make sufficient progress */
2221 if (sum < 0)
2222 return true;
2223
2224 if (!nr_frags--)
2225 break;
2226
2227 sum -= stale_size;
2228 }
2229
2230 return false;
2231}
2232
2233/**
2234 * __iavf_maybe_stop_tx - 2nd level check for tx stop conditions
2235 * @tx_ring: the ring to be checked
2236 * @size: the number of descriptors we want to assure is available
2237 *
2238 * Returns -EBUSY if a stop is needed, else 0
2239 **/
2240int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size)
2241{
2242 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
2243 /* Memory barrier before checking head and tail */
2244 smp_mb();
2245
2246 /* Check again in a case another CPU has just made room available. */
2247 if (likely(IAVF_DESC_UNUSED(tx_ring) < size))
2248 return -EBUSY;
2249
2250 /* A reprieve! - use start_queue because it doesn't call schedule */
2251 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
2252 ++tx_ring->tx_stats.restart_queue;
2253 return 0;
2254}
2255
2256/**
2257 * iavf_tx_map - Build the Tx descriptor
2258 * @tx_ring: ring to send buffer on
2259 * @skb: send buffer
2260 * @first: first buffer info buffer to use
2261 * @tx_flags: collected send information
2262 * @hdr_len: size of the packet header
2263 * @td_cmd: the command field in the descriptor
2264 * @td_offset: offset for checksum or crc
2265 **/
2266static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
2267 struct iavf_tx_buffer *first, u32 tx_flags,
2268 const u8 hdr_len, u32 td_cmd, u32 td_offset)
2269{
2270 unsigned int data_len = skb->data_len;
2271 unsigned int size = skb_headlen(skb);
2272 skb_frag_t *frag;
2273 struct iavf_tx_buffer *tx_bi;
2274 struct iavf_tx_desc *tx_desc;
2275 u16 i = tx_ring->next_to_use;
2276 u32 td_tag = 0;
2277 dma_addr_t dma;
2278
2279 if (tx_flags & IAVF_TX_FLAGS_HW_VLAN) {
2280 td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
2281 td_tag = (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >>
2282 IAVF_TX_FLAGS_VLAN_SHIFT;
2283 }
2284
2285 first->tx_flags = tx_flags;
2286
2287 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
2288
2289 tx_desc = IAVF_TX_DESC(tx_ring, i);
2290 tx_bi = first;
2291
2292 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
2293 unsigned int max_data = IAVF_MAX_DATA_PER_TXD_ALIGNED;
2294
2295 if (dma_mapping_error(tx_ring->dev, dma))
2296 goto dma_error;
2297
2298 /* record length, and DMA address */
2299 dma_unmap_len_set(tx_bi, len, size);
2300 dma_unmap_addr_set(tx_bi, dma, dma);
2301
2302 /* align size to end of page */
2303 max_data += -dma & (IAVF_MAX_READ_REQ_SIZE - 1);
2304 tx_desc->buffer_addr = cpu_to_le64(dma);
2305
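 /* a fragment larger than IAVF_MAX_DATA_PER_TXD is split across
 * multiple descriptors; max_data was padded above so the split
 * chunks end on IAVF_MAX_READ_REQ_SIZE boundaries
 */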
2306 while (unlikely(size > IAVF_MAX_DATA_PER_TXD)) {
2307 tx_desc->cmd_type_offset_bsz =
2308 build_ctob(td_cmd, td_offset,
2309 max_data, td_tag);
2310
2311 tx_desc++;
2312 i++;
2313
2314 if (i == tx_ring->count) {
2315 tx_desc = IAVF_TX_DESC(tx_ring, 0);
2316 i = 0;
2317 }
2318
2319 dma += max_data;
2320 size -= max_data;
2321
2322 max_data = IAVF_MAX_DATA_PER_TXD_ALIGNED;
2323 tx_desc->buffer_addr = cpu_to_le64(dma);
2324 }
2325
2326 if (likely(!data_len))
2327 break;
2328
2329 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
2330 size, td_tag);
2331
2332 tx_desc++;
2333 i++;
2334
2335 if (i == tx_ring->count) {
2336 tx_desc = IAVF_TX_DESC(tx_ring, 0);
2337 i = 0;
2338 }
2339
2340 size = skb_frag_size(frag);
2341 data_len -= size;
2342
2343 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
2344 DMA_TO_DEVICE);
2345
2346 tx_bi = &tx_ring->tx_bi[i];
2347 }
2348
2349 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
2350
2351 i++;
2352 if (i == tx_ring->count)
2353 i = 0;
2354
2355 tx_ring->next_to_use = i;
2356
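 /* stop the queue now if it is running low on free descriptors */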
2357 iavf_maybe_stop_tx(tx_ring, DESC_NEEDED);
2358
2359 /* write last descriptor with RS and EOP bits */
2360 td_cmd |= IAVF_TXD_CMD;
2361 tx_desc->cmd_type_offset_bsz =
2362 build_ctob(td_cmd, td_offset, size, td_tag);
2363
2364 skb_tx_timestamp(skb);
2365
2366 /* Force memory writes to complete before letting h/w know there
2367 * are new descriptors to fetch.
2368 *
2369 * We also use this memory barrier to make certain all of the
2370 * status bits have been updated before next_to_watch is written.
2371 */
2372 wmb();
2373
2374 /* set next_to_watch value indicating a packet is present */
2375 first->next_to_watch = tx_desc;
2376
2377 /* notify HW of packet */
2378 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
2379 writel(i, tx_ring->tail);
2380 }
2381
2382 return;
2383
2384dma_error:
2385 dev_info(tx_ring->dev, "TX DMA map failed\n");
2386
2387 /* clear dma mappings for failed tx_bi map */
2388 for (;;) {
2389 tx_bi = &tx_ring->tx_bi[i];
2390 iavf_unmap_and_free_tx_resource(tx_ring, tx_bi);
2391 if (tx_bi == first)
2392 break;
2393 if (i == 0)
2394 i = tx_ring->count;
2395 i--;
2396 }
2397
2398 tx_ring->next_to_use = i;
2399}
2400
2401/**
2402 * iavf_xmit_frame_ring - Sends buffer on Tx ring
2403 * @skb: send buffer
2404 * @tx_ring: ring to send buffer on
2405 *
2406 * Returns NETDEV_TX_OK if sent, else an error code
2407 **/
2408static netdev_tx_t iavf_xmit_frame_ring(struct sk_buff *skb,
2409 struct iavf_ring *tx_ring)
2410{
2411 u64 cd_type_cmd_tso_mss = IAVF_TX_DESC_DTYPE_CONTEXT;
2412 u32 cd_tunneling = 0, cd_l2tag2 = 0;
2413 struct iavf_tx_buffer *first;
2414 u32 td_offset = 0;
2415 u32 tx_flags = 0;
2416 __be16 protocol;
2417 u32 td_cmd = 0;
2418 u8 hdr_len = 0;
2419 int tso, count;
2420
2421 /* prefetch the data, we'll need it later */
2422 prefetch(skb->data);
2423
2424 iavf_trace(xmit_frame_ring, skb, tx_ring);
2425
2426 count = iavf_xmit_descriptor_count(skb);
2427 if (iavf_chk_linearize(skb, count)) {
2428 if (__skb_linearize(skb)) {
2429 dev_kfree_skb_any(skb);
2430 return NETDEV_TX_OK;
2431 }
2432 count = iavf_txd_use_count(skb->len);
2433 tx_ring->tx_stats.tx_linearize++;
2434 }
2435
2436 /* need: 1 descriptor per page * PAGE_SIZE/IAVF_MAX_DATA_PER_TXD,
2437 * + 1 desc for skb_head_len/IAVF_MAX_DATA_PER_TXD,
2438 * + 4 desc gap to avoid the cache line where head is,
2439 * + 1 desc for context descriptor,
2440 * otherwise try next time
2441 */
2442 if (iavf_maybe_stop_tx(tx_ring, count + 4 + 1)) {
2443 tx_ring->tx_stats.tx_busy++;
2444 return NETDEV_TX_BUSY;
2445 }
2446
2447 /* record the location of the first descriptor for this packet */
2448 first = &tx_ring->tx_bi[tx_ring->next_to_use];
2449 first->skb = skb;
2450 first->bytecount = skb->len;
2451 first->gso_segs = 1;
2452
2453 /* prepare the xmit flags */
2454 if (iavf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
2455 goto out_drop;
2456
2457 /* obtain protocol of skb */
2458 protocol = vlan_get_protocol(skb);
2459
2460 /* setup IPv4/IPv6 offloads */
2461 if (protocol == htons(ETH_P_IP))
2462 tx_flags |= IAVF_TX_FLAGS_IPV4;
2463 else if (protocol == htons(ETH_P_IPV6))
2464 tx_flags |= IAVF_TX_FLAGS_IPV6;
2465
2466 tso = iavf_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
2467
2468 if (tso < 0)
2469 goto out_drop;
2470 else if (tso)
2471 tx_flags |= IAVF_TX_FLAGS_TSO;
2472
2473 /* Always offload the checksum, since it's in the data descriptor */
2474 tso = iavf_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
2475 tx_ring, &cd_tunneling);
2476 if (tso < 0)
2477 goto out_drop;
2478
2479 /* always enable CRC insertion offload */
2480 td_cmd |= IAVF_TX_DESC_CMD_ICRC;
2481
2482 iavf_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
2483 cd_tunneling, cd_l2tag2);
2484
2485 iavf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
2486 td_cmd, td_offset);
2487
2488 return NETDEV_TX_OK;
2489
2490out_drop:
2491 iavf_trace(xmit_frame_ring_drop, first->skb, tx_ring);
2492 dev_kfree_skb_any(first->skb);
2493 first->skb = NULL;
2494 return NETDEV_TX_OK;
2495}
2496
2497/**
2498 * iavf_xmit_frame - Selects the correct VSI and Tx queue to send buffer
2499 * @skb: send buffer
2500 * @netdev: network interface device structure
2501 *
2502 * Returns NETDEV_TX_OK if sent, else an error code
2503 **/
2504netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2505{
2506 struct iavf_adapter *adapter = netdev_priv(netdev);
2507 struct iavf_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping];
2508
2509 /* hardware can't handle really short frames, hardware padding works
2510 * beyond this point
2511 */
2512 if (unlikely(skb->len < IAVF_MIN_TX_LEN)) {
2513 if (skb_pad(skb, IAVF_MIN_TX_LEN - skb->len))
2514 return NETDEV_TX_OK;
2515 skb->len = IAVF_MIN_TX_LEN;
2516 skb_set_tail_pointer(skb, IAVF_MIN_TX_LEN);
2517 }
2518
2519 return iavf_xmit_frame_ring(skb, tx_ring);
2520}