1// SPDX-License-Identifier: GPL-2.0
2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3
4#include <linux/prefetch.h>
5#include <linux/bpf_trace.h>
6#include <net/mpls.h>
7#include <net/xdp.h>
8#include "i40e.h"
9#include "i40e_trace.h"
10#include "i40e_prototype.h"
11#include "i40e_txrx_common.h"
12#include "i40e_xsk.h"
13
14#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
15/**
16 * i40e_fdir - Generate a Flow Director descriptor based on fdata
17 * @tx_ring: Tx ring to send buffer on
18 * @fdata: Flow director filter data
19 * @add: Indicate if we are adding a rule or deleting one
20 *
21 **/
22static void i40e_fdir(struct i40e_ring *tx_ring,
23 struct i40e_fdir_filter *fdata, bool add)
24{
25 struct i40e_filter_program_desc *fdir_desc;
26 struct i40e_pf *pf = tx_ring->vsi->back;
27 u32 flex_ptype, dtype_cmd;
28 u16 i;
29
30 /* grab the next descriptor */
31 i = tx_ring->next_to_use;
32 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
33
34 i++;
35 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
36
37 flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
38 (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);
39
40 flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
41 (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);
42
43 flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
44 (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
45
46 /* Use LAN VSI Id if not programmed by user */
47 flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
48 ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
49 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);
50
51 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
52
53 dtype_cmd |= add ?
54 I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
55 I40E_TXD_FLTR_QW1_PCMD_SHIFT :
56 I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
57 I40E_TXD_FLTR_QW1_PCMD_SHIFT;
58
59 dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK &
60 (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT);
61
62 dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK &
63 (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT);
64
65 if (fdata->cnt_index) {
66 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
67 dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK &
68 ((u32)fdata->cnt_index <<
69 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT);
70 }
71
72 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
73 fdir_desc->rsvd = cpu_to_le32(0);
74 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
75 fdir_desc->fd_id = cpu_to_le32(fdata->fd_id);
76}
77
78#define I40E_FD_CLEAN_DELAY 10
79/**
80 * i40e_program_fdir_filter - Program a Flow Director filter
81 * @fdir_data: Packet data that will be filter parameters
82 * @raw_packet: the pre-allocated packet buffer for FDir
83 * @pf: The PF pointer
84 * @add: True for add/update, False for remove
85 **/
86static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
87 u8 *raw_packet, struct i40e_pf *pf,
88 bool add)
89{
90 struct i40e_tx_buffer *tx_buf, *first;
91 struct i40e_tx_desc *tx_desc;
92 struct i40e_ring *tx_ring;
93 struct i40e_vsi *vsi;
94 struct device *dev;
95 dma_addr_t dma;
96 u32 td_cmd = 0;
97 u16 i;
98
99 /* find existing FDIR VSI */
100 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
101 if (!vsi)
102 return -ENOENT;
103
104 tx_ring = vsi->tx_rings[0];
105 dev = tx_ring->dev;
106
107 /* we need two descriptors to add/del a filter and we can wait */
108 for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
109 if (!i)
110 return -EAGAIN;
111 msleep_interruptible(1);
112 }
113
114 dma = dma_map_single(dev, raw_packet,
115 I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
116 if (dma_mapping_error(dev, dma))
117 goto dma_fail;
118
119 /* grab the next descriptor */
120 i = tx_ring->next_to_use;
121 first = &tx_ring->tx_bi[i];
122 i40e_fdir(tx_ring, fdir_data, add);
123
124 /* Now program a dummy descriptor */
125 i = tx_ring->next_to_use;
126 tx_desc = I40E_TX_DESC(tx_ring, i);
127 tx_buf = &tx_ring->tx_bi[i];
128
129 tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
130
131 memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));
132
133 /* record length, and DMA address */
134 dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
135 dma_unmap_addr_set(tx_buf, dma, dma);
136
137 tx_desc->buffer_addr = cpu_to_le64(dma);
138 td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
139
140 tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
141 tx_buf->raw_buf = (void *)raw_packet;
142
143 tx_desc->cmd_type_offset_bsz =
144 build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
145
146 /* Force memory writes to complete before letting h/w
147 * know there are new descriptors to fetch.
148 */
149 wmb();
150
151 /* Mark the data descriptor to be watched */
152 first->next_to_watch = tx_desc;
153
154 writel(tx_ring->next_to_use, tx_ring->tail);
155 return 0;
156
157dma_fail:
158 return -1;
159}
160
161/**
162 * i40e_create_dummy_packet - Constructs dummy packet for HW
163 * @dummy_packet: preallocated space for dummy packet
164 * @ipv4: is layer 3 packet of version 4 or 6
165 * @l4proto: next level protocol used in data portion of l3
166 * @data: filter data
167 *
168 * Returns address of layer 4 protocol dummy packet.
169 **/
170static char *i40e_create_dummy_packet(u8 *dummy_packet, bool ipv4, u8 l4proto,
171 struct i40e_fdir_filter *data)
172{
173 bool is_vlan = !!data->vlan_tag;
174 struct vlan_hdr vlan;
175 struct ipv6hdr ipv6;
176 struct ethhdr eth;
177 struct iphdr ip;
178 u8 *tmp;
179
180 if (ipv4) {
181 eth.h_proto = cpu_to_be16(ETH_P_IP);
182 ip.protocol = l4proto;
183 ip.version = 0x4;
184 ip.ihl = 0x5;
185
186 ip.daddr = data->dst_ip;
187 ip.saddr = data->src_ip;
188 } else {
189 eth.h_proto = cpu_to_be16(ETH_P_IPV6);
190 ipv6.nexthdr = l4proto;
191 ipv6.version = 0x6;
192
193 memcpy(&ipv6.saddr.in6_u.u6_addr32, data->src_ip6,
194 sizeof(__be32) * 4);
195 memcpy(&ipv6.daddr.in6_u.u6_addr32, data->dst_ip6,
196 sizeof(__be32) * 4);
197 }
198
199 if (is_vlan) {
200 vlan.h_vlan_TCI = data->vlan_tag;
201 vlan.h_vlan_encapsulated_proto = eth.h_proto;
202 eth.h_proto = data->vlan_etype;
203 }
204
205 tmp = dummy_packet;
206 memcpy(tmp, &eth, sizeof(eth));
207 tmp += sizeof(eth);
208
209 if (is_vlan) {
210 memcpy(tmp, &vlan, sizeof(vlan));
211 tmp += sizeof(vlan);
212 }
213
214 if (ipv4) {
215 memcpy(tmp, &ip, sizeof(ip));
216 tmp += sizeof(ip);
217 } else {
218 memcpy(tmp, &ipv6, sizeof(ipv6));
219 tmp += sizeof(ipv6);
220 }
221
222 return tmp;
223}
224
225/**
226 * i40e_create_dummy_udp_packet - helper function to create UDP packet
227 * @raw_packet: preallocated space for dummy packet
228 * @ipv4: is layer 3 packet of version 4 or 6
229 * @l4proto: next level protocol used in data portion of l3
230 * @data: filter data
231 *
232 * Helper function to populate udp fields.
233 **/
234static void i40e_create_dummy_udp_packet(u8 *raw_packet, bool ipv4, u8 l4proto,
235 struct i40e_fdir_filter *data)
236{
237 struct udphdr *udp;
238 u8 *tmp;
239
240 tmp = i40e_create_dummy_packet(raw_packet, ipv4, IPPROTO_UDP, data);
241 udp = (struct udphdr *)(tmp);
242 udp->dest = data->dst_port;
243 udp->source = data->src_port;
244}
245
246/**
247 * i40e_create_dummy_tcp_packet - helper function to create TCP packet
248 * @raw_packet: preallocated space for dummy packet
249 * @ipv4: is layer 3 packet of version 4 or 6
250 * @l4proto: next level protocol used in data portion of l3
251 * @data: filter data
252 *
253 * Helper function to populate tcp fields.
254 **/
255static void i40e_create_dummy_tcp_packet(u8 *raw_packet, bool ipv4, u8 l4proto,
256 struct i40e_fdir_filter *data)
257{
258 struct tcphdr *tcp;
259 u8 *tmp;
260 /* Dummy tcp packet */
261 static const char tcp_packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
262 0x50, 0x11, 0x0, 0x72, 0, 0, 0, 0};
263
264 tmp = i40e_create_dummy_packet(raw_packet, ipv4, IPPROTO_TCP, data);
265
266 tcp = (struct tcphdr *)tmp;
267 memcpy(tcp, tcp_packet, sizeof(tcp_packet));
268 tcp->dest = data->dst_port;
269 tcp->source = data->src_port;
270}
271
272/**
273 * i40e_create_dummy_sctp_packet - helper function to create SCTP packet
274 * @raw_packet: preallocated space for dummy packet
275 * @ipv4: is layer 3 packet of version 4 or 6
276 * @l4proto: next level protocol used in data portion of l3
277 * @data: filter data
278 *
279 * Helper function to populate sctp fields.
280 **/
281static void i40e_create_dummy_sctp_packet(u8 *raw_packet, bool ipv4,
282 u8 l4proto,
283 struct i40e_fdir_filter *data)
284{
285 struct sctphdr *sctp;
286 u8 *tmp;
287
288 tmp = i40e_create_dummy_packet(raw_packet, ipv4, IPPROTO_SCTP, data);
289
290 sctp = (struct sctphdr *)tmp;
291 sctp->dest = data->dst_port;
292 sctp->source = data->src_port;
293}
294
295/**
296 * i40e_prepare_fdir_filter - Prepare and program fdir filter
297 * @pf: physical function to attach filter to
298 * @fd_data: filter data
299 * @add: add or delete filter
300 * @packet_addr: address of dummy packet, used in filtering
301 * @payload_offset: offset from dummy packet address to user defined data
302 * @pctype: Packet type for which filter is used
303 *
304 * Helper function to offset data of dummy packet, program it and
305 * handle errors.
306 **/
307static int i40e_prepare_fdir_filter(struct i40e_pf *pf,
308 struct i40e_fdir_filter *fd_data,
309 bool add, char *packet_addr,
310 int payload_offset, u8 pctype)
311{
312 int ret;
313
314 if (fd_data->flex_filter) {
315 u8 *payload;
316 __be16 pattern = fd_data->flex_word;
317 u16 off = fd_data->flex_offset;
318
319 payload = packet_addr + payload_offset;
320
321 /* If user provided vlan, offset payload by vlan header length */
322 if (!!fd_data->vlan_tag)
323 payload += VLAN_HLEN;
324
325 *((__force __be16 *)(payload + off)) = pattern;
326 }
327
328 fd_data->pctype = pctype;
329 ret = i40e_program_fdir_filter(fd_data, packet_addr, pf, add);
330 if (ret) {
331 dev_info(&pf->pdev->dev,
332 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
333 fd_data->pctype, fd_data->fd_id, ret);
334 /* Free the packet buffer since it wasn't added to the ring */
335 return -EOPNOTSUPP;
336 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
337 if (add)
338 dev_info(&pf->pdev->dev,
339 "Filter OK for PCTYPE %d loc = %d\n",
340 fd_data->pctype, fd_data->fd_id);
341 else
342 dev_info(&pf->pdev->dev,
343 "Filter deleted for PCTYPE %d loc = %d\n",
344 fd_data->pctype, fd_data->fd_id);
345 }
346
347 return ret;
348}
349
350/**
351 * i40e_change_filter_num - Update the IPv4/IPv6 filter counters
352 * @ipv4: is layer 3 packet of version 4 or 6
353 * @add: add or delete filter
354 * @ipv4_filter_num: field to update
355 * @ipv6_filter_num: field to update
356 *
357 * Update filter number field for pf.
358 **/
359static void i40e_change_filter_num(bool ipv4, bool add, u16 *ipv4_filter_num,
360 u16 *ipv6_filter_num)
361{
362 if (add) {
363 if (ipv4)
364 (*ipv4_filter_num)++;
365 else
366 (*ipv6_filter_num)++;
367 } else {
368 if (ipv4)
369 (*ipv4_filter_num)--;
370 else
371 (*ipv6_filter_num)--;
372 }
373}
374
375#define I40E_UDPIP_DUMMY_PACKET_LEN 42
376#define I40E_UDPIP6_DUMMY_PACKET_LEN 62
377/**
378 * i40e_add_del_fdir_udp - Add/Remove UDP filters
379 * @vsi: pointer to the targeted VSI
380 * @fd_data: the flow director data required for the FDir descriptor
381 * @add: true adds a filter, false removes it
382 * @ipv4: true is v4, false is v6
383 *
384 * Returns 0 if the filters were successfully added or removed
385 **/
386static int i40e_add_del_fdir_udp(struct i40e_vsi *vsi,
387 struct i40e_fdir_filter *fd_data,
388 bool add,
389 bool ipv4)
390{
391 struct i40e_pf *pf = vsi->back;
392 u8 *raw_packet;
393 int ret;
394
395 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
396 if (!raw_packet)
397 return -ENOMEM;
398
399 i40e_create_dummy_udp_packet(raw_packet, ipv4, IPPROTO_UDP, fd_data);
400
401 if (ipv4)
402 ret = i40e_prepare_fdir_filter
403 (pf, fd_data, add, raw_packet,
404 I40E_UDPIP_DUMMY_PACKET_LEN,
405 I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
406 else
407 ret = i40e_prepare_fdir_filter
408 (pf, fd_data, add, raw_packet,
409 I40E_UDPIP6_DUMMY_PACKET_LEN,
410 I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
411
412 if (ret) {
413 kfree(raw_packet);
414 return ret;
415 }
416
417 i40e_change_filter_num(ipv4, add, &pf->fd_udp4_filter_cnt,
418 &pf->fd_udp6_filter_cnt);
419
420 return 0;
421}
422
423#define I40E_TCPIP_DUMMY_PACKET_LEN 54
424#define I40E_TCPIP6_DUMMY_PACKET_LEN 74
425/**
426 * i40e_add_del_fdir_tcp - Add/Remove TCP filters
427 * @vsi: pointer to the targeted VSI
428 * @fd_data: the flow director data required for the FDir descriptor
429 * @add: true adds a filter, false removes it
430 * @ipv4: true is v4, false is v6
431 *
432 * Returns 0 if the filters were successfully added or removed
433 **/
434static int i40e_add_del_fdir_tcp(struct i40e_vsi *vsi,
435 struct i40e_fdir_filter *fd_data,
436 bool add,
437 bool ipv4)
438{
439 struct i40e_pf *pf = vsi->back;
440 u8 *raw_packet;
441 int ret;
442
443 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
444 if (!raw_packet)
445 return -ENOMEM;
446
447 i40e_create_dummy_tcp_packet(raw_packet, ipv4, IPPROTO_TCP, fd_data);
448 if (ipv4)
449 ret = i40e_prepare_fdir_filter
450 (pf, fd_data, add, raw_packet,
451 I40E_TCPIP_DUMMY_PACKET_LEN,
452 I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
453 else
454 ret = i40e_prepare_fdir_filter
455 (pf, fd_data, add, raw_packet,
456 I40E_TCPIP6_DUMMY_PACKET_LEN,
457 I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
458
459 if (ret) {
460 kfree(raw_packet);
461 return ret;
462 }
463
464 i40e_change_filter_num(ipv4, add, &pf->fd_tcp4_filter_cnt,
465 &pf->fd_tcp6_filter_cnt);
466
467 if (add) {
468 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
469 I40E_DEBUG_FD & pf->hw.debug_mask)
470 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
471 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
472 }
473 return 0;
474}
475
476#define I40E_SCTPIP_DUMMY_PACKET_LEN 46
477#define I40E_SCTPIP6_DUMMY_PACKET_LEN 66
478/**
479 * i40e_add_del_fdir_sctp - Add/Remove SCTP Flow Director filters for
480 * a specific flow spec
481 * @vsi: pointer to the targeted VSI
482 * @fd_data: the flow director data required for the FDir descriptor
483 * @add: true adds a filter, false removes it
484 * @ipv4: true is v4, false is v6
485 *
486 * Returns 0 if the filters were successfully added or removed
487 **/
488static int i40e_add_del_fdir_sctp(struct i40e_vsi *vsi,
489 struct i40e_fdir_filter *fd_data,
490 bool add,
491 bool ipv4)
492{
493 struct i40e_pf *pf = vsi->back;
494 u8 *raw_packet;
495 int ret;
496
497 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
498 if (!raw_packet)
499 return -ENOMEM;
500
501 i40e_create_dummy_sctp_packet(raw_packet, ipv4, IPPROTO_SCTP, fd_data);
502
503 if (ipv4)
504 ret = i40e_prepare_fdir_filter
505 (pf, fd_data, add, raw_packet,
506 I40E_SCTPIP_DUMMY_PACKET_LEN,
507 I40E_FILTER_PCTYPE_NONF_IPV4_SCTP);
508 else
509 ret = i40e_prepare_fdir_filter
510 (pf, fd_data, add, raw_packet,
511 I40E_SCTPIP6_DUMMY_PACKET_LEN,
512 I40E_FILTER_PCTYPE_NONF_IPV6_SCTP);
513
514 if (ret) {
515 kfree(raw_packet);
516 return ret;
517 }
518
519 i40e_change_filter_num(ipv4, add, &pf->fd_sctp4_filter_cnt,
520 &pf->fd_sctp6_filter_cnt);
521
522 return 0;
523}
524
525#define I40E_IP_DUMMY_PACKET_LEN 34
526#define I40E_IP6_DUMMY_PACKET_LEN 54
527/**
528 * i40e_add_del_fdir_ip - Add/Remove IPv4/IPv6 Flow Director filters for
529 * a specific flow spec
530 * @vsi: pointer to the targeted VSI
531 * @fd_data: the flow director data required for the FDir descriptor
532 * @add: true adds a filter, false removes it
533 * @ipv4: true is v4, false is v6
534 *
535 * Returns 0 if the filters were successfully added or removed
536 **/
537static int i40e_add_del_fdir_ip(struct i40e_vsi *vsi,
538 struct i40e_fdir_filter *fd_data,
539 bool add,
540 bool ipv4)
541{
542 struct i40e_pf *pf = vsi->back;
543 int payload_offset;
544 u8 *raw_packet;
545 int iter_start;
546 int iter_end;
547 int ret;
548 int i;
549
550 if (ipv4) {
551 iter_start = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
552 iter_end = I40E_FILTER_PCTYPE_FRAG_IPV4;
553 } else {
554 iter_start = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
555 iter_end = I40E_FILTER_PCTYPE_FRAG_IPV6;
556 }
557
558 for (i = iter_start; i <= iter_end; i++) {
559 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
560 if (!raw_packet)
561 return -ENOMEM;
562
563 /* IPv6 uses the "no next header" protocol, IPv4 uses IPPROTO_IP */
564 (void)i40e_create_dummy_packet
565 (raw_packet, ipv4, (ipv4) ? IPPROTO_IP : IPPROTO_NONE,
566 fd_data);
567
568 payload_offset = (ipv4) ? I40E_IP_DUMMY_PACKET_LEN :
569 I40E_IP6_DUMMY_PACKET_LEN;
570 ret = i40e_prepare_fdir_filter(pf, fd_data, add, raw_packet,
571 payload_offset, i);
572 if (ret)
573 goto err;
574 }
575
576 i40e_change_filter_num(ipv4, add, &pf->fd_ip4_filter_cnt,
577 &pf->fd_ip6_filter_cnt);
578
579 return 0;
580err:
581 kfree(raw_packet);
582 return ret;
583}
584
585/**
586 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
587 * @vsi: pointer to the targeted VSI
588 * @input: filter to add or delete
589 * @add: true adds a filter, false removes it
590 *
591 **/
592int i40e_add_del_fdir(struct i40e_vsi *vsi,
593 struct i40e_fdir_filter *input, bool add)
594{
595 enum ip_ver { ipv6 = 0, ipv4 = 1 };
596 struct i40e_pf *pf = vsi->back;
597 int ret;
598
599 switch (input->flow_type & ~FLOW_EXT) {
600 case TCP_V4_FLOW:
601 ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv4);
602 break;
603 case UDP_V4_FLOW:
604 ret = i40e_add_del_fdir_udp(vsi, input, add, ipv4);
605 break;
606 case SCTP_V4_FLOW:
607 ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv4);
608 break;
609 case TCP_V6_FLOW:
610 ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv6);
611 break;
612 case UDP_V6_FLOW:
613 ret = i40e_add_del_fdir_udp(vsi, input, add, ipv6);
614 break;
615 case SCTP_V6_FLOW:
616 ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv6);
617 break;
618 case IP_USER_FLOW:
619 switch (input->ipl4_proto) {
620 case IPPROTO_TCP:
621 ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv4);
622 break;
623 case IPPROTO_UDP:
624 ret = i40e_add_del_fdir_udp(vsi, input, add, ipv4);
625 break;
626 case IPPROTO_SCTP:
627 ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv4);
628 break;
629 case IPPROTO_IP:
630 ret = i40e_add_del_fdir_ip(vsi, input, add, ipv4);
631 break;
632 default:
633 /* We cannot support masking based on protocol */
634 dev_info(&pf->pdev->dev, "Unsupported IPv4 protocol 0x%02x\n",
635 input->ipl4_proto);
636 return -EINVAL;
637 }
638 break;
639 case IPV6_USER_FLOW:
640 switch (input->ipl4_proto) {
641 case IPPROTO_TCP:
642 ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv6);
643 break;
644 case IPPROTO_UDP:
645 ret = i40e_add_del_fdir_udp(vsi, input, add, ipv6);
646 break;
647 case IPPROTO_SCTP:
648 ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv6);
649 break;
650 case IPPROTO_IP:
651 ret = i40e_add_del_fdir_ip(vsi, input, add, ipv6);
652 break;
653 default:
654 /* We cannot support masking based on protocol */
655 dev_info(&pf->pdev->dev, "Unsupported IPv6 protocol 0x%02x\n",
656 input->ipl4_proto);
657 return -EINVAL;
658 }
659 break;
660 default:
661 dev_info(&pf->pdev->dev, "Unsupported flow type 0x%02x\n",
662 input->flow_type);
663 return -EINVAL;
664 }
665
666 /* The buffer allocated here will normally be freed by
667 * i40e_clean_fdir_tx_irq() as it reclaims resources after transmit
668 * completion. In the event of an error adding the buffer to the FDIR
669 * ring, it will immediately be freed. It may also be freed by
670 * i40e_clean_tx_ring() when closing the VSI.
671 */
672 return ret;
673}
674
675/**
676 * i40e_fd_handle_status - check the Programming Status for FD
677 * @rx_ring: the Rx ring for this descriptor
678 * @qword0_raw: qword0
679 * @qword1: qword1 after le_to_cpu
680 * @prog_id: the id originally used for programming
681 *
682 * This is used to verify if the FD programming or invalidation
683 * requested by SW to the HW is successful or not and take actions accordingly.
684 **/
685static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u64 qword0_raw,
686 u64 qword1, u8 prog_id)
687{
688 struct i40e_pf *pf = rx_ring->vsi->back;
689 struct pci_dev *pdev = pf->pdev;
690 struct i40e_16b_rx_wb_qw0 *qw0;
691 u32 fcnt_prog, fcnt_avail;
692 u32 error;
693
694 qw0 = (struct i40e_16b_rx_wb_qw0 *)&qword0_raw;
695 error = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
696 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
697
698 if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
699 pf->fd_inv = le32_to_cpu(qw0->hi_dword.fd_id);
700 if (qw0->hi_dword.fd_id != 0 ||
701 (I40E_DEBUG_FD & pf->hw.debug_mask))
702 dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
703 pf->fd_inv);
704
705 /* Check if the programming error is for ATR.
706 * If so, auto disable ATR and set a state for
707 * flush in progress. Next time we come here if flush is in
708 * progress do nothing, once flush is complete the state will
709 * be cleared.
710 */
711 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
712 return;
713
714 pf->fd_add_err++;
715 /* store the current atr filter count */
716 pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
717
718 if (qw0->hi_dword.fd_id == 0 &&
719 test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) {
720 /* These set_bit() calls aren't atomic with the
721 * test_bit() here, but worse case we potentially
722 * disable ATR and queue a flush right after SB
723 * support is re-enabled. That shouldn't cause an
724 * issue in practice
725 */
726 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
727 set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
728 }
729
730 /* filter programming failed most likely due to table full */
731 fcnt_prog = i40e_get_global_fd_count(pf);
732 fcnt_avail = pf->fdir_pf_filter_count;
733 /* If ATR is running fcnt_prog can quickly change,
734 * if we are very close to full, it makes sense to disable
735 * FD ATR/SB and then re-enable it when there is room.
736 */
737 if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
738 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
739 !test_and_set_bit(__I40E_FD_SB_AUTO_DISABLED,
740 pf->state))
741 if (I40E_DEBUG_FD & pf->hw.debug_mask)
742 dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
743 }
744 } else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
745 if (I40E_DEBUG_FD & pf->hw.debug_mask)
746 dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
747 qw0->hi_dword.fd_id);
748 }
749}
750
751/**
752 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
753 * @ring: the ring that owns the buffer
754 * @tx_buffer: the buffer to free
755 **/
756static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
757 struct i40e_tx_buffer *tx_buffer)
758{
759 if (tx_buffer->skb) {
760 if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
761 kfree(tx_buffer->raw_buf);
762 else if (ring_is_xdp(ring))
763 xdp_return_frame(tx_buffer->xdpf);
764 else
765 dev_kfree_skb_any(tx_buffer->skb);
766 if (dma_unmap_len(tx_buffer, len))
767 dma_unmap_single(ring->dev,
768 dma_unmap_addr(tx_buffer, dma),
769 dma_unmap_len(tx_buffer, len),
770 DMA_TO_DEVICE);
771 } else if (dma_unmap_len(tx_buffer, len)) {
772 dma_unmap_page(ring->dev,
773 dma_unmap_addr(tx_buffer, dma),
774 dma_unmap_len(tx_buffer, len),
775 DMA_TO_DEVICE);
776 }
777
778 tx_buffer->next_to_watch = NULL;
779 tx_buffer->skb = NULL;
780 dma_unmap_len_set(tx_buffer, len, 0);
781 /* tx_buffer must be completely set up in the transmit path */
782}
783
784/**
785 * i40e_clean_tx_ring - Free all Tx ring buffers
786 * @tx_ring: ring to be cleaned
787 **/
788void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
789{
790 unsigned long bi_size;
791 u16 i;
792
793 if (ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
794 i40e_xsk_clean_tx_ring(tx_ring);
795 } else {
796 /* ring already cleared, nothing to do */
797 if (!tx_ring->tx_bi)
798 return;
799
800 /* Free all the Tx ring sk_buffs */
801 for (i = 0; i < tx_ring->count; i++)
802 i40e_unmap_and_free_tx_resource(tx_ring,
803 &tx_ring->tx_bi[i]);
804 }
805
806 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
807 memset(tx_ring->tx_bi, 0, bi_size);
808
809 /* Zero out the descriptor ring */
810 memset(tx_ring->desc, 0, tx_ring->size);
811
812 tx_ring->next_to_use = 0;
813 tx_ring->next_to_clean = 0;
814
815 if (!tx_ring->netdev)
816 return;
817
818 /* cleanup Tx queue statistics */
819 netdev_tx_reset_queue(txring_txq(tx_ring));
820}
821
822/**
823 * i40e_free_tx_resources - Free Tx resources per queue
824 * @tx_ring: Tx descriptor ring for a specific queue
825 *
826 * Free all transmit software resources
827 **/
828void i40e_free_tx_resources(struct i40e_ring *tx_ring)
829{
830 i40e_clean_tx_ring(tx_ring);
831 kfree(tx_ring->tx_bi);
832 tx_ring->tx_bi = NULL;
833
834 if (tx_ring->desc) {
835 dma_free_coherent(tx_ring->dev, tx_ring->size,
836 tx_ring->desc, tx_ring->dma);
837 tx_ring->desc = NULL;
838 }
839}
840
841/**
842 * i40e_get_tx_pending - how many tx descriptors not processed
843 * @ring: the ring of descriptors
844 * @in_sw: use SW variables
845 *
846 * Since there is no access to the ring head register
847 * in XL710, we need to use our local copies
848 **/
849u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
850{
851 u32 head, tail;
852
853 if (!in_sw) {
854 head = i40e_get_head(ring);
855 tail = readl(ring->tail);
856 } else {
857 head = ring->next_to_clean;
858 tail = ring->next_to_use;
859 }
860
861 if (head != tail)
862 return (head < tail) ?
863 tail - head : (tail + ring->count - head);
864
865 return 0;
866}
867
868/**
869 * i40e_detect_recover_hung - Function to detect and recover hung queues
870 * @vsi: pointer to vsi struct with tx queues
871 *
872 * VSI has netdev and netdev has TX queues. This function checks each of
873 * those TX queues and, if a queue is hung, triggers recovery by issuing a SW interrupt.
874 **/
875void i40e_detect_recover_hung(struct i40e_vsi *vsi)
876{
877 struct i40e_ring *tx_ring = NULL;
878 struct net_device *netdev;
879 unsigned int i;
880 int packets;
881
882 if (!vsi)
883 return;
884
885 if (test_bit(__I40E_VSI_DOWN, vsi->state))
886 return;
887
888 netdev = vsi->netdev;
889 if (!netdev)
890 return;
891
892 if (!netif_carrier_ok(netdev))
893 return;
894
895 for (i = 0; i < vsi->num_queue_pairs; i++) {
896 tx_ring = vsi->tx_rings[i];
897 if (tx_ring && tx_ring->desc) {
898 /* If packet counter has not changed the queue is
899 * likely stalled, so force an interrupt for this
900 * queue.
901 *
902 * prev_pkt_ctr would be negative if there was no
903 * pending work.
904 */
905 packets = tx_ring->stats.packets & INT_MAX;
906 if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
907 i40e_force_wb(vsi, tx_ring->q_vector);
908 continue;
909 }
910
911 /* Memory barrier between read of packet count and call
912 * to i40e_get_tx_pending()
913 */
914 smp_rmb();
915 tx_ring->tx_stats.prev_pkt_ctr =
916 i40e_get_tx_pending(tx_ring, true) ? packets : -1;
917 }
918 }
919}
920
921/**
922 * i40e_clean_tx_irq - Reclaim resources after transmit completes
923 * @vsi: the VSI we care about
924 * @tx_ring: Tx ring to clean
925 * @napi_budget: Used to determine if we are in netpoll
926 * @tx_cleaned: Out parameter set to the number of Tx packets cleaned
927 *
928 * Returns true if there's any budget left (e.g. the clean is finished)
929 **/
930static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
931 struct i40e_ring *tx_ring, int napi_budget,
932 unsigned int *tx_cleaned)
933{
934 int i = tx_ring->next_to_clean;
935 struct i40e_tx_buffer *tx_buf;
936 struct i40e_tx_desc *tx_head;
937 struct i40e_tx_desc *tx_desc;
938 unsigned int total_bytes = 0, total_packets = 0;
939 unsigned int budget = vsi->work_limit;
940
941 tx_buf = &tx_ring->tx_bi[i];
942 tx_desc = I40E_TX_DESC(tx_ring, i);
943 i -= tx_ring->count;
944
945 tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
946
947 do {
948 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
949
950 /* if next_to_watch is not set then there is no work pending */
951 if (!eop_desc)
952 break;
953
954 /* prevent any other reads prior to eop_desc */
955 smp_rmb();
956
957 i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
958 /* we have caught up to head, no work left to do */
959 if (tx_head == tx_desc)
960 break;
961
962 /* clear next_to_watch to prevent false hangs */
963 tx_buf->next_to_watch = NULL;
964
965 /* update the statistics for this packet */
966 total_bytes += tx_buf->bytecount;
967 total_packets += tx_buf->gso_segs;
968
969 /* free the skb/XDP data */
970 if (ring_is_xdp(tx_ring))
971 xdp_return_frame(tx_buf->xdpf);
972 else
973 napi_consume_skb(tx_buf->skb, napi_budget);
974
975 /* unmap skb header data */
976 dma_unmap_single(tx_ring->dev,
977 dma_unmap_addr(tx_buf, dma),
978 dma_unmap_len(tx_buf, len),
979 DMA_TO_DEVICE);
980
981 /* clear tx_buffer data */
982 tx_buf->skb = NULL;
983 dma_unmap_len_set(tx_buf, len, 0);
984
985 /* unmap remaining buffers */
986 while (tx_desc != eop_desc) {
987 i40e_trace(clean_tx_irq_unmap,
988 tx_ring, tx_desc, tx_buf);
989
990 tx_buf++;
991 tx_desc++;
992 i++;
993 if (unlikely(!i)) {
994 i -= tx_ring->count;
995 tx_buf = tx_ring->tx_bi;
996 tx_desc = I40E_TX_DESC(tx_ring, 0);
997 }
998
999 /* unmap any remaining paged data */
1000 if (dma_unmap_len(tx_buf, len)) {
1001 dma_unmap_page(tx_ring->dev,
1002 dma_unmap_addr(tx_buf, dma),
1003 dma_unmap_len(tx_buf, len),
1004 DMA_TO_DEVICE);
1005 dma_unmap_len_set(tx_buf, len, 0);
1006 }
1007 }
1008
1009 /* move us one more past the eop_desc for start of next pkt */
1010 tx_buf++;
1011 tx_desc++;
1012 i++;
1013 if (unlikely(!i)) {
1014 i -= tx_ring->count;
1015 tx_buf = tx_ring->tx_bi;
1016 tx_desc = I40E_TX_DESC(tx_ring, 0);
1017 }
1018
1019 prefetch(tx_desc);
1020
1021 /* update budget accounting */
1022 budget--;
1023 } while (likely(budget));
1024
1025 i += tx_ring->count;
1026 tx_ring->next_to_clean = i;
1027 i40e_update_tx_stats(tx_ring, total_packets, total_bytes);
1028 i40e_arm_wb(tx_ring, vsi, budget);
1029
1030 if (ring_is_xdp(tx_ring))
1031 return !!budget;
1032
1033 /* notify netdev of completed buffers */
1034 netdev_tx_completed_queue(txring_txq(tx_ring),
1035 total_packets, total_bytes);
1036
1037#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
1038 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
1039 (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
1040 /* Make sure that anybody stopping the queue after this
1041 * sees the new next_to_clean.
1042 */
1043 smp_mb();
1044 if (__netif_subqueue_stopped(tx_ring->netdev,
1045 tx_ring->queue_index) &&
1046 !test_bit(__I40E_VSI_DOWN, vsi->state)) {
1047 netif_wake_subqueue(tx_ring->netdev,
1048 tx_ring->queue_index);
1049 ++tx_ring->tx_stats.restart_queue;
1050 }
1051 }
1052
1053 *tx_cleaned = total_packets;
1054 return !!budget;
1055}
1056
1057/**
1058 * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
1059 * @vsi: the VSI we care about
1060 * @q_vector: the vector on which to enable writeback
1061 *
1062 **/
1063static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
1064 struct i40e_q_vector *q_vector)
1065{
1066 u16 flags = q_vector->tx.ring[0].flags;
1067 u32 val;
1068
1069 if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
1070 return;
1071
1072 if (q_vector->arm_wb_state)
1073 return;
1074
1075 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
1076 val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
1077 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */
1078
1079 wr32(&vsi->back->hw,
1080 I40E_PFINT_DYN_CTLN(q_vector->reg_idx),
1081 val);
1082 } else {
1083 val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
1084 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */
1085
1086 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
1087 }
1088 q_vector->arm_wb_state = true;
1089}
1090
1091/**
1092 * i40e_force_wb - Issue SW Interrupt so HW does a wb
1093 * @vsi: the VSI we care about
1094 * @q_vector: the vector on which to force writeback
1095 *
1096 **/
1097void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
1098{
1099 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
1100 u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
1101 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
1102 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
1103 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
1104 /* allow 00 to be written to the index */
1105
1106 wr32(&vsi->back->hw,
1107 I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val);
1108 } else {
1109 u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
1110 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
1111 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
1112 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
1113 /* allow 00 to be written to the index */
1114
1115 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
1116 }
1117}
1118
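/**
 * i40e_container_is_rx - check whether a ring container is the Rx one
 * @q_vector: the vector owning the container
 * @rc: the ring container to check
 *
 * Returns true if @rc is the vector's Rx container, false for Tx.
 **/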
1119static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector,
1120 struct i40e_ring_container *rc)
1121{
1122 return &q_vector->rx == rc;
1123}
1124
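/**
 * i40e_itr_divisor - return the ITR scaling divisor for the current link
 * @q_vector: the vector whose VSI link speed is checked
 *
 * Used by i40e_update_itr() to scale avg_wire_size into an ITR increment;
 * faster link speeds use a larger divisor and therefore a smaller delay.
 **/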
1125static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)
1126{
1127 unsigned int divisor;
1128
1129 switch (q_vector->vsi->back->hw.phy.link_info.link_speed) {
1130 case I40E_LINK_SPEED_40GB:
1131 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024;
1132 break;
1133 case I40E_LINK_SPEED_25GB:
1134 case I40E_LINK_SPEED_20GB:
1135 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512;
1136 break;
1137 default:
1138 case I40E_LINK_SPEED_10GB:
1139 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256;
1140 break;
1141 case I40E_LINK_SPEED_1GB:
1142 case I40E_LINK_SPEED_100MB:
1143 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32;
1144 break;
1145 }
1146
1147 return divisor;
1148}
1149
1150/**
1151 * i40e_update_itr - update the dynamic ITR value based on statistics
1152 * @q_vector: structure containing interrupt and ring information
1153 * @rc: structure containing ring performance data
1154 *
1155 * Stores a new ITR value based on packets and byte
1156 * counts during the last interrupt. The advantage of per interrupt
1157 * computation is faster updates and more accurate ITR for the current
1158 * traffic pattern. Constants in this function were computed
1159 * based on theoretical maximum wire speed and thresholds were set based
1160 * on testing data as well as attempting to minimize response time
1161 * while increasing bulk throughput.
1162 **/
1163static void i40e_update_itr(struct i40e_q_vector *q_vector,
1164 struct i40e_ring_container *rc)
1165{
1166 unsigned int avg_wire_size, packets, bytes, itr;
1167 unsigned long next_update = jiffies;
1168
1169 /* If we don't have any rings just leave ourselves set for maximum
1170 * possible latency so we take ourselves out of the equation.
1171 */
1172 if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
1173 return;
1174
1175 /* For Rx we want to push the delay up and default to low latency.
1176 * for Tx we want to pull the delay down and default to high latency.
1177 */
1178 itr = i40e_container_is_rx(q_vector, rc) ?
1179 I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY :
1180 I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY;
1181
1182 /* If we didn't update within up to 1 - 2 jiffies we can assume
1183 * that either packets are coming in so slow there hasn't been
1184 * any work, or that there is so much work that NAPI is dealing
1185 * with interrupt moderation and we don't need to do anything.
1186 */
1187 if (time_after(next_update, rc->next_update))
1188 goto clear_counts;
1189
1190 /* If itr_countdown is set it means we programmed an ITR within
1191 * the last 4 interrupt cycles. This has a side effect of us
1192 * potentially firing an early interrupt. In order to work around
1193 * this we need to throw out any data received for a few
1194 * interrupts following the update.
1195 */
1196 if (q_vector->itr_countdown) {
1197 itr = rc->target_itr;
1198 goto clear_counts;
1199 }
1200
1201 packets = rc->total_packets;
1202 bytes = rc->total_bytes;
1203
1204 if (i40e_container_is_rx(q_vector, rc)) {
1205 /* If Rx there are 1 to 4 packets and bytes are less than
1206 * 9000 assume insufficient data to use bulk rate limiting
1207 * approach unless Tx is already in bulk rate limiting. We
1208 * are likely latency driven.
1209 */
1210 if (packets && packets < 4 && bytes < 9000 &&
1211 (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) {
1212 itr = I40E_ITR_ADAPTIVE_LATENCY;
1213 goto adjust_by_size;
1214 }
1215 } else if (packets < 4) {
1216 /* If we have Tx and Rx ITR maxed and Tx ITR is running in
1217 * bulk mode and we are receiving 4 or fewer packets just
1218 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
1219 * that the Rx can relax.
1220 */
1221 if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS &&
1222 (q_vector->rx.target_itr & I40E_ITR_MASK) ==
1223 I40E_ITR_ADAPTIVE_MAX_USECS)
1224 goto clear_counts;
1225 } else if (packets > 32) {
1226 /* If we have processed over 32 packets in a single interrupt
1227 * for Tx assume we need to switch over to "bulk" mode.
1228 */
1229 rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY;
1230 }
1231
1232 /* We have no packets to actually measure against. This means
1233 * either one of the other queues on this vector is active or
1234 * we are a Tx queue doing TSO with too high of an interrupt rate.
1235 *
1236 * Between 4 and 56 we can assume that our current interrupt delay
1237 * is only slightly too low. As such we should increase it by a small
1238 * fixed amount.
1239 */
1240 if (packets < 56) {
1241 itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC;
1242 if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
1243 itr &= I40E_ITR_ADAPTIVE_LATENCY;
1244 itr += I40E_ITR_ADAPTIVE_MAX_USECS;
1245 }
1246 goto clear_counts;
1247 }
1248
1249 if (packets <= 256) {
1250 itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
1251 itr &= I40E_ITR_MASK;
1252
1253 /* Between 56 and 112 is our "goldilocks" zone where we are
1254 * working out "just right". Just report that our current
1255 * ITR is good for us.
1256 */
1257 if (packets <= 112)
1258 goto clear_counts;
1259
1260 /* If packet count is 128 or greater we are likely looking
1261 * at a slight overrun of the delay we want. Try halving
1262 * our delay to see if that will cut the number of packets
1263 * in half per interrupt.
1264 */
1265 itr /= 2;
1266 itr &= I40E_ITR_MASK;
1267 if (itr < I40E_ITR_ADAPTIVE_MIN_USECS)
1268 itr = I40E_ITR_ADAPTIVE_MIN_USECS;
1269
1270 goto clear_counts;
1271 }
1272
1273 /* The paths below assume we are dealing with a bulk ITR since
1274 * number of packets is greater than 256. We are just going to have
1275 * to compute a value and try to bring the count under control,
1276 * though for smaller packet sizes there isn't much we can do as
1277 * NAPI polling will likely be kicking in sooner rather than later.
1278 */
1279 itr = I40E_ITR_ADAPTIVE_BULK;
1280
1281adjust_by_size:
1282 /* If packet counts are 256 or greater we can assume we have a gross
1283 * overestimation of what the rate should be. Instead of trying to fine
1284 * tune it just use the formula below to try and dial in an exact value
1285 * given the current packet size of the frame.
1286 */
1287 avg_wire_size = bytes / packets;
1288
1289 /* The following is a crude approximation of:
1290 * wmem_default / (size + overhead) = desired_pkts_per_int
1291 * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
1292 * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
1293 *
1294 * Assuming wmem_default is 212992 and overhead is 640 bytes per
1295 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
1296 * formula down to
1297 *
1298 * (170 * (size + 24)) / (size + 640) = ITR
1299 *
1300 * We first do some math on the packet size and then finally bitshift
1301 * by 8 after rounding up. We also have to account for the link speed
1302 * difference as ITR scales based on this.
1303 */
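	/* Rough worked example: on a 40 Gb link an average frame of 200 bytes
	 * lands in the 60..380 bucket below, giving 200 * 40 + 1696 = 9696.
	 * After the scale-down via i40e_itr_divisor() (a net divide by
	 * roughly 1024 at 40 Gb) that contributes an ITR of about 10 usecs,
	 * i.e. on the order of 100K ints/sec before the low latency halving.
	 */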
1304 if (avg_wire_size <= 60) {
1305 /* Start at 250k ints/sec */
1306 avg_wire_size = 4096;
1307 } else if (avg_wire_size <= 380) {
1308 /* 250K ints/sec to 60K ints/sec */
1309 avg_wire_size *= 40;
1310 avg_wire_size += 1696;
1311 } else if (avg_wire_size <= 1084) {
1312 /* 60K ints/sec to 36K ints/sec */
1313 avg_wire_size *= 15;
1314 avg_wire_size += 11452;
1315 } else if (avg_wire_size <= 1980) {
1316 /* 36K ints/sec to 30K ints/sec */
1317 avg_wire_size *= 5;
1318 avg_wire_size += 22420;
1319 } else {
1320 /* plateau at a limit of 30K ints/sec */
1321 avg_wire_size = 32256;
1322 }
1323
1324 /* If we are in low latency mode halve our delay which doubles the
1325 * rate to somewhere between 100K to 16K ints/sec
1326 */
1327 if (itr & I40E_ITR_ADAPTIVE_LATENCY)
1328 avg_wire_size /= 2;
1329
1330 /* Resultant value is 256 times larger than it needs to be. This
1331 * gives us room to adjust the value as needed to either increase
1332 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
1333 *
1334 * Use addition as we have already recorded the new latency flag
1335 * for the ITR value.
1336 */
1337 itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) *
1338 I40E_ITR_ADAPTIVE_MIN_INC;
1339
1340 if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
1341 itr &= I40E_ITR_ADAPTIVE_LATENCY;
1342 itr += I40E_ITR_ADAPTIVE_MAX_USECS;
1343 }
1344
1345clear_counts:
1346 /* write back value */
1347 rc->target_itr = itr;
1348
1349 /* next update should occur within next jiffy */
1350 rc->next_update = next_update + 1;
1351
1352 rc->total_bytes = 0;
1353 rc->total_packets = 0;
1354}
1355
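/**
 * i40e_rx_bi - return the rx buffer info for a given ring index
 * @rx_ring: Rx descriptor ring the buffer belongs to
 * @idx: index into the ring's rx_bi array
 **/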
1356static struct i40e_rx_buffer *i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
1357{
1358 return &rx_ring->rx_bi[idx];
1359}
1360
1361/**
1362 * i40e_reuse_rx_page - page flip buffer and store it back on the ring
1363 * @rx_ring: rx descriptor ring to store buffers on
1364 * @old_buff: donor buffer to have page reused
1365 *
1366 * Synchronizes page for reuse by the adapter
1367 **/
1368static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
1369 struct i40e_rx_buffer *old_buff)
1370{
1371 struct i40e_rx_buffer *new_buff;
1372 u16 nta = rx_ring->next_to_alloc;
1373
1374 new_buff = i40e_rx_bi(rx_ring, nta);
1375
1376 /* update, and store next to alloc */
1377 nta++;
1378 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1379
1380 /* transfer page from old buffer to new buffer */
1381 new_buff->dma = old_buff->dma;
1382 new_buff->page = old_buff->page;
1383 new_buff->page_offset = old_buff->page_offset;
1384 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
1385
1386 /* clear contents of buffer_info */
1387 old_buff->page = NULL;
1388}
1389
1390/**
1391 * i40e_clean_programming_status - clean the programming status descriptor
1392 * @rx_ring: the rx ring that has this descriptor
1393 * @qword0_raw: qword0
1394 * @qword1: qword1 representing status_error_len in CPU ordering
1395 *
1396 * Flow director should handle FD_FILTER_STATUS to check its filter programming
1397 * status being successful or not and take actions accordingly. FCoE should
1398 * handle its context/filter programming/invalidation status and take actions.
1401 **/
1402void i40e_clean_programming_status(struct i40e_ring *rx_ring, u64 qword0_raw,
1403 u64 qword1)
1404{
1405 u8 id;
1406
1407 id = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
1408 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
1409
1410 if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
1411 i40e_fd_handle_status(rx_ring, qword0_raw, qword1, id);
1412}
1413
1414/**
1415 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
1416 * @tx_ring: the tx ring to set up
1417 *
1418 * Return 0 on success, negative on error
1419 **/
1420int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
1421{
1422 struct device *dev = tx_ring->dev;
1423 int bi_size;
1424
1425 if (!dev)
1426 return -ENOMEM;
1427
1428 /* warn if we are about to overwrite the pointer */
1429 WARN_ON(tx_ring->tx_bi);
1430 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
1431 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
1432 if (!tx_ring->tx_bi)
1433 goto err;
1434
1435 u64_stats_init(&tx_ring->syncp);
1436
1437 /* round up to nearest 4K */
1438 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
1439 /* add u32 for head writeback, align after this takes care of
1440 * guaranteeing this is at least one cache line in size
1441 */
1442 tx_ring->size += sizeof(u32);
1443 tx_ring->size = ALIGN(tx_ring->size, 4096);
1444 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
1445 &tx_ring->dma, GFP_KERNEL);
1446 if (!tx_ring->desc) {
1447 dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
1448 tx_ring->size);
1449 goto err;
1450 }
1451
1452 tx_ring->next_to_use = 0;
1453 tx_ring->next_to_clean = 0;
1454 tx_ring->tx_stats.prev_pkt_ctr = -1;
1455 return 0;
1456
1457err:
1458 kfree(tx_ring->tx_bi);
1459 tx_ring->tx_bi = NULL;
1460 return -ENOMEM;
1461}
1462
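/**
 * i40e_clear_rx_bi - zero out all rx buffer info entries in a ring
 * @rx_ring: Rx descriptor ring whose buffer info is cleared
 **/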
1463static void i40e_clear_rx_bi(struct i40e_ring *rx_ring)
1464{
1465 memset(rx_ring->rx_bi, 0, sizeof(*rx_ring->rx_bi) * rx_ring->count);
1466}
1467
1468/**
1469 * i40e_clean_rx_ring - Free Rx buffers
1470 * @rx_ring: ring to be cleaned
1471 **/
1472void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
1473{
1474 u16 i;
1475
1476 /* ring already cleared, nothing to do */
1477 if (!rx_ring->rx_bi)
1478 return;
1479
1480 dev_kfree_skb(rx_ring->skb);
1481 rx_ring->skb = NULL;
1482
1483 if (rx_ring->xsk_pool) {
1484 i40e_xsk_clean_rx_ring(rx_ring);
1485 goto skip_free;
1486 }
1487
1488 /* Free all the Rx ring sk_buffs */
1489 for (i = 0; i < rx_ring->count; i++) {
1490 struct i40e_rx_buffer *rx_bi = i40e_rx_bi(rx_ring, i);
1491
1492 if (!rx_bi->page)
1493 continue;
1494
1495 /* Invalidate cache lines that may have been written to by
1496 * device so that we avoid corrupting memory.
1497 */
1498 dma_sync_single_range_for_cpu(rx_ring->dev,
1499 rx_bi->dma,
1500 rx_bi->page_offset,
1501 rx_ring->rx_buf_len,
1502 DMA_FROM_DEVICE);
1503
1504 /* free resources associated with mapping */
1505 dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
1506 i40e_rx_pg_size(rx_ring),
1507 DMA_FROM_DEVICE,
1508 I40E_RX_DMA_ATTR);
1509
1510 __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
1511
1512 rx_bi->page = NULL;
1513 rx_bi->page_offset = 0;
1514 }
1515
1516skip_free:
1517 if (rx_ring->xsk_pool)
1518 i40e_clear_rx_bi_zc(rx_ring);
1519 else
1520 i40e_clear_rx_bi(rx_ring);
1521
1522 /* Zero out the descriptor ring */
1523 memset(rx_ring->desc, 0, rx_ring->size);
1524
1525 rx_ring->next_to_alloc = 0;
1526 rx_ring->next_to_clean = 0;
1527 rx_ring->next_to_use = 0;
1528}
1529
1530/**
1531 * i40e_free_rx_resources - Free Rx resources
1532 * @rx_ring: ring to clean the resources from
1533 *
1534 * Free all receive software resources
1535 **/
1536void i40e_free_rx_resources(struct i40e_ring *rx_ring)
1537{
1538 i40e_clean_rx_ring(rx_ring);
1539 if (rx_ring->vsi->type == I40E_VSI_MAIN)
1540 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
1541 rx_ring->xdp_prog = NULL;
1542 kfree(rx_ring->rx_bi);
1543 rx_ring->rx_bi = NULL;
1544
1545 if (rx_ring->desc) {
1546 dma_free_coherent(rx_ring->dev, rx_ring->size,
1547 rx_ring->desc, rx_ring->dma);
1548 rx_ring->desc = NULL;
1549 }
1550}
1551
1552/**
1553 * i40e_setup_rx_descriptors - Allocate Rx descriptors
1554 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
1555 *
1556 * Returns 0 on success, negative on failure
1557 **/
1558int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
1559{
1560 struct device *dev = rx_ring->dev;
1561 int err;
1562
1563 u64_stats_init(&rx_ring->syncp);
1564
1565 /* Round up to nearest 4K */
1566 rx_ring->size = rx_ring->count * sizeof(union i40e_rx_desc);
1567 rx_ring->size = ALIGN(rx_ring->size, 4096);
1568 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
1569 &rx_ring->dma, GFP_KERNEL);
1570
1571 if (!rx_ring->desc) {
1572 dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
1573 rx_ring->size);
1574 return -ENOMEM;
1575 }
1576
1577 rx_ring->next_to_alloc = 0;
1578 rx_ring->next_to_clean = 0;
1579 rx_ring->next_to_use = 0;
1580
1581 /* XDP RX-queue info only needed for RX rings exposed to XDP */
1582 if (rx_ring->vsi->type == I40E_VSI_MAIN) {
1583 err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
1584 rx_ring->queue_index, rx_ring->q_vector->napi.napi_id);
1585 if (err < 0)
1586 return err;
1587 }
1588
1589 rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
1590
1591 rx_ring->rx_bi =
1592 kcalloc(rx_ring->count, sizeof(*rx_ring->rx_bi), GFP_KERNEL);
1593 if (!rx_ring->rx_bi)
1594 return -ENOMEM;
1595
1596 return 0;
1597}
1598
1599/**
1600 * i40e_release_rx_desc - Store the new tail and head values
1601 * @rx_ring: ring to bump
1602 * @val: new head index
1603 **/
1604void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
1605{
1606 rx_ring->next_to_use = val;
1607
1608 /* update next to alloc since we have filled the ring */
1609 rx_ring->next_to_alloc = val;
1610
1611 /* Force memory writes to complete before letting h/w
1612 * know there are new descriptors to fetch. (Only
1613 * applicable for weak-ordered memory model archs,
1614 * such as IA-64).
1615 */
1616 wmb();
1617 writel(val, rx_ring->tail);
1618}
1619
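/**
 * i40e_rx_frame_truesize - report the truesize used for an Rx frame
 * @rx_ring: the ring the frame was received on
 * @size: packet length from the Rx descriptor
 *
 * On systems with pages smaller than 8K the buffer is always half of the
 * ring's Rx page allocation; otherwise it is the aligned packet size, plus
 * room for headroom and skb_shared_info when an Rx offset is in use.
 **/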
1620static unsigned int i40e_rx_frame_truesize(struct i40e_ring *rx_ring,
1621 unsigned int size)
1622{
1623 unsigned int truesize;
1624
1625#if (PAGE_SIZE < 8192)
1626 truesize = i40e_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
1627#else
1628 truesize = rx_ring->rx_offset ?
1629 SKB_DATA_ALIGN(size + rx_ring->rx_offset) +
1630 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
1631 SKB_DATA_ALIGN(size);
1632#endif
1633 return truesize;
1634}
1635
1636/**
1637 * i40e_alloc_mapped_page - recycle or make a new page
1638 * @rx_ring: ring to use
1639 * @bi: rx_buffer struct to modify
1640 *
1641 * Returns true if the page was successfully allocated or
1642 * reused.
1643 **/
1644static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
1645 struct i40e_rx_buffer *bi)
1646{
1647 struct page *page = bi->page;
1648 dma_addr_t dma;
1649
1650 /* since we are recycling buffers we should seldom need to alloc */
1651 if (likely(page)) {
1652 rx_ring->rx_stats.page_reuse_count++;
1653 return true;
1654 }
1655
1656 /* alloc new page for storage */
1657 page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
1658 if (unlikely(!page)) {
1659 rx_ring->rx_stats.alloc_page_failed++;
1660 return false;
1661 }
1662
1663 rx_ring->rx_stats.page_alloc_count++;
1664
1665 /* map page for use */
1666 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
1667 i40e_rx_pg_size(rx_ring),
1668 DMA_FROM_DEVICE,
1669 I40E_RX_DMA_ATTR);
1670
1671 /* if mapping failed free memory back to system since
1672 * there isn't much point in holding memory we can't use
1673 */
1674 if (dma_mapping_error(rx_ring->dev, dma)) {
1675 __free_pages(page, i40e_rx_pg_order(rx_ring));
1676 rx_ring->rx_stats.alloc_page_failed++;
1677 return false;
1678 }
1679
1680 bi->dma = dma;
1681 bi->page = page;
1682 bi->page_offset = rx_ring->rx_offset;
1683 page_ref_add(page, USHRT_MAX - 1);
1684 bi->pagecnt_bias = USHRT_MAX;
1685
1686 return true;
1687}
1688
1689/**
1690 * i40e_alloc_rx_buffers - Replace used receive buffers
1691 * @rx_ring: ring to place buffers on
1692 * @cleaned_count: number of buffers to replace
1693 *
1694 * Returns false if all allocations were successful, true if any fail
1695 **/
1696bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
1697{
1698 u16 ntu = rx_ring->next_to_use;
1699 union i40e_rx_desc *rx_desc;
1700 struct i40e_rx_buffer *bi;
1701
1702 /* do nothing if no valid netdev defined */
1703 if (!rx_ring->netdev || !cleaned_count)
1704 return false;
1705
1706 rx_desc = I40E_RX_DESC(rx_ring, ntu);
1707 bi = i40e_rx_bi(rx_ring, ntu);
1708
1709 do {
1710 if (!i40e_alloc_mapped_page(rx_ring, bi))
1711 goto no_buffers;
1712
1713 /* sync the buffer for use by the device */
1714 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
1715 bi->page_offset,
1716 rx_ring->rx_buf_len,
1717 DMA_FROM_DEVICE);
1718
1719 /* Refresh the desc even if buffer_addrs didn't change
1720 * because each write-back erases this info.
1721 */
1722 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
1723
1724 rx_desc++;
1725 bi++;
1726 ntu++;
1727 if (unlikely(ntu == rx_ring->count)) {
1728 rx_desc = I40E_RX_DESC(rx_ring, 0);
1729 bi = i40e_rx_bi(rx_ring, 0);
1730 ntu = 0;
1731 }
1732
1733 /* clear the status bits for the next_to_use descriptor */
1734 rx_desc->wb.qword1.status_error_len = 0;
1735
1736 cleaned_count--;
1737 } while (cleaned_count);
1738
1739 if (rx_ring->next_to_use != ntu)
1740 i40e_release_rx_desc(rx_ring, ntu);
1741
1742 return false;
1743
1744no_buffers:
1745 if (rx_ring->next_to_use != ntu)
1746 i40e_release_rx_desc(rx_ring, ntu);
1747
1748 /* make sure to come back via polling to try again after
1749 * allocation failure
1750 */
1751 return true;
1752}
1753
1754/**
1755 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
1756 * @vsi: the VSI we care about
1757 * @skb: skb currently being received and modified
1758 * @rx_desc: the receive descriptor
1759 **/
1760static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1761 struct sk_buff *skb,
1762 union i40e_rx_desc *rx_desc)
1763{
1764 struct i40e_rx_ptype_decoded decoded;
1765 u32 rx_error, rx_status;
1766 bool ipv4, ipv6;
1767 u8 ptype;
1768 u64 qword;
1769
1770 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1771 ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
1772 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1773 I40E_RXD_QW1_ERROR_SHIFT;
1774 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1775 I40E_RXD_QW1_STATUS_SHIFT;
1776 decoded = decode_rx_desc_ptype(ptype);
1777
1778 skb->ip_summed = CHECKSUM_NONE;
1779
1780 skb_checksum_none_assert(skb);
1781
1782 /* Rx csum enabled and ip headers found? */
1783 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
1784 return;
1785
1786 /* did the hardware decode the packet and checksum? */
1787 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
1788 return;
1789
1790 /* both known and outer_ip must be set for the below code to work */
1791 if (!(decoded.known && decoded.outer_ip))
1792 return;
1793
1794 ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1795 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
1796 ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1797 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
1798
1799 if (ipv4 &&
1800 (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
1801 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
1802 goto checksum_fail;
1803
1804 /* likely incorrect csum if alternate IP extension headers found */
1805 if (ipv6 &&
1806 rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
1807 /* don't increment checksum err here, non-fatal err */
1808 return;
1809
1810 /* there was some L4 error, count error and punt packet to the stack */
1811 if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
1812 goto checksum_fail;
1813
1814 /* handle packets that were not able to be checksummed due
1815 * to arrival speed, in this case the stack can compute
1816 * the csum.
1817 */
1818 if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
1819 return;
1820
1821 /* If there is an outer header present that might contain a checksum
1822 * we need to bump the checksum level by 1 to reflect the fact that
1823 * we are indicating we validated the inner checksum.
1824 */
1825 if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
1826 skb->csum_level = 1;
1827
1828 /* Only report checksum unnecessary for TCP, UDP, or SCTP */
1829 switch (decoded.inner_prot) {
1830 case I40E_RX_PTYPE_INNER_PROT_TCP:
1831 case I40E_RX_PTYPE_INNER_PROT_UDP:
1832 case I40E_RX_PTYPE_INNER_PROT_SCTP:
1833 skb->ip_summed = CHECKSUM_UNNECESSARY;
1834 fallthrough;
1835 default:
1836 break;
1837 }
1838
1839 return;
1840
1841checksum_fail:
1842 vsi->back->hw_csum_rx_error++;
1843}
1844
1845/**
1846 * i40e_ptype_to_htype - get a hash type
1847 * @ptype: the ptype value from the descriptor
1848 *
1849 * Returns a hash type to be used by skb_set_hash
1850 **/
1851static inline int i40e_ptype_to_htype(u8 ptype)
1852{
1853 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1854
1855 if (!decoded.known)
1856 return PKT_HASH_TYPE_NONE;
1857
1858 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1859 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1860 return PKT_HASH_TYPE_L4;
1861 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1862 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1863 return PKT_HASH_TYPE_L3;
1864 else
1865 return PKT_HASH_TYPE_L2;
1866}
1867
1868/**
1869 * i40e_rx_hash - set the hash value in the skb
1870 * @ring: descriptor ring
1871 * @rx_desc: specific descriptor
1872 * @skb: skb currently being received and modified
1873 * @rx_ptype: Rx packet type
1874 **/
1875static inline void i40e_rx_hash(struct i40e_ring *ring,
1876 union i40e_rx_desc *rx_desc,
1877 struct sk_buff *skb,
1878 u8 rx_ptype)
1879{
1880 u32 hash;
1881 const __le64 rss_mask =
1882 cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
1883 I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
1884
1885 if (!(ring->netdev->features & NETIF_F_RXHASH))
1886 return;
1887
1888 if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
1889 hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1890 skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
1891 }
1892}
1893
1894/**
1895 * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
1896 * @rx_ring: rx descriptor ring packet is being transacted on
1897 * @rx_desc: pointer to the EOP Rx descriptor
1898 * @skb: pointer to current skb being populated
1899 *
1900 * This function checks the ring, descriptor, and packet information in
1901 * order to populate the hash, checksum, VLAN, protocol, and
1902 * other fields within the skb.
1903 **/
1904void i40e_process_skb_fields(struct i40e_ring *rx_ring,
1905 union i40e_rx_desc *rx_desc, struct sk_buff *skb)
1906{
1907 u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1908 u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1909 I40E_RXD_QW1_STATUS_SHIFT;
1910 u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
1911 u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
1912 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
1913 u8 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
1914 I40E_RXD_QW1_PTYPE_SHIFT;
1915
1916 if (unlikely(tsynvalid))
1917 i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
1918
1919 i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
1920
1921 i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
1922
1923 skb_record_rx_queue(skb, rx_ring->queue_index);
1924
1925 if (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
1926 __le16 vlan_tag = rx_desc->wb.qword0.lo_dword.l2tag1;
1927
1928 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1929 le16_to_cpu(vlan_tag));
1930 }
1931
1932 /* modifies the skb - consumes the enet header */
1933 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1934}
1935
1936/**
1937 * i40e_cleanup_headers - Correct empty headers
1938 * @rx_ring: rx descriptor ring packet is being transacted on
1939 * @skb: pointer to current skb being fixed
1940 * @rx_desc: pointer to the EOP Rx descriptor
1941 *
1942 * Checks the descriptor for Rx errors and frees the skb if one is found;
1943 * it also pads skbs shorter than 60 bytes so they qualify as valid Ethernet frames.
1944 *
1945 * Returns true if an error was encountered and skb was freed.
1946 **/
1947static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
1948 union i40e_rx_desc *rx_desc)
1949
1950{
1951 /* ERR_MASK will only have valid bits if EOP set, and
1952 * what we are doing here is actually checking
1953 * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
1954 * the error field
1955 */
1956 if (unlikely(i40e_test_staterr(rx_desc,
1957 BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
1958 dev_kfree_skb_any(skb);
1959 return true;
1960 }
1961
1962 /* if eth_skb_pad returns an error the skb was freed */
1963 if (eth_skb_pad(skb))
1964 return true;
1965
1966 return false;
1967}
1968
1969/**
1970 * i40e_can_reuse_rx_page - Determine if page can be reused for another Rx
1971 * @rx_buffer: buffer containing the page
1972 * @rx_stats: rx stats structure for the rx ring
1973 * @rx_buffer_pgcnt: buffer page refcount pre xdp_do_redirect() call
1974 *
1975 * If page is reusable, we have a green light for calling i40e_reuse_rx_page,
1976 * which will assign the current buffer to the buffer that next_to_alloc is
1977 * pointing to; otherwise, the DMA mapping needs to be destroyed and
1978 * page freed.
1979 *
1980 * rx_stats will be updated to indicate whether the page was waived
1981 * or busy if it could not be reused.
1982 */
1983static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
1984 struct i40e_rx_queue_stats *rx_stats,
1985 int rx_buffer_pgcnt)
1986{
1987 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
1988 struct page *page = rx_buffer->page;
1989
1990 /* Is any reuse possible? */
1991 if (!dev_page_is_reusable(page)) {
1992 rx_stats->page_waive_count++;
1993 return false;
1994 }
1995
1996#if (PAGE_SIZE < 8192)
1997 /* if we are only owner of page we can reuse it */
1998 if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) {
1999 rx_stats->page_busy_count++;
2000 return false;
2001 }
2002#else
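/* For pages larger than 8K the offset only advances, so once it passes
 * I40E_LAST_OFFSET there is no longer room for another 2K buffer plus
 * skb overhead and the page cannot be reused.
 */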
2003#define I40E_LAST_OFFSET \
2004 (SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
2005 if (rx_buffer->page_offset > I40E_LAST_OFFSET) {
2006 rx_stats->page_busy_count++;
2007 return false;
2008 }
2009#endif
2010
2011 /* If we have drained the page fragment pool we need to update
2012 * the pagecnt_bias and page count so that we fully restock the
2013 * number of references the driver holds.
2014 */
2015 if (unlikely(pagecnt_bias == 1)) {
2016 page_ref_add(page, USHRT_MAX - 1);
2017 rx_buffer->pagecnt_bias = USHRT_MAX;
2018 }
2019
2020 return true;
2021}
2022
2023/**
2024 * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
2025 * @rx_ring: rx descriptor ring to transact packets on
2026 * @rx_buffer: buffer containing page to add
2027 * @skb: sk_buff to place the data into
2028 * @size: packet length from rx_desc
2029 *
2030 * This function will add the data contained in rx_buffer->page to the skb.
2031 * It will just attach the page as a frag to the skb.
2032 *
2033 * The function will then update the page offset.
2034 **/
2035static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
2036 struct i40e_rx_buffer *rx_buffer,
2037 struct sk_buff *skb,
2038 unsigned int size)
2039{
2040#if (PAGE_SIZE < 8192)
2041 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
2042#else
2043 unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset);
2044#endif
2045
2046 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
2047 rx_buffer->page_offset, size, truesize);
2048
2049 /* page is being used so we must update the page offset */
2050#if (PAGE_SIZE < 8192)
2051 rx_buffer->page_offset ^= truesize;
2052#else
2053 rx_buffer->page_offset += truesize;
2054#endif
2055}
2056
2057/**
2058 * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
2059 * @rx_ring: rx descriptor ring to transact packets on
2060 * @size: size of buffer to add to skb
2061 * @rx_buffer_pgcnt: buffer page refcount
2062 *
2063 * This function will pull an Rx buffer from the ring and synchronize it
2064 * for use by the CPU.
2065 */
2066static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
2067 const unsigned int size,
2068 int *rx_buffer_pgcnt)
2069{
2070 struct i40e_rx_buffer *rx_buffer;
2071
2072 rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
2073 *rx_buffer_pgcnt =
2074#if (PAGE_SIZE < 8192)
2075 page_count(rx_buffer->page);
2076#else
2077 0;
2078#endif
2079 prefetch_page_address(rx_buffer->page);
2080
2081 /* we are reusing so sync this buffer for CPU use */
2082 dma_sync_single_range_for_cpu(rx_ring->dev,
2083 rx_buffer->dma,
2084 rx_buffer->page_offset,
2085 size,
2086 DMA_FROM_DEVICE);
2087
2088 /* We have pulled a buffer for use, so decrement pagecnt_bias */
2089 rx_buffer->pagecnt_bias--;
2090
2091 return rx_buffer;
2092}
2093
2094/**
2095 * i40e_construct_skb - Allocate skb and populate it
2096 * @rx_ring: rx descriptor ring to transact packets on
2097 * @rx_buffer: rx buffer to pull data from
2098 * @xdp: xdp_buff pointing to the data
2099 *
2100 * This function allocates an skb. It then populates it with the page
2101 * data from the current receive descriptor, taking care to set up the
2102 * skb correctly.
2103 */
2104static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
2105 struct i40e_rx_buffer *rx_buffer,
2106 struct xdp_buff *xdp)
2107{
2108 unsigned int size = xdp->data_end - xdp->data;
2109#if (PAGE_SIZE < 8192)
2110 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
2111#else
2112 unsigned int truesize = SKB_DATA_ALIGN(size);
2113#endif
2114 unsigned int headlen;
2115 struct sk_buff *skb;
2116
2117 /* prefetch first cache line of first page */
2118 net_prefetch(xdp->data);
2119
2120 /* Note, we get here by enabling legacy-rx via:
2121 *
2122 * ethtool --set-priv-flags <dev> legacy-rx on
2123 *
2124 * In this mode, we currently get 0 extra XDP headroom as
2125 * opposed to having legacy-rx off, where we process XDP
2126 * packets going to stack via i40e_build_skb(). The latter
2127 * provides us currently with 192 bytes of headroom.
2128 *
2129 * For i40e_construct_skb() mode it means that the
2130 * xdp->data_meta will always point to xdp->data, since
2131 * the helper cannot expand the head. Should this ever
2132 * change in future for legacy-rx mode on, then lets also
2133 * add xdp->data_meta handling here.
2134 */
2135
2136 /* allocate a skb to store the frags */
2137 skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
2138 I40E_RX_HDR_SIZE,
2139 GFP_ATOMIC | __GFP_NOWARN);
2140 if (unlikely(!skb))
2141 return NULL;
2142
2143 /* Determine available headroom for copy */
2144 headlen = size;
2145 if (headlen > I40E_RX_HDR_SIZE)
2146 headlen = eth_get_headlen(skb->dev, xdp->data,
2147 I40E_RX_HDR_SIZE);
2148
2149 /* align pull length to size of long to optimize memcpy performance */
2150 memcpy(__skb_put(skb, headlen), xdp->data,
2151 ALIGN(headlen, sizeof(long)));
2152
2153 /* update all of the pointers */
2154 size -= headlen;
2155 if (size) {
2156 skb_add_rx_frag(skb, 0, rx_buffer->page,
2157 rx_buffer->page_offset + headlen,
2158 size, truesize);
2159
2160 /* buffer is used by skb, update page_offset */
2161#if (PAGE_SIZE < 8192)
2162 rx_buffer->page_offset ^= truesize;
2163#else
2164 rx_buffer->page_offset += truesize;
2165#endif
2166 } else {
2167 /* buffer is unused, reset bias back to rx_buffer */
2168 rx_buffer->pagecnt_bias++;
2169 }
2170
2171 return skb;
2172}
2173
2174/**
2175 * i40e_build_skb - Build skb around an existing buffer
2176 * @rx_ring: Rx descriptor ring to transact packets on
2177 * @rx_buffer: Rx buffer to pull data from
2178 * @xdp: xdp_buff pointing to the data
2179 *
2180 * This function builds an skb around an existing Rx buffer, taking care
2181 * to set up the skb correctly and avoid any memcpy overhead.
2182 */
2183static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
2184 struct i40e_rx_buffer *rx_buffer,
2185 struct xdp_buff *xdp)
2186{
2187 unsigned int metasize = xdp->data - xdp->data_meta;
2188#if (PAGE_SIZE < 8192)
2189 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
2190#else
2191 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
2192 SKB_DATA_ALIGN(xdp->data_end -
2193 xdp->data_hard_start);
2194#endif
2195 struct sk_buff *skb;
2196
2197 /* Prefetch first cache line of first page. If xdp->data_meta
2198 * is unused, this points exactly at xdp->data, otherwise we
2199 * likely have a consumer accessing the first few bytes of meta
2200 * data, and then actual data.
2201 */
2202 net_prefetch(xdp->data_meta);
2203
2204 /* build an skb around the page buffer */
2205 skb = napi_build_skb(xdp->data_hard_start, truesize);
2206 if (unlikely(!skb))
2207 return NULL;
2208
2209 /* update pointers within the skb to store the data */
2210 skb_reserve(skb, xdp->data - xdp->data_hard_start);
2211 __skb_put(skb, xdp->data_end - xdp->data);
2212 if (metasize)
2213 skb_metadata_set(skb, metasize);
2214
2215 /* buffer is used by skb, update page_offset */
2216#if (PAGE_SIZE < 8192)
2217 rx_buffer->page_offset ^= truesize;
2218#else
2219 rx_buffer->page_offset += truesize;
2220#endif
2221
2222 return skb;
2223}
2224
2225/**
2226 * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
2227 * @rx_ring: rx descriptor ring to transact packets on
2228 * @rx_buffer: rx buffer to pull data from
2229 * @rx_buffer_pgcnt: rx buffer page refcount pre xdp_do_redirect() call
2230 *
2231 * This function will clean up the contents of the rx_buffer. It will
2232 * either recycle the buffer or unmap it and free the associated resources.
2233 */
2234static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
2235 struct i40e_rx_buffer *rx_buffer,
2236 int rx_buffer_pgcnt)
2237{
2238 if (i40e_can_reuse_rx_page(rx_buffer, &rx_ring->rx_stats, rx_buffer_pgcnt)) {
2239 /* hand second half of page back to the ring */
2240 i40e_reuse_rx_page(rx_ring, rx_buffer);
2241 } else {
2242 /* we are not reusing the buffer so unmap it */
2243 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
2244 i40e_rx_pg_size(rx_ring),
2245 DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
2246 __page_frag_cache_drain(rx_buffer->page,
2247 rx_buffer->pagecnt_bias);
2248 /* clear contents of buffer_info */
2249 rx_buffer->page = NULL;
2250 }
2251}
2252
2253/**
2254 * i40e_is_non_eop - process handling of non-EOP buffers
2255 * @rx_ring: Rx ring being processed
2256 * @rx_desc: Rx descriptor for current buffer
2257 *
2258 * If the buffer is an EOP buffer, this function exits returning false,
2259 * otherwise it returns true to indicate that this is in fact a non-EOP buffer.
2260 */
2261static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
2262 union i40e_rx_desc *rx_desc)
2263{
2264 /* if we are the last buffer then there is nothing else to do */
2265#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
2266 if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
2267 return false;
2268
2269 rx_ring->rx_stats.non_eop_descs++;
2270
2271 return true;
2272}
2273
2274static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
2275 struct i40e_ring *xdp_ring);
2276
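/**
 * i40e_xmit_xdp_tx_ring - Convert an xdp_buff to an xdp_frame and transmit it
 * @xdp: XDP buffer to transmit
 * @xdp_ring: XDP Tx ring to transmit on
 *
 * Returns I40E_XDP_CONSUMED if the buffer could not be converted to a frame,
 * otherwise returns the result of i40e_xmit_xdp_ring().
 **/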
2277int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring)
2278{
2279 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
2280
2281 if (unlikely(!xdpf))
2282 return I40E_XDP_CONSUMED;
2283
2284 return i40e_xmit_xdp_ring(xdpf, xdp_ring);
2285}
2286
2287/**
2288 * i40e_run_xdp - run an XDP program
2289 * @rx_ring: Rx ring being processed
2290 * @xdp: XDP buffer containing the frame
2291 * @xdp_prog: XDP program to run
2292 **/
2293static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
2294{
2295 int err, result = I40E_XDP_PASS;
2296 struct i40e_ring *xdp_ring;
2297 u32 act;
2298
2299 if (!xdp_prog)
2300 goto xdp_out;
2301
2302 prefetchw(xdp->data_hard_start); /* xdp_frame write */
2303
2304 act = bpf_prog_run_xdp(xdp_prog, xdp);
2305 switch (act) {
2306 case XDP_PASS:
2307 break;
2308 case XDP_TX:
2309 xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2310 result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
2311 if (result == I40E_XDP_CONSUMED)
2312 goto out_failure;
2313 break;
2314 case XDP_REDIRECT:
2315 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
2316 if (err)
2317 goto out_failure;
2318 result = I40E_XDP_REDIR;
2319 break;
2320 default:
2321 bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
2322 fallthrough;
2323 case XDP_ABORTED:
2324out_failure:
2325 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
2326 fallthrough; /* handle aborts by dropping packet */
2327 case XDP_DROP:
2328 result = I40E_XDP_CONSUMED;
2329 break;
2330 }
2331xdp_out:
2332 return result;
2333}
2334
2335/**
2336 * i40e_rx_buffer_flip - adjust rx_buffer to point to an unused region
2337 * @rx_ring: Rx ring
2338 * @rx_buffer: Rx buffer to adjust
2339 * @size: Size of adjustment
2340 **/
2341static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring,
2342 struct i40e_rx_buffer *rx_buffer,
2343 unsigned int size)
2344{
2345 unsigned int truesize = i40e_rx_frame_truesize(rx_ring, size);
2346
2347#if (PAGE_SIZE < 8192)
2348 rx_buffer->page_offset ^= truesize;
2349#else
2350 rx_buffer->page_offset += truesize;
2351#endif
2352}
2353
2354/**
2355 * i40e_xdp_ring_update_tail - Updates the XDP Tx ring tail register
2356 * @xdp_ring: XDP Tx ring
2357 *
2358 * This function updates the XDP Tx ring tail register.
2359 **/
2360void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring)
2361{
2362 /* Force memory writes to complete before letting h/w
2363 * know there are new descriptors to fetch.
2364 */
2365 wmb();
2366 writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
2367}
2368
2369/**
2370 * i40e_update_rx_stats - Update Rx ring statistics
2371 * @rx_ring: rx descriptor ring
2372 * @total_rx_bytes: number of bytes received
2373 * @total_rx_packets: number of packets received
2374 *
2375 * This function updates the Rx ring statistics.
2376 **/
2377void i40e_update_rx_stats(struct i40e_ring *rx_ring,
2378 unsigned int total_rx_bytes,
2379 unsigned int total_rx_packets)
2380{
2381 u64_stats_update_begin(&rx_ring->syncp);
2382 rx_ring->stats.packets += total_rx_packets;
2383 rx_ring->stats.bytes += total_rx_bytes;
2384 u64_stats_update_end(&rx_ring->syncp);
2385 rx_ring->q_vector->rx.total_packets += total_rx_packets;
2386 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
2387}
2388
2389/**
2390 * i40e_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
2391 * @rx_ring: Rx ring
2392 * @xdp_res: Result of the receive batch
2393 *
2394 * This function bumps the XDP Tx tail and/or flushes the redirect map, and
2395 * should be called when a batch of packets has been processed in the
2396 * napi loop.
2397 **/
2398void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res)
2399{
2400 if (xdp_res & I40E_XDP_REDIR)
2401 xdp_do_flush_map();
2402
2403 if (xdp_res & I40E_XDP_TX) {
2404 struct i40e_ring *xdp_ring =
2405 rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2406
2407 i40e_xdp_ring_update_tail(xdp_ring);
2408 }
2409}
2410
2411/**
2412 * i40e_inc_ntc - Advance the next_to_clean index
2413 * @rx_ring: Rx ring
2414 **/
2415static void i40e_inc_ntc(struct i40e_ring *rx_ring)
2416{
2417 u32 ntc = rx_ring->next_to_clean + 1;
2418
2419 ntc = (ntc < rx_ring->count) ? ntc : 0;
2420 rx_ring->next_to_clean = ntc;
2421 prefetch(I40E_RX_DESC(rx_ring, ntc));
2422}
2423
2424/**
2425 * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
2426 * @rx_ring: rx descriptor ring to transact packets on
2427 * @budget: Total limit on number of packets to process
2428 * @rx_cleaned: Out parameter of the number of packets processed
2429 *
2430 * This function provides a "bounce buffer" approach to Rx interrupt
2431 * processing. The advantage to this is that on systems that have
2432 * expensive overhead for IOMMU access this provides a means of avoiding
2433 * it by maintaining the mapping of the page to the system.
2434 *
2435 * Returns amount of work completed
2436 **/
2437static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
2438 unsigned int *rx_cleaned)
2439{
2440 unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0;
2441 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
2442 unsigned int offset = rx_ring->rx_offset;
2443 struct sk_buff *skb = rx_ring->skb;
2444 unsigned int xdp_xmit = 0;
2445 struct bpf_prog *xdp_prog;
2446 bool failure = false;
2447 struct xdp_buff xdp;
2448 int xdp_res = 0;
2449
2450#if (PAGE_SIZE < 8192)
2451 frame_sz = i40e_rx_frame_truesize(rx_ring, 0);
2452#endif
2453 xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
2454
2455 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
2456
2457 while (likely(total_rx_packets < (unsigned int)budget)) {
2458 struct i40e_rx_buffer *rx_buffer;
2459 union i40e_rx_desc *rx_desc;
2460 int rx_buffer_pgcnt;
2461 unsigned int size;
2462 u64 qword;
2463
2464 /* return some buffers to hardware, one at a time is too slow */
2465 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
2466 failure = failure ||
2467 i40e_alloc_rx_buffers(rx_ring, cleaned_count);
2468 cleaned_count = 0;
2469 }
2470
2471 rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
2472
2473 /* status_error_len will always be zero for unused descriptors
2474 * because it's cleared in cleanup, and overlaps with hdr_addr
2475 * which is always zero because packet split isn't used, if the
2476 * hardware wrote DD then the length will be non-zero
2477 */
2478 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
2479
2480 /* This memory barrier is needed to keep us from reading
2481 * any other fields out of the rx_desc until we have
2482 * verified the descriptor has been written back.
2483 */
2484 dma_rmb();
2485
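/* Flow Director programming status descriptors carry no packet
 * data, so recycle the Rx buffer and move on to the next descriptor.
 */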
2486 if (i40e_rx_is_programming_status(qword)) {
2487 i40e_clean_programming_status(rx_ring,
2488 rx_desc->raw.qword[0],
2489 qword);
2490 rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
2491 i40e_inc_ntc(rx_ring);
2492 i40e_reuse_rx_page(rx_ring, rx_buffer);
2493 cleaned_count++;
2494 continue;
2495 }
2496
2497 size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
2498 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
2499 if (!size)
2500 break;
2501
2502 i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
2503 rx_buffer = i40e_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt);
2504
2505 /* retrieve a buffer from the ring */
2506 if (!skb) {
2507 unsigned char *hard_start;
2508
2509 hard_start = page_address(rx_buffer->page) +
2510 rx_buffer->page_offset - offset;
2511 xdp_prepare_buff(&xdp, hard_start, offset, size, true);
2512 xdp_buff_clear_frags_flag(&xdp);
2513#if (PAGE_SIZE > 4096)
2514 /* At larger PAGE_SIZE, frame_sz depends on the packet length */
2515 xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, size);
2516#endif
2517 xdp_res = i40e_run_xdp(rx_ring, &xdp, xdp_prog);
2518 }
2519
2520 if (xdp_res) {
2521 if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
2522 xdp_xmit |= xdp_res;
2523 i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
2524 } else {
2525 rx_buffer->pagecnt_bias++;
2526 }
2527 total_rx_bytes += size;
2528 total_rx_packets++;
2529 } else if (skb) {
2530 i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
2531 } else if (ring_uses_build_skb(rx_ring)) {
2532 skb = i40e_build_skb(rx_ring, rx_buffer, &xdp);
2533 } else {
2534 skb = i40e_construct_skb(rx_ring, rx_buffer, &xdp);
2535 }
2536
2537 /* exit if we failed to retrieve a buffer */
2538 if (!xdp_res && !skb) {
2539 rx_ring->rx_stats.alloc_buff_failed++;
2540 rx_buffer->pagecnt_bias++;
2541 break;
2542 }
2543
2544 i40e_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt);
2545 cleaned_count++;
2546
2547 i40e_inc_ntc(rx_ring);
2548 if (i40e_is_non_eop(rx_ring, rx_desc))
2549 continue;
2550
2551 if (xdp_res || i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
2552 skb = NULL;
2553 continue;
2554 }
2555
2556 /* probably a little skewed due to removing CRC */
2557 total_rx_bytes += skb->len;
2558
2559 /* populate checksum, VLAN, and protocol */
2560 i40e_process_skb_fields(rx_ring, rx_desc, skb);
2561
2562 i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
2563 napi_gro_receive(&rx_ring->q_vector->napi, skb);
2564 skb = NULL;
2565
2566 /* update budget accounting */
2567 total_rx_packets++;
2568 }
2569
2570 i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
2571 rx_ring->skb = skb;
2572
2573 i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
2574
2575 *rx_cleaned = total_rx_packets;
2576
2577 /* guarantee a trip back through this routine if there was a failure */
2578 return failure ? budget : (int)total_rx_packets;
2579}
2580
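/**
 * i40e_buildreg_itr - build a value for writing to the PFINT_DYN_CTLN register
 * @type: ITR index to program (Rx, Tx, or no ITR update)
 * @itr: interval in microseconds to program
 *
 * Returns the register value with interrupts enabled and the interval
 * encoded in 2 microsecond units, as described in the comments below.
 **/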
2581static inline u32 i40e_buildreg_itr(const int type, u16 itr)
2582{
2583 u32 val;
2584
2585 /* We don't bother with setting the CLEARPBA bit as the data sheet
2586 * points out doing so is "meaningless since it was already
2587 * auto-cleared". The auto-clearing happens when the interrupt is
2588 * asserted.
2589 *
2590 * Hardware errata 28 also indicates that writing to a
2591 * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear
2592 * an event in the PBA anyway so we need to rely on the automask
2593 * to hold pending events for us until the interrupt is re-enabled
2594 *
2595 * The itr value is reported in microseconds, and the register
2596 * value is recorded in 2 microsecond units. For this reason we
2597 * only need to shift by the interval shift - 1 instead of the
2598 * full value.
2599 */
2600 itr &= I40E_ITR_MASK;
2601
2602 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2603 (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
2604 (itr << (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1));
2605
2606 return val;
2607}
2608
2609/* a small macro to shorten up some long lines */
2610#define INTREG I40E_PFINT_DYN_CTLN
2611
2612/* The act of updating the ITR will cause it to immediately trigger. In order
2613 * to prevent this from throwing off adaptive update statistics we defer the
2614 * update so that it can only happen so often. So after either Tx or Rx are
2615 * updated we make the adaptive scheme wait until either the ITR completely
2616 * expires via the next_update expiration or we have been through at least
2617 * 3 interrupts.
2618 */
2619#define ITR_COUNTDOWN_START 3
2620
2621/**
2622 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
2623 * @vsi: the VSI we care about
2624 * @q_vector: q_vector for which itr is being updated and interrupt enabled
2625 *
2626 **/
2627static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
2628 struct i40e_q_vector *q_vector)
2629{
2630 struct i40e_hw *hw = &vsi->back->hw;
2631 u32 intval;
2632
2633 /* If we don't have MSIX, then we only need to re-enable icr0 */
2634 if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) {
2635 i40e_irq_dynamic_enable_icr0(vsi->back);
2636 return;
2637 }
2638
2639 /* These will do nothing if dynamic updates are not enabled */
2640 i40e_update_itr(q_vector, &q_vector->tx);
2641 i40e_update_itr(q_vector, &q_vector->rx);
2642
2643 /* This block of logic allows us to get away with only updating
2644 * one ITR value with each interrupt. The idea is to perform a
2645 * pseudo-lazy update with the following criteria.
2646 *
2647 * 1. Rx is given higher priority than Tx if both are in the same state
2648 * 2. If we must reduce an ITR, that reduction is given highest priority.
2649 * 3. We then give priority to increasing the ITR based on the amount it needs to change.
2650 */
2651 if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
2652 /* Rx ITR needs to be reduced, this is highest priority */
2653 intval = i40e_buildreg_itr(I40E_RX_ITR,
2654 q_vector->rx.target_itr);
2655 q_vector->rx.current_itr = q_vector->rx.target_itr;
2656 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2657 } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
2658 ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
2659 (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
2660 /* Tx ITR needs to be reduced, this is second priority
2661 * Tx ITR needs to be increased more than Rx, fourth priority
2662 */
2663 intval = i40e_buildreg_itr(I40E_TX_ITR,
2664 q_vector->tx.target_itr);
2665 q_vector->tx.current_itr = q_vector->tx.target_itr;
2666 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2667 } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
2668 /* Rx ITR needs to be increased, third priority */
2669 intval = i40e_buildreg_itr(I40E_RX_ITR,
2670 q_vector->rx.target_itr);
2671 q_vector->rx.current_itr = q_vector->rx.target_itr;
2672 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2673 } else {
2674 /* No ITR update, lowest priority */
2675 intval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
2676 if (q_vector->itr_countdown)
2677 q_vector->itr_countdown--;
2678 }
2679
2680 if (!test_bit(__I40E_VSI_DOWN, vsi->state))
2681 wr32(hw, INTREG(q_vector->reg_idx), intval);
2682}
2683
2684/**
2685 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
2686 * @napi: napi struct with our devices info in it
2687 * @budget: amount of work driver is allowed to do this pass, in packets
2688 *
2689 * This function will clean all queues associated with a q_vector.
2690 *
2691 * Returns the amount of work done
2692 **/
2693int i40e_napi_poll(struct napi_struct *napi, int budget)
2694{
2695 struct i40e_q_vector *q_vector =
2696 container_of(napi, struct i40e_q_vector, napi);
2697 struct i40e_vsi *vsi = q_vector->vsi;
2698 struct i40e_ring *ring;
2699 bool tx_clean_complete = true;
2700 bool rx_clean_complete = true;
2701 unsigned int tx_cleaned = 0;
2702 unsigned int rx_cleaned = 0;
2703 bool clean_complete = true;
2704 bool arm_wb = false;
2705 int budget_per_ring;
2706 int work_done = 0;
2707
2708 if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
2709 napi_complete(napi);
2710 return 0;
2711 }
2712
2713 /* Since the actual Tx work is minimal, we can give the Tx a larger
2714 * budget and be more aggressive about cleaning up the Tx descriptors.
2715 */
2716 i40e_for_each_ring(ring, q_vector->tx) {
2717 bool wd = ring->xsk_pool ?
2718 i40e_clean_xdp_tx_irq(vsi, ring) :
2719 i40e_clean_tx_irq(vsi, ring, budget, &tx_cleaned);
2720
2721 if (!wd) {
2722 clean_complete = tx_clean_complete = false;
2723 continue;
2724 }
2725 arm_wb |= ring->arm_wb;
2726 ring->arm_wb = false;
2727 }
2728
2729 /* Handle case where we are called by netpoll with a budget of 0 */
2730 if (budget <= 0)
2731 goto tx_only;
2732
2733 /* normally we have 1 Rx ring per q_vector */
2734 if (unlikely(q_vector->num_ringpairs > 1))
2735 /* We attempt to distribute budget to each Rx queue fairly, but
2736 * don't allow the budget to go below 1 because that would exit
2737 * polling early.
2738 */
2739 budget_per_ring = max_t(int, budget / q_vector->num_ringpairs, 1);
2740 else
2741 /* Max of 1 Rx ring in this q_vector so give it the budget */
2742 budget_per_ring = budget;
2743
2744 i40e_for_each_ring(ring, q_vector->rx) {
2745 int cleaned = ring->xsk_pool ?
2746 i40e_clean_rx_irq_zc(ring, budget_per_ring) :
2747 i40e_clean_rx_irq(ring, budget_per_ring, &rx_cleaned);
2748
2749 work_done += cleaned;
2750 /* if we clean as many as budgeted, we must not be done */
2751 if (cleaned >= budget_per_ring)
2752 clean_complete = rx_clean_complete = false;
2753 }
2754
2755 if (!i40e_enabled_xdp_vsi(vsi))
2756 trace_i40e_napi_poll(napi, q_vector, budget, budget_per_ring, rx_cleaned,
2757 tx_cleaned, rx_clean_complete, tx_clean_complete);
2758
2759 /* If work not completed, return budget and polling will return */
2760 if (!clean_complete) {
2761 int cpu_id = smp_processor_id();
2762
2763 /* It is possible that the interrupt affinity has changed but,
2764 * if the cpu is pegged at 100%, polling will never exit while
2765 * traffic continues and the interrupt will be stuck on this
2766 * cpu. We check to make sure affinity is correct before we
2767 * continue to poll, otherwise we must stop polling so the
2768 * interrupt can move to the correct cpu.
2769 */
2770 if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
2771 /* Tell napi that we are done polling */
2772 napi_complete_done(napi, work_done);
2773
2774 /* Force an interrupt */
2775 i40e_force_wb(vsi, q_vector);
2776
2777 /* Return budget-1 so that polling stops */
2778 return budget - 1;
2779 }
2780tx_only:
2781 if (arm_wb) {
2782 q_vector->tx.ring[0].tx_stats.tx_force_wb++;
2783 i40e_enable_wb_on_itr(vsi, q_vector);
2784 }
2785 return budget;
2786 }
2787
2788 if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
2789 q_vector->arm_wb_state = false;
2790
2791 /* Exit the polling mode, but don't re-enable interrupts if stack might
2792 * poll us due to busy-polling
2793 */
2794 if (likely(napi_complete_done(napi, work_done)))
2795 i40e_update_enable_itr(vsi, q_vector);
2796
2797 return min(work_done, budget - 1);
2798}
2799
2800/**
2801 * i40e_atr - Add a Flow Director ATR filter
2802 * @tx_ring: ring to add programming descriptor to
2803 * @skb: send buffer
2804 * @tx_flags: send tx flags
2805 **/
2806static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
2807 u32 tx_flags)
2808{
2809 struct i40e_filter_program_desc *fdir_desc;
2810 struct i40e_pf *pf = tx_ring->vsi->back;
2811 union {
2812 unsigned char *network;
2813 struct iphdr *ipv4;
2814 struct ipv6hdr *ipv6;
2815 } hdr;
2816 struct tcphdr *th;
2817 unsigned int hlen;
2818 u32 flex_ptype, dtype_cmd;
2819 int l4_proto;
2820 u16 i;
2821
2822 /* make sure ATR is enabled */
2823 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
2824 return;
2825
2826 if (test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
2827 return;
2828
2829 /* if sampling is disabled do nothing */
2830 if (!tx_ring->atr_sample_rate)
2831 return;
2832
2833 /* Currently only IPv4/IPv6 with TCP is supported */
2834 if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
2835 return;
2836
2837 /* snag network header to get L4 type and address */
2838 hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ?
2839 skb_inner_network_header(skb) : skb_network_header(skb);
2840
2841 /* Note: tx_flags gets modified to reflect inner protocols in
2842 * tx_enable_csum function if encap is enabled.
2843 */
2844 if (tx_flags & I40E_TX_FLAGS_IPV4) {
2845 /* access ihl as u8 to avoid unaligned access on ia64 */
2846 hlen = (hdr.network[0] & 0x0F) << 2;
2847 l4_proto = hdr.ipv4->protocol;
2848 } else {
2849 /* find the start of the innermost ipv6 header */
2850 unsigned int inner_hlen = hdr.network - skb->data;
2851 unsigned int h_offset = inner_hlen;
2852
2853 /* this function updates h_offset to the end of the header */
2854 l4_proto =
2855 ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL);
2856 /* hlen will contain our best estimate of the tcp header */
2857 hlen = h_offset - inner_hlen;
2858 }
2859
2860 if (l4_proto != IPPROTO_TCP)
2861 return;
2862
2863 th = (struct tcphdr *)(hdr.network + hlen);
2864
2865 /* Due to lack of space, no more new filters can be programmed */
2866 if (th->syn && test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
2867 return;
2868 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) {
2869 /* HW ATR eviction will take care of removing filters on FIN
2870 * and RST packets.
2871 */
2872 if (th->fin || th->rst)
2873 return;
2874 }
2875
2876 tx_ring->atr_count++;
2877
2878 /* sample on all syn/fin/rst packets or once every atr sample rate */
2879 if (!th->fin &&
2880 !th->syn &&
2881 !th->rst &&
2882 (tx_ring->atr_count < tx_ring->atr_sample_rate))
2883 return;
2884
2885 tx_ring->atr_count = 0;
2886
2887 /* grab the next descriptor */
2888 i = tx_ring->next_to_use;
2889 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
2890
2891 i++;
2892 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2893
2894 flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
2895 I40E_TXD_FLTR_QW0_QINDEX_MASK;
2896 flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
2897 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
2898 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
2899 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
2900 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
2901
2902 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
2903
2904 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
2905
2906 dtype_cmd |= (th->fin || th->rst) ?
2907 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
2908 I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
2909 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
2910 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
2911
2912 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
2913 I40E_TXD_FLTR_QW1_DEST_SHIFT;
2914
2915 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
2916 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
2917
2918 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
2919 if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
2920 dtype_cmd |=
2921 ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
2922 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2923 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2924 else
2925 dtype_cmd |=
2926 ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
2927 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2928 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2929
2930 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED)
2931 dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
2932
2933 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
2934 fdir_desc->rsvd = cpu_to_le32(0);
2935 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
2936 fdir_desc->fd_id = cpu_to_le32(0);
2937}
2938
2939/**
2940 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
2941 * @skb: send buffer
2942 * @tx_ring: ring to send buffer on
2943 * @flags: the tx flags to be set
2944 *
2945 * Checks the skb and sets up the generic transmit flags related to VLAN
2946 * tagging for the HW, such as VLAN, DCB, etc.
2947 *
2948 * Returns a negative error code if the frame should be dropped, otherwise
2949 * returns 0 to indicate the flags have been set properly.
2950 **/
2951static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2952 struct i40e_ring *tx_ring,
2953 u32 *flags)
2954{
2955 __be16 protocol = skb->protocol;
2956 u32 tx_flags = 0;
2957
2958 if (protocol == htons(ETH_P_8021Q) &&
2959 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
2960 /* When HW VLAN acceleration is turned off by the user the
2961 * stack sets the protocol to 8021q so that the driver
2962 * can take any steps required to support the SW only
2963 * VLAN handling. In our case the driver doesn't need
2964 * to take any further steps so just set the protocol
2965 * to the encapsulated ethertype.
2966 */
2967 skb->protocol = vlan_get_protocol(skb);
2968 goto out;
2969 }
2970
2971 /* if we have a HW VLAN tag being added, default to the HW one */
2972 if (skb_vlan_tag_present(skb)) {
2973 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
2974 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2975 /* else if it is a SW VLAN, check the next protocol and store the tag */
2976 } else if (protocol == htons(ETH_P_8021Q)) {
2977 struct vlan_hdr *vhdr, _vhdr;
2978
2979 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
2980 if (!vhdr)
2981 return -EINVAL;
2982
2983 protocol = vhdr->h_vlan_encapsulated_proto;
2984 tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
2985 tx_flags |= I40E_TX_FLAGS_SW_VLAN;
2986 }
2987
2988 if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2989 goto out;
2990
2991 /* Insert 802.1p priority into VLAN header */
2992 if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
2993 (skb->priority != TC_PRIO_CONTROL)) {
2994 tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
2995 tx_flags |= (skb->priority & 0x7) <<
2996 I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
2997 if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
2998 struct vlan_ethhdr *vhdr;
2999 int rc;
3000
3001 rc = skb_cow_head(skb, 0);
3002 if (rc < 0)
3003 return rc;
3004 vhdr = (struct vlan_ethhdr *)skb->data;
3005 vhdr->h_vlan_TCI = htons(tx_flags >>
3006 I40E_TX_FLAGS_VLAN_SHIFT);
3007 } else {
3008 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
3009 }
3010 }
3011
3012out:
3013 *flags = tx_flags;
3014 return 0;
3015}
3016
3017/**
3018 * i40e_tso - set up the tso context descriptor
3019 * @first: pointer to first Tx buffer for xmit
3020 * @hdr_len: ptr to the size of the packet header
3021 * @cd_type_cmd_tso_mss: Quad Word 1
3022 *
3023 * Returns 0 if no TSO can happen, 1 if tso is going, or error
3024 **/
3025static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
3026 u64 *cd_type_cmd_tso_mss)
3027{
3028 struct sk_buff *skb = first->skb;
3029 u64 cd_cmd, cd_tso_len, cd_mss;
3030 __be16 protocol;
3031 union {
3032 struct iphdr *v4;
3033 struct ipv6hdr *v6;
3034 unsigned char *hdr;
3035 } ip;
3036 union {
3037 struct tcphdr *tcp;
3038 struct udphdr *udp;
3039 unsigned char *hdr;
3040 } l4;
3041 u32 paylen, l4_offset;
3042 u16 gso_size;
3043 int err;
3044
3045 if (skb->ip_summed != CHECKSUM_PARTIAL)
3046 return 0;
3047
3048 if (!skb_is_gso(skb))
3049 return 0;
3050
3051 err = skb_cow_head(skb, 0);
3052 if (err < 0)
3053 return err;
3054
3055 protocol = vlan_get_protocol(skb);
3056
3057 if (eth_p_mpls(protocol))
3058 ip.hdr = skb_inner_network_header(skb);
3059 else
3060 ip.hdr = skb_network_header(skb);
3061 l4.hdr = skb_checksum_start(skb);
3062
3063 /* initialize outer IP header fields */
3064 if (ip.v4->version == 4) {
3065 ip.v4->tot_len = 0;
3066 ip.v4->check = 0;
3067
3068 first->tx_flags |= I40E_TX_FLAGS_TSO;
3069 } else {
3070 ip.v6->payload_len = 0;
3071 first->tx_flags |= I40E_TX_FLAGS_TSO;
3072 }
3073
3074 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
3075 SKB_GSO_GRE_CSUM |
3076 SKB_GSO_IPXIP4 |
3077 SKB_GSO_IPXIP6 |
3078 SKB_GSO_UDP_TUNNEL |
3079 SKB_GSO_UDP_TUNNEL_CSUM)) {
3080 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
3081 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
3082 l4.udp->len = 0;
3083
3084 /* determine offset of outer transport header */
3085 l4_offset = l4.hdr - skb->data;
3086
3087 /* remove payload length from outer checksum */
3088 paylen = skb->len - l4_offset;
3089 csum_replace_by_diff(&l4.udp->check,
3090 (__force __wsum)htonl(paylen));
3091 }
3092
3093 /* reset pointers to inner headers */
3094 ip.hdr = skb_inner_network_header(skb);
3095 l4.hdr = skb_inner_transport_header(skb);
3096
3097 /* initialize inner IP header fields */
3098 if (ip.v4->version == 4) {
3099 ip.v4->tot_len = 0;
3100 ip.v4->check = 0;
3101 } else {
3102 ip.v6->payload_len = 0;
3103 }
3104 }
3105
3106 /* determine offset of inner transport header */
3107 l4_offset = l4.hdr - skb->data;
3108
3109 /* remove payload length from inner checksum */
3110 paylen = skb->len - l4_offset;
3111
3112 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
3113 csum_replace_by_diff(&l4.udp->check, (__force __wsum)htonl(paylen));
3114 /* compute length of segmentation header */
3115 *hdr_len = sizeof(*l4.udp) + l4_offset;
3116 } else {
3117 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
3118 /* compute length of segmentation header */
3119 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
3120 }
3121
3122 /* pull values out of skb_shinfo */
3123 gso_size = skb_shinfo(skb)->gso_size;
3124
3125 /* update GSO size and bytecount with header size */
3126 first->gso_segs = skb_shinfo(skb)->gso_segs;
3127 first->bytecount += (first->gso_segs - 1) * *hdr_len;
3128
3129 /* find the field values */
3130 cd_cmd = I40E_TX_CTX_DESC_TSO;
3131 cd_tso_len = skb->len - *hdr_len;
3132 cd_mss = gso_size;
3133 *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
3134 (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
3135 (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
3136 return 1;
3137}
3138
3139/**
3140 * i40e_tsyn - set up the tsyn context descriptor
3141 * @tx_ring: ptr to the ring to send
3142 * @skb: ptr to the skb we're sending
3143 * @tx_flags: the collected send information
3144 * @cd_type_cmd_tso_mss: Quad Word 1
3145 *
3146 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
3147 **/
3148static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
3149 u32 tx_flags, u64 *cd_type_cmd_tso_mss)
3150{
3151 struct i40e_pf *pf;
3152
3153 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
3154 return 0;
3155
3156 /* Tx timestamps cannot be sampled when doing TSO */
3157 if (tx_flags & I40E_TX_FLAGS_TSO)
3158 return 0;
3159
3160 /* only timestamp the outbound packet if the user has requested it and
3161 * we are not already transmitting a packet to be timestamped
3162 */
3163 pf = i40e_netdev_to_pf(tx_ring->netdev);
3164 if (!(pf->flags & I40E_FLAG_PTP))
3165 return 0;
3166
3167 if (pf->ptp_tx &&
3168 !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) {
3169 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3170 pf->ptp_tx_start = jiffies;
3171 pf->ptp_tx_skb = skb_get(skb);
3172 } else {
3173 pf->tx_hwtstamp_skipped++;
3174 return 0;
3175 }
3176
3177 *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
3178 I40E_TXD_CTX_QW1_CMD_SHIFT;
3179
3180 return 1;
3181}
3182
3183/**
3184 * i40e_tx_enable_csum - Enable Tx checksum offloads
3185 * @skb: send buffer
3186 * @tx_flags: pointer to Tx flags currently set
3187 * @td_cmd: Tx descriptor command bits to set
3188 * @td_offset: Tx descriptor header offsets to set
3189 * @tx_ring: Tx descriptor ring
3190 * @cd_tunneling: ptr to context desc bits
3191 **/
3192static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
3193 u32 *td_cmd, u32 *td_offset,
3194 struct i40e_ring *tx_ring,
3195 u32 *cd_tunneling)
3196{
3197 union {
3198 struct iphdr *v4;
3199 struct ipv6hdr *v6;
3200 unsigned char *hdr;
3201 } ip;
3202 union {
3203 struct tcphdr *tcp;
3204 struct udphdr *udp;
3205 unsigned char *hdr;
3206 } l4;
3207 unsigned char *exthdr;
3208 u32 offset, cmd = 0;
3209 __be16 frag_off;
3210 __be16 protocol;
3211 u8 l4_proto = 0;
3212
3213 if (skb->ip_summed != CHECKSUM_PARTIAL)
3214 return 0;
3215
3216 protocol = vlan_get_protocol(skb);
3217
3218 if (eth_p_mpls(protocol)) {
3219 ip.hdr = skb_inner_network_header(skb);
3220 l4.hdr = skb_checksum_start(skb);
3221 } else {
3222 ip.hdr = skb_network_header(skb);
3223 l4.hdr = skb_transport_header(skb);
3224 }
3225
3226 /* set the tx_flags to indicate the IP protocol type. this is
3227 * required so that checksum header computation below is accurate.
3228 */
3229 if (ip.v4->version == 4)
3230 *tx_flags |= I40E_TX_FLAGS_IPV4;
3231 else
3232 *tx_flags |= I40E_TX_FLAGS_IPV6;
3233
3234 /* compute outer L2 header size */
3235 offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
3236
3237 if (skb->encapsulation) {
3238 u32 tunnel = 0;
3239 /* define outer network header type */
3240 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
3241 tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
3242 I40E_TX_CTX_EXT_IP_IPV4 :
3243 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
3244
3245 l4_proto = ip.v4->protocol;
3246 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
3247 int ret;
3248
3249 tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
3250
3251 exthdr = ip.hdr + sizeof(*ip.v6);
3252 l4_proto = ip.v6->nexthdr;
3253 ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
3254 &l4_proto, &frag_off);
3255 if (ret < 0)
3256 return -1;
3257 }
3258
3259 /* define outer transport */
3260 switch (l4_proto) {
3261 case IPPROTO_UDP:
3262 tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
3263 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3264 break;
3265 case IPPROTO_GRE:
3266 tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
3267 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3268 break;
3269 case IPPROTO_IPIP:
3270 case IPPROTO_IPV6:
3271 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3272 l4.hdr = skb_inner_network_header(skb);
3273 break;
3274 default:
3275 if (*tx_flags & I40E_TX_FLAGS_TSO)
3276 return -1;
3277
3278 skb_checksum_help(skb);
3279 return 0;
3280 }
3281
3282 /* compute outer L3 header size */
3283 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
3284 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
3285
3286 /* switch IP header pointer from outer to inner header */
3287 ip.hdr = skb_inner_network_header(skb);
3288
3289 /* compute tunnel header size */
3290 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
3291 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
3292
3293 /* indicate if we need to offload outer UDP header */
3294 if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
3295 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
3296 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
3297 tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
3298
3299 /* record tunnel offload values */
3300 *cd_tunneling |= tunnel;
3301
3302 /* switch L4 header pointer from outer to inner */
3303 l4.hdr = skb_inner_transport_header(skb);
3304 l4_proto = 0;
3305
3306 /* reset type as we transition from outer to inner headers */
3307 *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
3308 if (ip.v4->version == 4)
3309 *tx_flags |= I40E_TX_FLAGS_IPV4;
3310 if (ip.v6->version == 6)
3311 *tx_flags |= I40E_TX_FLAGS_IPV6;
3312 }
3313
3314 /* Enable IP checksum offloads */
3315 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
3316 l4_proto = ip.v4->protocol;
3317 /* the stack computes the IP header already, the only time we
3318 * need the hardware to recompute it is in the case of TSO.
3319 */
3320 cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
3321 I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
3322 I40E_TX_DESC_CMD_IIPT_IPV4;
3323 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
3324 cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
3325
3326 exthdr = ip.hdr + sizeof(*ip.v6);
3327 l4_proto = ip.v6->nexthdr;
3328 if (l4.hdr != exthdr)
3329 ipv6_skip_exthdr(skb, exthdr - skb->data,
3330 &l4_proto, &frag_off);
3331 }
3332
3333 /* compute inner L3 header size */
3334 offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
3335
3336 /* Enable L4 checksum offloads */
3337 switch (l4_proto) {
3338 case IPPROTO_TCP:
3339 /* enable checksum offloads */
3340 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
3341 offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
3342 break;
3343 case IPPROTO_SCTP:
3344 /* enable SCTP checksum offload */
3345 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
3346 offset |= (sizeof(struct sctphdr) >> 2) <<
3347 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
3348 break;
3349 case IPPROTO_UDP:
3350 /* enable UDP checksum offload */
3351 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
3352 offset |= (sizeof(struct udphdr) >> 2) <<
3353 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
3354 break;
3355 default:
3356 if (*tx_flags & I40E_TX_FLAGS_TSO)
3357 return -1;
3358 skb_checksum_help(skb);
3359 return 0;
3360 }
3361
3362 *td_cmd |= cmd;
3363 *td_offset |= offset;
3364
3365 return 1;
3366}
3367
3368/**
3369 * i40e_create_tx_ctx - Build the Tx context descriptor
3370 * @tx_ring: ring to create the descriptor on
3371 * @cd_type_cmd_tso_mss: Quad Word 1
3372 * @cd_tunneling: Quad Word 0 - bits 0-31
3373 * @cd_l2tag2: Quad Word 0 - bits 32-63
3374 **/
3375static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
3376 const u64 cd_type_cmd_tso_mss,
3377 const u32 cd_tunneling, const u32 cd_l2tag2)
3378{
3379 struct i40e_tx_context_desc *context_desc;
3380 int i = tx_ring->next_to_use;
3381
3382 if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
3383 !cd_tunneling && !cd_l2tag2)
3384 return;
3385
3386 /* grab the next descriptor */
3387 context_desc = I40E_TX_CTXTDESC(tx_ring, i);
3388
3389 i++;
3390 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
3391
3392 /* cpu_to_le32 and assign to struct fields */
3393 context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
3394 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
3395 context_desc->rsvd = cpu_to_le16(0);
3396 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
3397}
3398
3399/**
3400 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
3401 * @tx_ring: the ring to be checked
3402 * @size: the size buffer we want to assure is available
3403 *
3404 * Returns -EBUSY if a stop is needed, else 0
3405 **/
3406int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
3407{
3408 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
3409 /* Memory barrier before checking head and tail */
3410 smp_mb();
3411
3412 ++tx_ring->tx_stats.tx_stopped;
3413
3414 /* Check again in a case another CPU has just made room available. */
3415 if (likely(I40E_DESC_UNUSED(tx_ring) < size))
3416 return -EBUSY;
3417
3418 /* A reprieve! - use start_queue because it doesn't call schedule */
3419 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
3420 ++tx_ring->tx_stats.restart_queue;
3421 return 0;
3422}
3423
3424/**
3425 * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
3426 * @skb: send buffer
3427 *
3428 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
3429 * and so we need to figure out the cases where we need to linearize the skb.
3430 *
3431 * For TSO we need to count the TSO header and segment payload separately.
3432 * As such we need to check cases where we have 7 fragments or more as we
3433 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
3434 * the segment payload in the first descriptor, and another 7 for the
3435 * fragments.
3436 **/
3437bool __i40e_chk_linearize(struct sk_buff *skb)
3438{
3439 const skb_frag_t *frag, *stale;
3440 int nr_frags, sum;
3441
3442 /* no need to check if number of frags is less than 7 */
3443 nr_frags = skb_shinfo(skb)->nr_frags;
3444 if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
3445 return false;
3446
3447 /* We need to walk through the list and validate that each group
3448 * of 6 fragments totals at least gso_size.
3449 */
3450 nr_frags -= I40E_MAX_BUFFER_TXD - 2;
3451 frag = &skb_shinfo(skb)->frags[0];
3452
3453 /* Initialize size to the negative value of gso_size minus 1. We
3454 * use this as the worst case scenario in which the frag ahead
3455 * of us only provides one byte which is why we are limited to 6
3456 * descriptors for a single transmit as the header and previous
3457 * fragment are already consuming 2 descriptors.
3458 */
3459 sum = 1 - skb_shinfo(skb)->gso_size;
3460
3461 /* Add size of frags 0 through 4 to create our initial sum */
3462 sum += skb_frag_size(frag++);
3463 sum += skb_frag_size(frag++);
3464 sum += skb_frag_size(frag++);
3465 sum += skb_frag_size(frag++);
3466 sum += skb_frag_size(frag++);
3467
3468 /* Walk through fragments adding latest fragment, testing it, and
3469 * then removing stale fragments from the sum.
3470 */
3471 for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
3472 int stale_size = skb_frag_size(stale);
3473
3474 sum += skb_frag_size(frag++);
3475
3476 /* The stale fragment may present us with a smaller
3477 * descriptor than the actual fragment size. To account
3478 * for that we need to remove all the data on the front and
3479 * figure out what the remainder would be in the last
3480 * descriptor associated with the fragment.
3481 */
3482 if (stale_size > I40E_MAX_DATA_PER_TXD) {
3483 int align_pad = -(skb_frag_off(stale)) &
3484 (I40E_MAX_READ_REQ_SIZE - 1);
3485
3486 sum -= align_pad;
3487 stale_size -= align_pad;
3488
3489 do {
3490 sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
3491 stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
3492 } while (stale_size > I40E_MAX_DATA_PER_TXD);
3493 }
3494
3495 /* if sum is negative we failed to make sufficient progress */
3496 if (sum < 0)
3497 return true;
3498
3499 if (!nr_frags--)
3500 break;
3501
3502 sum -= stale_size;
3503 }
3504
3505 return false;
3506}
3507
3508/**
3509 * i40e_tx_map - Build the Tx descriptor
3510 * @tx_ring: ring to send buffer on
3511 * @skb: send buffer
3512 * @first: first buffer info buffer to use
3513 * @tx_flags: collected send information
3514 * @hdr_len: size of the packet header
3515 * @td_cmd: the command field in the descriptor
3516 * @td_offset: offset for checksum or crc
3517 *
3518 * Returns 0 on success, -1 on failure to DMA
3519 **/
3520static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
3521 struct i40e_tx_buffer *first, u32 tx_flags,
3522 const u8 hdr_len, u32 td_cmd, u32 td_offset)
3523{
3524 unsigned int data_len = skb->data_len;
3525 unsigned int size = skb_headlen(skb);
3526 skb_frag_t *frag;
3527 struct i40e_tx_buffer *tx_bi;
3528 struct i40e_tx_desc *tx_desc;
3529 u16 i = tx_ring->next_to_use;
3530 u32 td_tag = 0;
3531 dma_addr_t dma;
3532 u16 desc_count = 1;
3533
3534 if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
3535 td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
3536 td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
3537 I40E_TX_FLAGS_VLAN_SHIFT;
3538 }
3539
3540 first->tx_flags = tx_flags;
3541
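/* map the linear portion of the skb; frags are mapped in the loop below */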
3542 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
3543
3544 tx_desc = I40E_TX_DESC(tx_ring, i);
3545 tx_bi = first;
3546
3547 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
3548 unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
3549
3550 if (dma_mapping_error(tx_ring->dev, dma))
3551 goto dma_error;
3552
3553 /* record length, and DMA address */
3554 dma_unmap_len_set(tx_bi, len, size);
3555 dma_unmap_addr_set(tx_bi, dma, dma);
3556
3557 /* align size to end of page */
3558 max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
3559 tx_desc->buffer_addr = cpu_to_le64(dma);
3560
3561 while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
3562 tx_desc->cmd_type_offset_bsz =
3563 build_ctob(td_cmd, td_offset,
3564 max_data, td_tag);
3565
3566 tx_desc++;
3567 i++;
3568 desc_count++;
3569
3570 if (i == tx_ring->count) {
3571 tx_desc = I40E_TX_DESC(tx_ring, 0);
3572 i = 0;
3573 }
3574
3575 dma += max_data;
3576 size -= max_data;
3577
3578 max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
3579 tx_desc->buffer_addr = cpu_to_le64(dma);
3580 }
3581
3582 if (likely(!data_len))
3583 break;
3584
3585 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
3586 size, td_tag);
3587
3588 tx_desc++;
3589 i++;
3590 desc_count++;
3591
3592 if (i == tx_ring->count) {
3593 tx_desc = I40E_TX_DESC(tx_ring, 0);
3594 i = 0;
3595 }
3596
3597 size = skb_frag_size(frag);
3598 data_len -= size;
3599
3600 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
3601 DMA_TO_DEVICE);
3602
3603 tx_bi = &tx_ring->tx_bi[i];
3604 }
3605
3606 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
3607
3608 i++;
3609 if (i == tx_ring->count)
3610 i = 0;
3611
3612 tx_ring->next_to_use = i;
3613
3614 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
3615
3616 /* write last descriptor with EOP bit */
3617 td_cmd |= I40E_TX_DESC_CMD_EOP;
3618
3619 /* We OR these values together to check both against 4 (WB_STRIDE)
3620 * below. This is safe since we don't re-use desc_count afterwards.
3621 */
3622 desc_count |= ++tx_ring->packet_stride;

        if (desc_count >= WB_STRIDE) {
                /* write last descriptor with RS bit set */
                td_cmd |= I40E_TX_DESC_CMD_RS;
                tx_ring->packet_stride = 0;
        }

        tx_desc->cmd_type_offset_bsz =
                        build_ctob(td_cmd, td_offset, size, td_tag);

        skb_tx_timestamp(skb);

        /* Force memory writes to complete before letting h/w know there
         * are new descriptors to fetch.
         *
         * We also use this memory barrier to make certain all of the
         * status bits have been updated before next_to_watch is written.
         */
        wmb();

        /* set next_to_watch value indicating a packet is present */
        first->next_to_watch = tx_desc;

        /* notify HW of packet */
        if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
                writel(i, tx_ring->tail);

        return 0;

dma_error:
        dev_info(tx_ring->dev, "TX DMA map failed\n");

        /* clear dma mappings for failed tx_bi map */
        for (;;) {
                tx_bi = &tx_ring->tx_bi[i];
                i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
                if (tx_bi == first)
                        break;
                if (i == 0)
                        i = tx_ring->count;
                i--;
        }

        tx_ring->next_to_use = i;

        return -1;
}

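/**
 * i40e_swdcb_skb_tx_hash - software DCB Tx queue hash
 * @dev: network interface device structure
 * @skb: send buffer
 * @num_tx_queues: number of queues available to the traffic class
 *
 * Hashes the skb, preferring the socket hash when one is present, and
 * scales the result into [0, @num_tx_queues) so a queue can be picked
 * within a single traffic class.
 **/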
static u16 i40e_swdcb_skb_tx_hash(struct net_device *dev,
                                  const struct sk_buff *skb,
                                  u16 num_tx_queues)
{
        u32 jhash_initval_salt = 0xd631614b;
        u32 hash;

        if (skb->sk && skb->sk->sk_hash)
                hash = skb->sk->sk_hash;
        else
                hash = (__force u16)skb->protocol ^ skb->hash;

        hash = jhash_1word(hash, jhash_initval_salt);

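        /* Scale the 32-bit hash into [0, num_tx_queues) with a multiply
         * and shift rather than a modulo; e.g. hash 0x80000000 with eight
         * queues selects queue 4.
         */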
        return (u16)(((u64)hash * num_tx_queues) >> 32);
}

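/**
 * i40e_lan_select_queue - Select a Tx queue for a LAN VSI skb
 * @netdev: network interface device structure
 * @skb: send buffer
 * @sb_dev: subordinate device, unused here
 *
 * With DCB active, maps the skb priority to a traffic class and picks a
 * queue within that class's range; otherwise defers to the stack's
 * default queue selection.
 **/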
u16 i40e_lan_select_queue(struct net_device *netdev,
                          struct sk_buff *skb,
                          struct net_device __always_unused *sb_dev)
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_hw *hw;
        u16 qoffset;
        u16 qcount;
        u8 tclass;
        u16 hash;
        u8 prio;

        /* is DCB enabled at all? */
        if (vsi->tc_config.numtc == 1 ||
            i40e_is_tc_mqprio_enabled(vsi->back))
                return netdev_pick_tx(netdev, skb, sb_dev);

        prio = skb->priority;
        hw = &vsi->back->hw;
        tclass = hw->local_dcbx_config.etscfg.prioritytable[prio];
        /* sanity check */
        if (unlikely(!(vsi->tc_config.enabled_tc & BIT(tclass))))
                tclass = 0;

        /* select a queue assigned for the given TC */
        qcount = vsi->tc_config.tc_info[tclass].qcount;
        hash = i40e_swdcb_skb_tx_hash(netdev, skb, qcount);

        qoffset = vsi->tc_config.tc_info[tclass].qoffset;
        return qoffset + hash;
}

/**
 * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
 * @xdpf: data to transmit
 * @xdp_ring: XDP Tx ring
 **/
static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
                              struct i40e_ring *xdp_ring)
{
        struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
        u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
        u16 i = 0, index = xdp_ring->next_to_use;
        struct i40e_tx_buffer *tx_head = &xdp_ring->tx_bi[index];
        struct i40e_tx_buffer *tx_bi = tx_head;
        struct i40e_tx_desc *tx_desc = I40E_TX_DESC(xdp_ring, index);
        void *data = xdpf->data;
        u32 size = xdpf->len;

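        /* one descriptor is needed for the head buffer plus one per
         * fragment, hence the 1 + nr_frags check below
         */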
        if (unlikely(I40E_DESC_UNUSED(xdp_ring) < 1 + nr_frags)) {
                xdp_ring->tx_stats.tx_busy++;
                return I40E_XDP_CONSUMED;
        }

        tx_head->bytecount = xdp_get_frame_len(xdpf);
        tx_head->gso_segs = 1;
        tx_head->xdpf = xdpf;

        for (;;) {
                dma_addr_t dma;

                dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
                if (dma_mapping_error(xdp_ring->dev, dma))
                        goto unmap;

                /* record length, and DMA address */
                dma_unmap_len_set(tx_bi, len, size);
                dma_unmap_addr_set(tx_bi, dma, dma);

                tx_desc->buffer_addr = cpu_to_le64(dma);
                tx_desc->cmd_type_offset_bsz =
                        build_ctob(I40E_TX_DESC_CMD_ICRC, 0, size, 0);

                if (++index == xdp_ring->count)
                        index = 0;

                if (i == nr_frags)
                        break;

                tx_bi = &xdp_ring->tx_bi[index];
                tx_desc = I40E_TX_DESC(xdp_ring, index);

                data = skb_frag_address(&sinfo->frags[i]);
                size = skb_frag_size(&sinfo->frags[i]);
                i++;
        }

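        /* EOP and RS (I40E_TXD_CMD) are set only on the final descriptor,
         * ending the frame and requesting a single write-back for the
         * whole chain.
         */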
        tx_desc->cmd_type_offset_bsz |=
                cpu_to_le64(I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);

        /* Make certain all of the status bits have been updated
         * before next_to_watch is written.
         */
        smp_wmb();

        xdp_ring->xdp_tx_active++;

        tx_head->next_to_watch = tx_desc;
        xdp_ring->next_to_use = index;

        return I40E_XDP_TX;

unmap:
        for (;;) {
                tx_bi = &xdp_ring->tx_bi[index];
                if (dma_unmap_len(tx_bi, len))
                        dma_unmap_page(xdp_ring->dev,
                                       dma_unmap_addr(tx_bi, dma),
                                       dma_unmap_len(tx_bi, len),
                                       DMA_TO_DEVICE);
                dma_unmap_len_set(tx_bi, len, 0);
                if (tx_bi == tx_head)
                        break;

                if (!index)
                        index += xdp_ring->count;
                index--;
        }

        return I40E_XDP_CONSUMED;
}

/**
 * i40e_xmit_frame_ring - Sends buffer on Tx ring
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
                                        struct i40e_ring *tx_ring)
{
        u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
        u32 cd_tunneling = 0, cd_l2tag2 = 0;
        struct i40e_tx_buffer *first;
        u32 td_offset = 0;
        u32 tx_flags = 0;
        u32 td_cmd = 0;
        u8 hdr_len = 0;
        int tso, count;
        int tsyn;

        /* prefetch the data, we'll need it later */
        prefetch(skb->data);

        i40e_trace(xmit_frame_ring, skb, tx_ring);

        count = i40e_xmit_descriptor_count(skb);
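        /* the hardware can chain at most eight data buffers per
         * packet/TSO segment; if the fragment layout would exceed that,
         * linearize the skb and recompute the descriptor count
         */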
        if (i40e_chk_linearize(skb, count)) {
                if (__skb_linearize(skb)) {
                        dev_kfree_skb_any(skb);
                        return NETDEV_TX_OK;
                }
                count = i40e_txd_use_count(skb->len);
                tx_ring->tx_stats.tx_linearize++;
        }

        /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
         *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
         *       + 4 desc gap to avoid the cache line where head is,
         *       + 1 desc for context descriptor,
         * otherwise try next time
         */
        if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
                tx_ring->tx_stats.tx_busy++;
                return NETDEV_TX_BUSY;
        }

        /* record the location of the first descriptor for this packet */
        first = &tx_ring->tx_bi[tx_ring->next_to_use];
        first->skb = skb;
        first->bytecount = skb->len;
        first->gso_segs = 1;

        /* prepare the xmit flags */
        if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
                goto out_drop;

        tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);

        if (tso < 0)
                goto out_drop;
        else if (tso)
                tx_flags |= I40E_TX_FLAGS_TSO;

        /* Always offload the checksum, since it's in the data descriptor */
        tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
                                  tx_ring, &cd_tunneling);
        if (tso < 0)
                goto out_drop;

        tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);

        if (tsyn)
                tx_flags |= I40E_TX_FLAGS_TSYN;

        /* always enable CRC insertion offload */
        td_cmd |= I40E_TX_DESC_CMD_ICRC;

        i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
                           cd_tunneling, cd_l2tag2);

        /* Add Flow Director ATR if it's enabled.
         *
         * NOTE: this must always be directly before the data descriptor.
         */
        i40e_atr(tx_ring, skb, tx_flags);

        if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
                        td_cmd, td_offset))
                goto cleanup_tx_tstamp;

        return NETDEV_TX_OK;

out_drop:
        i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
        dev_kfree_skb_any(first->skb);
        first->skb = NULL;
cleanup_tx_tstamp:
        if (unlikely(tx_flags & I40E_TX_FLAGS_TSYN)) {
                struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev);

                dev_kfree_skb_any(pf->ptp_tx_skb);
                pf->ptp_tx_skb = NULL;
                clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
        }

        return NETDEV_TX_OK;
}

/**
 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];

        /* hardware can't handle really short frames, hardware padding works
         * beyond this point
         */
        if (skb_put_padto(skb, I40E_MIN_TX_LEN))
                return NETDEV_TX_OK;

        return i40e_xmit_frame_ring(skb, tx_ring);
}

/**
 * i40e_xdp_xmit - Implements ndo_xdp_xmit
 * @dev: netdev
 * @n: number of frames
 * @frames: array of XDP buffer pointers
 * @flags: XDP extra info
 *
 * Returns the number of frames successfully sent. Failed frames
 * will be freed by the XDP core.
 *
 * For error cases, a negative errno code is returned and no frames
 * are transmitted (caller must handle freeing frames).
 **/
int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
                  u32 flags)
{
        struct i40e_netdev_priv *np = netdev_priv(dev);
        unsigned int queue_index = smp_processor_id();
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
        struct i40e_ring *xdp_ring;
        int nxmit = 0;
        int i;

        if (test_bit(__I40E_VSI_DOWN, vsi->state))
                return -ENETDOWN;

        if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs ||
            test_bit(__I40E_CONFIG_BUSY, pf->state))
                return -ENXIO;

        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
                return -EINVAL;

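        /* One XDP Tx ring is allocated per queue pair and indexed by the
         * transmitting CPU, so the queue_index bound check above gives
         * each CPU its own ring and no locking is needed here.
         */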
        xdp_ring = vsi->xdp_rings[queue_index];

        for (i = 0; i < n; i++) {
                struct xdp_frame *xdpf = frames[i];
                int err;

                err = i40e_xmit_xdp_ring(xdpf, xdp_ring);
                if (err != I40E_XDP_TX)
                        break;
                nxmit++;
        }

        if (unlikely(flags & XDP_XMIT_FLUSH))
                i40e_xdp_ring_update_tail(xdp_ring);

        return nxmit;
}
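
/* Usage sketch (illustrative, not part of this driver): the XDP core
 * reaches this handler through the netdev's ndo_xdp_xmit op when
 * redirecting frames, e.g.
 *
 *      sent = dev->netdev_ops->ndo_xdp_xmit(dev, n, frames, XDP_XMIT_FLUSH);
 *
 * and frees the (n - sent) frames that were not accepted, matching the
 * return convention documented above.
 */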