1// SPDX-License-Identifier: GPL-2.0
2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3
4#include <linux/bpf_trace.h>
5#include <linux/prefetch.h>
6#include <linux/sctp.h>
7#include <net/mpls.h>
8#include <net/xdp.h>
9#include "i40e_txrx_common.h"
10#include "i40e_trace.h"
11#include "i40e_xsk.h"
12
13#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
14/**
15 * i40e_fdir - Generate a Flow Director descriptor based on fdata
16 * @tx_ring: Tx ring to send buffer on
17 * @fdata: Flow director filter data
18 * @add: Indicate if we are adding a rule or deleting one
19 *
20 **/
21static void i40e_fdir(struct i40e_ring *tx_ring,
22 struct i40e_fdir_filter *fdata, bool add)
23{
24 struct i40e_filter_program_desc *fdir_desc;
25 struct i40e_pf *pf = tx_ring->vsi->back;
26 u32 flex_ptype, dtype_cmd;
27 u16 i;
28
29 /* grab the next descriptor */
30 i = tx_ring->next_to_use;
31 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
32
33 i++;
34 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
35
36 flex_ptype = FIELD_PREP(I40E_TXD_FLTR_QW0_QINDEX_MASK, fdata->q_index);
37
38 flex_ptype |= FIELD_PREP(I40E_TXD_FLTR_QW0_FLEXOFF_MASK,
39 fdata->flex_off);
40
41 flex_ptype |= FIELD_PREP(I40E_TXD_FLTR_QW0_PCTYPE_MASK, fdata->pctype);
42
43 /* Use LAN VSI Id if not programmed by user */
44 flex_ptype |= FIELD_PREP(I40E_TXD_FLTR_QW0_DEST_VSI_MASK,
45 fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id);
46
47 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
48
49 dtype_cmd |= add ?
50 I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
51 I40E_TXD_FLTR_QW1_PCMD_SHIFT :
52 I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
53 I40E_TXD_FLTR_QW1_PCMD_SHIFT;
54
55 dtype_cmd |= FIELD_PREP(I40E_TXD_FLTR_QW1_DEST_MASK, fdata->dest_ctl);
56
57 dtype_cmd |= FIELD_PREP(I40E_TXD_FLTR_QW1_FD_STATUS_MASK,
58 fdata->fd_status);
59
60 if (fdata->cnt_index) {
61 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
62 dtype_cmd |= FIELD_PREP(I40E_TXD_FLTR_QW1_CNTINDEX_MASK,
63 fdata->cnt_index);
64 }
65
66 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
67 fdir_desc->rsvd = cpu_to_le32(0);
68 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
69 fdir_desc->fd_id = cpu_to_le32(fdata->fd_id);
70}
71
72#define I40E_FD_CLEAN_DELAY 10
73/**
74 * i40e_program_fdir_filter - Program a Flow Director filter
75 * @fdir_data: Packet data that will be filter parameters
76 * @raw_packet: the pre-allocated packet buffer for FDir
77 * @pf: The PF pointer
78 * @add: True for add/update, False for remove
79 **/
80static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
81 u8 *raw_packet, struct i40e_pf *pf,
82 bool add)
83{
84 struct i40e_tx_buffer *tx_buf, *first;
85 struct i40e_tx_desc *tx_desc;
86 struct i40e_ring *tx_ring;
87 struct i40e_vsi *vsi;
88 struct device *dev;
89 dma_addr_t dma;
90 u32 td_cmd = 0;
91 u16 i;
92
93 /* find existing FDIR VSI */
94 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
95 if (!vsi)
96 return -ENOENT;
97
98 tx_ring = vsi->tx_rings[0];
99 dev = tx_ring->dev;
100
101 /* we need two descriptors to add/del a filter and we can wait */
102 for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
103 if (!i)
104 return -EAGAIN;
105 msleep_interruptible(1);
106 }
107
108 dma = dma_map_single(dev, raw_packet,
109 I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
110 if (dma_mapping_error(dev, dma))
111 goto dma_fail;
112
113 /* grab the next descriptor */
114 i = tx_ring->next_to_use;
115 first = &tx_ring->tx_bi[i];
116 i40e_fdir(tx_ring, fdir_data, add);
117
118 /* Now program a dummy descriptor */
119 i = tx_ring->next_to_use;
120 tx_desc = I40E_TX_DESC(tx_ring, i);
121 tx_buf = &tx_ring->tx_bi[i];
122
123 tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
124
125 memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));
126
127 /* record length, and DMA address */
128 dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
129 dma_unmap_addr_set(tx_buf, dma, dma);
130
131 tx_desc->buffer_addr = cpu_to_le64(dma);
132 td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
133
134 tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
135 tx_buf->raw_buf = (void *)raw_packet;
136
137 tx_desc->cmd_type_offset_bsz =
138 build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
139
140 /* Force memory writes to complete before letting h/w
141 * know there are new descriptors to fetch.
142 */
143 wmb();
144
145 /* Mark the data descriptor to be watched */
146 first->next_to_watch = tx_desc;
147
148 writel(tx_ring->next_to_use, tx_ring->tail);
149 return 0;
150
151dma_fail:
152 return -1;
153}
154
155/**
156 * i40e_create_dummy_packet - Constructs dummy packet for HW
157 * @dummy_packet: preallocated space for dummy packet
158 * @ipv4: true if the layer 3 packet is IPv4, false if IPv6
159 * @l4proto: next level protocol used in data portion of l3
160 * @data: filter data
161 *
162 * Returns address of layer 4 protocol dummy packet.
163 **/
164static char *i40e_create_dummy_packet(u8 *dummy_packet, bool ipv4, u8 l4proto,
165 struct i40e_fdir_filter *data)
166{
167 bool is_vlan = !!data->vlan_tag;
168 struct vlan_hdr vlan = {};
169 struct ipv6hdr ipv6 = {};
170 struct ethhdr eth = {};
171 struct iphdr ip = {};
172 u8 *tmp;
173
174 if (ipv4) {
175 eth.h_proto = cpu_to_be16(ETH_P_IP);
176 ip.protocol = l4proto;
177 ip.version = 0x4;
178 ip.ihl = 0x5;
179
180 ip.daddr = data->dst_ip;
181 ip.saddr = data->src_ip;
182 } else {
183 eth.h_proto = cpu_to_be16(ETH_P_IPV6);
184 ipv6.nexthdr = l4proto;
185 ipv6.version = 0x6;
186
187 memcpy(&ipv6.saddr.in6_u.u6_addr32, data->src_ip6,
188 sizeof(__be32) * 4);
189 memcpy(&ipv6.daddr.in6_u.u6_addr32, data->dst_ip6,
190 sizeof(__be32) * 4);
191 }
192
193 if (is_vlan) {
194 vlan.h_vlan_TCI = data->vlan_tag;
195 vlan.h_vlan_encapsulated_proto = eth.h_proto;
196 eth.h_proto = data->vlan_etype;
197 }
198
199 tmp = dummy_packet;
200 memcpy(tmp, &eth, sizeof(eth));
201 tmp += sizeof(eth);
202
203 if (is_vlan) {
204 memcpy(tmp, &vlan, sizeof(vlan));
205 tmp += sizeof(vlan);
206 }
207
208 if (ipv4) {
209 memcpy(tmp, &ip, sizeof(ip));
210 tmp += sizeof(ip);
211 } else {
212 memcpy(tmp, &ipv6, sizeof(ipv6));
213 tmp += sizeof(ipv6);
214 }
215
216 return tmp;
217}
218
219/**
220 * i40e_create_dummy_udp_packet - helper function to create UDP packet
221 * @raw_packet: preallocated space for dummy packet
222 * @ipv4: true if the layer 3 packet is IPv4, false if IPv6
223 * @l4proto: next level protocol used in data portion of l3
224 * @data: filter data
225 *
226 * Helper function to populate udp fields.
227 **/
228static void i40e_create_dummy_udp_packet(u8 *raw_packet, bool ipv4, u8 l4proto,
229 struct i40e_fdir_filter *data)
230{
231 struct udphdr *udp;
232 u8 *tmp;
233
234 tmp = i40e_create_dummy_packet(raw_packet, ipv4, IPPROTO_UDP, data);
235 udp = (struct udphdr *)(tmp);
236 udp->dest = data->dst_port;
237 udp->source = data->src_port;
238}
239
240/**
241 * i40e_create_dummy_tcp_packet - helper function to create TCP packet
242 * @raw_packet: preallocated space for dummy packet
243 * @ipv4: true if the layer 3 packet is IPv4, false if IPv6
244 * @l4proto: next level protocol used in data portion of l3
245 * @data: filter data
246 *
247 * Helper function to populate tcp fields.
248 **/
249static void i40e_create_dummy_tcp_packet(u8 *raw_packet, bool ipv4, u8 l4proto,
250 struct i40e_fdir_filter *data)
251{
252 struct tcphdr *tcp;
253 u8 *tmp;
254 /* Dummy tcp packet */
255 static const char tcp_packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
256 0x50, 0x11, 0x0, 0x72, 0, 0, 0, 0};
257
258 tmp = i40e_create_dummy_packet(raw_packet, ipv4, IPPROTO_TCP, data);
259
260 tcp = (struct tcphdr *)tmp;
261 memcpy(tcp, tcp_packet, sizeof(tcp_packet));
262 tcp->dest = data->dst_port;
263 tcp->source = data->src_port;
264}
265
266/**
267 * i40e_create_dummy_sctp_packet - helper function to create SCTP packet
268 * @raw_packet: preallocated space for dummy packet
269 * @ipv4: true if the layer 3 packet is IPv4, false if IPv6
270 * @l4proto: next level protocol used in data portion of l3
271 * @data: filter data
272 *
273 * Helper function to populate sctp fields.
274 **/
275static void i40e_create_dummy_sctp_packet(u8 *raw_packet, bool ipv4,
276 u8 l4proto,
277 struct i40e_fdir_filter *data)
278{
279 struct sctphdr *sctp;
280 u8 *tmp;
281
282 tmp = i40e_create_dummy_packet(raw_packet, ipv4, IPPROTO_SCTP, data);
283
284 sctp = (struct sctphdr *)tmp;
285 sctp->dest = data->dst_port;
286 sctp->source = data->src_port;
287}
288
289/**
290 * i40e_prepare_fdir_filter - Prepare and program fdir filter
291 * @pf: physical function to attach filter to
292 * @fd_data: filter data
293 * @add: add or delete filter
294 * @packet_addr: address of dummy packet, used in filtering
295 * @payload_offset: offset from dummy packet address to user defined data
296 * @pctype: Packet type for which filter is used
297 *
298 * Helper function to offset data of dummy packet, program it and
299 * handle errors.
300 **/
301static int i40e_prepare_fdir_filter(struct i40e_pf *pf,
302 struct i40e_fdir_filter *fd_data,
303 bool add, char *packet_addr,
304 int payload_offset, u8 pctype)
305{
306 int ret;
307
308 if (fd_data->flex_filter) {
309 u8 *payload;
310 __be16 pattern = fd_data->flex_word;
311 u16 off = fd_data->flex_offset;
312
313 payload = packet_addr + payload_offset;
314
315 /* If user provided vlan, offset payload by vlan header length */
316 if (!!fd_data->vlan_tag)
317 payload += VLAN_HLEN;
318
319 *((__force __be16 *)(payload + off)) = pattern;
320 }
321
322 fd_data->pctype = pctype;
323 ret = i40e_program_fdir_filter(fd_data, packet_addr, pf, add);
324 if (ret) {
325 dev_info(&pf->pdev->dev,
326 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
327 fd_data->pctype, fd_data->fd_id, ret);
328 /* Free the packet buffer since it wasn't added to the ring */
329 return -EOPNOTSUPP;
330 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
331 if (add)
332 dev_info(&pf->pdev->dev,
333 "Filter OK for PCTYPE %d loc = %d\n",
334 fd_data->pctype, fd_data->fd_id);
335 else
336 dev_info(&pf->pdev->dev,
337 "Filter deleted for PCTYPE %d loc = %d\n",
338 fd_data->pctype, fd_data->fd_id);
339 }
340
341 return ret;
342}
343
344/**
345 * i40e_change_filter_num - Update the filter counters
346 * @ipv4: true if the filter is IPv4, false if IPv6
347 * @add: add or delete filter
348 * @ipv4_filter_num: field to update
349 * @ipv6_filter_num: field to update
350 *
351 * Update filter number field for pf.
352 **/
353static void i40e_change_filter_num(bool ipv4, bool add, u16 *ipv4_filter_num,
354 u16 *ipv6_filter_num)
355{
356 if (add) {
357 if (ipv4)
358 (*ipv4_filter_num)++;
359 else
360 (*ipv6_filter_num)++;
361 } else {
362 if (ipv4)
363 (*ipv4_filter_num)--;
364 else
365 (*ipv6_filter_num)--;
366 }
367}
368
369#define I40E_UDPIP_DUMMY_PACKET_LEN 42
370#define I40E_UDPIP6_DUMMY_PACKET_LEN 62
371/**
372 * i40e_add_del_fdir_udp - Add/Remove UDP filters
373 * @vsi: pointer to the targeted VSI
374 * @fd_data: the flow director data required for the FDir descriptor
375 * @add: true adds a filter, false removes it
376 * @ipv4: true is v4, false is v6
377 *
378 * Returns 0 if the filters were successfully added or removed
379 **/
380static int i40e_add_del_fdir_udp(struct i40e_vsi *vsi,
381 struct i40e_fdir_filter *fd_data,
382 bool add,
383 bool ipv4)
384{
385 struct i40e_pf *pf = vsi->back;
386 u8 *raw_packet;
387 int ret;
388
389 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
390 if (!raw_packet)
391 return -ENOMEM;
392
393 i40e_create_dummy_udp_packet(raw_packet, ipv4, IPPROTO_UDP, fd_data);
394
395 if (ipv4)
396 ret = i40e_prepare_fdir_filter
397 (pf, fd_data, add, raw_packet,
398 I40E_UDPIP_DUMMY_PACKET_LEN,
399 I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
400 else
401 ret = i40e_prepare_fdir_filter
402 (pf, fd_data, add, raw_packet,
403 I40E_UDPIP6_DUMMY_PACKET_LEN,
404 I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
405
406 if (ret) {
407 kfree(raw_packet);
408 return ret;
409 }
410
411 i40e_change_filter_num(ipv4, add, &pf->fd_udp4_filter_cnt,
412 &pf->fd_udp6_filter_cnt);
413
414 return 0;
415}
416
417#define I40E_TCPIP_DUMMY_PACKET_LEN 54
418#define I40E_TCPIP6_DUMMY_PACKET_LEN 74
419/**
420 * i40e_add_del_fdir_tcp - Add/Remove TCP filters
421 * @vsi: pointer to the targeted VSI
422 * @fd_data: the flow director data required for the FDir descriptor
423 * @add: true adds a filter, false removes it
424 * @ipv4: true is v4, false is v6
425 *
426 * Returns 0 if the filters were successfully added or removed
427 **/
428static int i40e_add_del_fdir_tcp(struct i40e_vsi *vsi,
429 struct i40e_fdir_filter *fd_data,
430 bool add,
431 bool ipv4)
432{
433 struct i40e_pf *pf = vsi->back;
434 u8 *raw_packet;
435 int ret;
436
437 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
438 if (!raw_packet)
439 return -ENOMEM;
440
441 i40e_create_dummy_tcp_packet(raw_packet, ipv4, IPPROTO_TCP, fd_data);
442 if (ipv4)
443 ret = i40e_prepare_fdir_filter
444 (pf, fd_data, add, raw_packet,
445 I40E_TCPIP_DUMMY_PACKET_LEN,
446 I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
447 else
448 ret = i40e_prepare_fdir_filter
449 (pf, fd_data, add, raw_packet,
450 I40E_TCPIP6_DUMMY_PACKET_LEN,
451 I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
452
453 if (ret) {
454 kfree(raw_packet);
455 return ret;
456 }
457
458 i40e_change_filter_num(ipv4, add, &pf->fd_tcp4_filter_cnt,
459 &pf->fd_tcp6_filter_cnt);
460
461 if (add) {
462 if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) &&
463 I40E_DEBUG_FD & pf->hw.debug_mask)
464 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
465 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
466 }
467 return 0;
468}
469
470#define I40E_SCTPIP_DUMMY_PACKET_LEN 46
471#define I40E_SCTPIP6_DUMMY_PACKET_LEN 66
472/**
473 * i40e_add_del_fdir_sctp - Add/Remove SCTP Flow Director filters for
474 * a specific flow spec
475 * @vsi: pointer to the targeted VSI
476 * @fd_data: the flow director data required for the FDir descriptor
477 * @add: true adds a filter, false removes it
478 * @ipv4: true is v4, false is v6
479 *
480 * Returns 0 if the filters were successfully added or removed
481 **/
482static int i40e_add_del_fdir_sctp(struct i40e_vsi *vsi,
483 struct i40e_fdir_filter *fd_data,
484 bool add,
485 bool ipv4)
486{
487 struct i40e_pf *pf = vsi->back;
488 u8 *raw_packet;
489 int ret;
490
491 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
492 if (!raw_packet)
493 return -ENOMEM;
494
495 i40e_create_dummy_sctp_packet(raw_packet, ipv4, IPPROTO_SCTP, fd_data);
496
497 if (ipv4)
498 ret = i40e_prepare_fdir_filter
499 (pf, fd_data, add, raw_packet,
500 I40E_SCTPIP_DUMMY_PACKET_LEN,
501 I40E_FILTER_PCTYPE_NONF_IPV4_SCTP);
502 else
503 ret = i40e_prepare_fdir_filter
504 (pf, fd_data, add, raw_packet,
505 I40E_SCTPIP6_DUMMY_PACKET_LEN,
506 I40E_FILTER_PCTYPE_NONF_IPV6_SCTP);
507
508 if (ret) {
509 kfree(raw_packet);
510 return ret;
511 }
512
513 i40e_change_filter_num(ipv4, add, &pf->fd_sctp4_filter_cnt,
514 &pf->fd_sctp6_filter_cnt);
515
516 return 0;
517}
518
519#define I40E_IP_DUMMY_PACKET_LEN 34
520#define I40E_IP6_DUMMY_PACKET_LEN 54
521/**
522 * i40e_add_del_fdir_ip - Add/Remove IP Flow Director filters for
523 * a specific flow spec
524 * @vsi: pointer to the targeted VSI
525 * @fd_data: the flow director data required for the FDir descriptor
526 * @add: true adds a filter, false removes it
527 * @ipv4: true is v4, false is v6
528 *
529 * Returns 0 if the filters were successfully added or removed
530 **/
531static int i40e_add_del_fdir_ip(struct i40e_vsi *vsi,
532 struct i40e_fdir_filter *fd_data,
533 bool add,
534 bool ipv4)
535{
536 struct i40e_pf *pf = vsi->back;
537 int payload_offset;
538 u8 *raw_packet;
539 int iter_start;
540 int iter_end;
541 int ret;
542 int i;
543
544 if (ipv4) {
545 iter_start = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
546 iter_end = I40E_FILTER_PCTYPE_FRAG_IPV4;
547 } else {
548 iter_start = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
549 iter_end = I40E_FILTER_PCTYPE_FRAG_IPV6;
550 }
551
552 for (i = iter_start; i <= iter_end; i++) {
553 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
554 if (!raw_packet)
555 return -ENOMEM;
556
557 /* IPv6 uses IPPROTO_NONE as the "no next header" value, unlike IPv4's IPPROTO_IP */
558 (void)i40e_create_dummy_packet
559 (raw_packet, ipv4, (ipv4) ? IPPROTO_IP : IPPROTO_NONE,
560 fd_data);
561
562 payload_offset = (ipv4) ? I40E_IP_DUMMY_PACKET_LEN :
563 I40E_IP6_DUMMY_PACKET_LEN;
564 ret = i40e_prepare_fdir_filter(pf, fd_data, add, raw_packet,
565 payload_offset, i);
566 if (ret)
567 goto err;
568 }
569
570 i40e_change_filter_num(ipv4, add, &pf->fd_ip4_filter_cnt,
571 &pf->fd_ip6_filter_cnt);
572
573 return 0;
574err:
575 kfree(raw_packet);
576 return ret;
577}
578
579/**
580 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
581 * @vsi: pointer to the targeted VSI
582 * @input: filter to add or delete
583 * @add: true adds a filter, false removes it
584 *
585 **/
586int i40e_add_del_fdir(struct i40e_vsi *vsi,
587 struct i40e_fdir_filter *input, bool add)
588{
589 enum ip_ver { ipv6 = 0, ipv4 = 1 };
590 struct i40e_pf *pf = vsi->back;
591 int ret;
592
593 switch (input->flow_type & ~FLOW_EXT) {
594 case TCP_V4_FLOW:
595 ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv4);
596 break;
597 case UDP_V4_FLOW:
598 ret = i40e_add_del_fdir_udp(vsi, input, add, ipv4);
599 break;
600 case SCTP_V4_FLOW:
601 ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv4);
602 break;
603 case TCP_V6_FLOW:
604 ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv6);
605 break;
606 case UDP_V6_FLOW:
607 ret = i40e_add_del_fdir_udp(vsi, input, add, ipv6);
608 break;
609 case SCTP_V6_FLOW:
610 ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv6);
611 break;
612 case IP_USER_FLOW:
613 switch (input->ipl4_proto) {
614 case IPPROTO_TCP:
615 ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv4);
616 break;
617 case IPPROTO_UDP:
618 ret = i40e_add_del_fdir_udp(vsi, input, add, ipv4);
619 break;
620 case IPPROTO_SCTP:
621 ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv4);
622 break;
623 case IPPROTO_IP:
624 ret = i40e_add_del_fdir_ip(vsi, input, add, ipv4);
625 break;
626 default:
627 /* We cannot support masking based on protocol */
628 dev_info(&pf->pdev->dev, "Unsupported IPv4 protocol 0x%02x\n",
629 input->ipl4_proto);
630 return -EINVAL;
631 }
632 break;
633 case IPV6_USER_FLOW:
634 switch (input->ipl4_proto) {
635 case IPPROTO_TCP:
636 ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv6);
637 break;
638 case IPPROTO_UDP:
639 ret = i40e_add_del_fdir_udp(vsi, input, add, ipv6);
640 break;
641 case IPPROTO_SCTP:
642 ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv6);
643 break;
644 case IPPROTO_IP:
645 ret = i40e_add_del_fdir_ip(vsi, input, add, ipv6);
646 break;
647 default:
648 /* We cannot support masking based on protocol */
649 dev_info(&pf->pdev->dev, "Unsupported IPv6 protocol 0x%02x\n",
650 input->ipl4_proto);
651 return -EINVAL;
652 }
653 break;
654 default:
655 dev_info(&pf->pdev->dev, "Unsupported flow type 0x%02x\n",
656 input->flow_type);
657 return -EINVAL;
658 }
659
660 /* The buffer allocated here will normally be freed by
661 * i40e_clean_fdir_tx_irq() as it reclaims resources after transmit
662 * completion. In the event of an error adding the buffer to the FDIR
663 * ring, it will immediately be freed. It may also be freed by
664 * i40e_clean_tx_ring() when closing the VSI.
665 */
666 return ret;
667}
668
669/**
670 * i40e_fd_handle_status - check the Programming Status for FD
671 * @rx_ring: the Rx ring for this descriptor
672 * @qword0_raw: qword0
673 * @qword1: qword1 after le_to_cpu
674 * @prog_id: the id originally used for programming
675 *
676 * This is used to verify whether the FD programming or invalidation
677 * requested by SW to the HW was successful, and to take action accordingly.
678 **/
679static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u64 qword0_raw,
680 u64 qword1, u8 prog_id)
681{
682 struct i40e_pf *pf = rx_ring->vsi->back;
683 struct pci_dev *pdev = pf->pdev;
684 struct i40e_16b_rx_wb_qw0 *qw0;
685 u32 fcnt_prog, fcnt_avail;
686 u32 error;
687
688 qw0 = (struct i40e_16b_rx_wb_qw0 *)&qword0_raw;
689 error = FIELD_GET(I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK, qword1);
690
691 if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
692 pf->fd_inv = le32_to_cpu(qw0->hi_dword.fd_id);
693 if (qw0->hi_dword.fd_id != 0 ||
694 (I40E_DEBUG_FD & pf->hw.debug_mask))
695 dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
696 pf->fd_inv);
697
698 /* Check if the programming error is for ATR.
699 * If so, auto disable ATR and set a state for
700 * flush in progress. Next time we come here if flush is in
701 * progress do nothing, once flush is complete the state will
702 * be cleared.
703 */
704 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
705 return;
706
707 pf->fd_add_err++;
708 /* store the current atr filter count */
709 pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
710
711 if (qw0->hi_dword.fd_id == 0 &&
712 test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) {
713 /* These set_bit() calls aren't atomic with the
714 * test_bit() here, but worst case we potentially
715 * disable ATR and queue a flush right after SB
716 * support is re-enabled. That shouldn't cause an
717 * issue in practice
718 */
719 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
720 set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
721 }
722
723 /* filter programming failed most likely due to table full */
724 fcnt_prog = i40e_get_global_fd_count(pf);
725 fcnt_avail = pf->fdir_pf_filter_count;
726 /* If ATR is running fcnt_prog can quickly change,
727 * if we are very close to full, it makes sense to disable
728 * FD ATR/SB and then re-enable it when there is room.
729 */
730 if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
731 if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) &&
732 !test_and_set_bit(__I40E_FD_SB_AUTO_DISABLED,
733 pf->state))
734 if (I40E_DEBUG_FD & pf->hw.debug_mask)
735 dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
736 }
737 } else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
738 if (I40E_DEBUG_FD & pf->hw.debug_mask)
739 dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
740 qw0->hi_dword.fd_id);
741 }
742}
743
744/**
745 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
746 * @ring: the ring that owns the buffer
747 * @tx_buffer: the buffer to free
748 **/
749static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
750 struct i40e_tx_buffer *tx_buffer)
751{
752 if (tx_buffer->skb) {
753 if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
754 kfree(tx_buffer->raw_buf);
755 else if (ring_is_xdp(ring))
756 xdp_return_frame(tx_buffer->xdpf);
757 else
758 dev_kfree_skb_any(tx_buffer->skb);
759 if (dma_unmap_len(tx_buffer, len))
760 dma_unmap_single(ring->dev,
761 dma_unmap_addr(tx_buffer, dma),
762 dma_unmap_len(tx_buffer, len),
763 DMA_TO_DEVICE);
764 } else if (dma_unmap_len(tx_buffer, len)) {
765 dma_unmap_page(ring->dev,
766 dma_unmap_addr(tx_buffer, dma),
767 dma_unmap_len(tx_buffer, len),
768 DMA_TO_DEVICE);
769 }
770
771 tx_buffer->next_to_watch = NULL;
772 tx_buffer->skb = NULL;
773 dma_unmap_len_set(tx_buffer, len, 0);
774 /* tx_buffer must be completely set up in the transmit path */
775}
776
777/**
778 * i40e_clean_tx_ring - Free all Tx buffers in a ring
779 * @tx_ring: ring to be cleaned
780 **/
781void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
782{
783 unsigned long bi_size;
784 u16 i;
785
786 if (ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
787 i40e_xsk_clean_tx_ring(tx_ring);
788 } else {
789 /* ring already cleared, nothing to do */
790 if (!tx_ring->tx_bi)
791 return;
792
793 /* Free all the Tx ring sk_buffs */
794 for (i = 0; i < tx_ring->count; i++)
795 i40e_unmap_and_free_tx_resource(tx_ring,
796 &tx_ring->tx_bi[i]);
797 }
798
799 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
800 memset(tx_ring->tx_bi, 0, bi_size);
801
802 /* Zero out the descriptor ring */
803 memset(tx_ring->desc, 0, tx_ring->size);
804
805 tx_ring->next_to_use = 0;
806 tx_ring->next_to_clean = 0;
807
808 if (!tx_ring->netdev)
809 return;
810
811 /* cleanup Tx queue statistics */
812 netdev_tx_reset_queue(txring_txq(tx_ring));
813}
814
815/**
816 * i40e_free_tx_resources - Free Tx resources per queue
817 * @tx_ring: Tx descriptor ring for a specific queue
818 *
819 * Free all transmit software resources
820 **/
821void i40e_free_tx_resources(struct i40e_ring *tx_ring)
822{
823 i40e_clean_tx_ring(tx_ring);
824 kfree(tx_ring->tx_bi);
825 tx_ring->tx_bi = NULL;
826
827 if (tx_ring->desc) {
828 dma_free_coherent(tx_ring->dev, tx_ring->size,
829 tx_ring->desc, tx_ring->dma);
830 tx_ring->desc = NULL;
831 }
832}
833
834/**
835 * i40e_get_tx_pending - how many tx descriptors not processed
836 * @ring: the ring of descriptors
837 * @in_sw: use SW variables
838 *
839 * Since there is no access to the ring head register
840 * in XL710, we need to use our local copies
841 **/
842u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
843{
844 u32 head, tail;
845
846 if (!in_sw) {
847 head = i40e_get_head(ring);
848 tail = readl(ring->tail);
849 } else {
850 head = ring->next_to_clean;
851 tail = ring->next_to_use;
852 }
853
854 if (head != tail)
855 return (head < tail) ?
856 tail - head : (tail + ring->count - head);
857
858 return 0;
859}
860
861/**
862 * i40e_detect_recover_hung - Function to detect and recover hung queues
863 * @vsi: pointer to vsi struct with tx queues
864 *
865 * VSI has netdev and netdev has TX queues. This function checks each of
866 * those TX queues and, if hung, triggers recovery by issuing a SW interrupt.
867 **/
868void i40e_detect_recover_hung(struct i40e_vsi *vsi)
869{
870 struct i40e_ring *tx_ring = NULL;
871 struct net_device *netdev;
872 unsigned int i;
873 int packets;
874
875 if (!vsi)
876 return;
877
878 if (test_bit(__I40E_VSI_DOWN, vsi->state))
879 return;
880
881 netdev = vsi->netdev;
882 if (!netdev)
883 return;
884
885 if (!netif_carrier_ok(netdev))
886 return;
887
888 for (i = 0; i < vsi->num_queue_pairs; i++) {
889 tx_ring = vsi->tx_rings[i];
890 if (tx_ring && tx_ring->desc) {
891 /* If packet counter has not changed the queue is
892 * likely stalled, so force an interrupt for this
893 * queue.
894 *
895 * prev_pkt_ctr would be negative if there was no
896 * pending work.
897 */
898 packets = tx_ring->stats.packets & INT_MAX;
899 if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
900 i40e_force_wb(vsi, tx_ring->q_vector);
901 continue;
902 }
903
904 /* Memory barrier between read of packet count and call
905 * to i40e_get_tx_pending()
906 */
907 smp_rmb();
908 tx_ring->tx_stats.prev_pkt_ctr =
909 i40e_get_tx_pending(tx_ring, true) ? packets : -1;
910 }
911 }
912}
913
914/**
915 * i40e_clean_tx_irq - Reclaim resources after transmit completes
916 * @vsi: the VSI we care about
917 * @tx_ring: Tx ring to clean
918 * @napi_budget: Used to determine if we are in netpoll
919 * @tx_cleaned: Out parameter set to the number of Tx packets cleaned
920 *
921 * Returns true if there's any budget left (e.g. the clean is finished)
922 **/
923static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
924 struct i40e_ring *tx_ring, int napi_budget,
925 unsigned int *tx_cleaned)
926{
927 int i = tx_ring->next_to_clean;
928 struct i40e_tx_buffer *tx_buf;
929 struct i40e_tx_desc *tx_head;
930 struct i40e_tx_desc *tx_desc;
931 unsigned int total_bytes = 0, total_packets = 0;
932 unsigned int budget = vsi->work_limit;
933
934 tx_buf = &tx_ring->tx_bi[i];
935 tx_desc = I40E_TX_DESC(tx_ring, i);
936 i -= tx_ring->count;
937
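 /* HW reports the index of the last completed descriptor through a head
 * write-back word stored just past the end of the descriptor ring (the
 * extra u32 allocated in i40e_setup_tx_descriptors()); i40e_get_head()
 * reads that value, so no MMIO head register access is needed here.
 */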
938 tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
939
940 do {
941 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
942
943 /* if next_to_watch is not set then there is no work pending */
944 if (!eop_desc)
945 break;
946
947 /* prevent any other reads prior to eop_desc */
948 smp_rmb();
949
950 i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
951 /* we have caught up to head, no work left to do */
952 if (tx_head == tx_desc)
953 break;
954
955 /* clear next_to_watch to prevent false hangs */
956 tx_buf->next_to_watch = NULL;
957
958 /* update the statistics for this packet */
959 total_bytes += tx_buf->bytecount;
960 total_packets += tx_buf->gso_segs;
961
962 /* free the skb/XDP data */
963 if (ring_is_xdp(tx_ring))
964 xdp_return_frame(tx_buf->xdpf);
965 else
966 napi_consume_skb(tx_buf->skb, napi_budget);
967
968 /* unmap skb header data */
969 dma_unmap_single(tx_ring->dev,
970 dma_unmap_addr(tx_buf, dma),
971 dma_unmap_len(tx_buf, len),
972 DMA_TO_DEVICE);
973
974 /* clear tx_buffer data */
975 tx_buf->skb = NULL;
976 dma_unmap_len_set(tx_buf, len, 0);
977
978 /* unmap remaining buffers */
979 while (tx_desc != eop_desc) {
980 i40e_trace(clean_tx_irq_unmap,
981 tx_ring, tx_desc, tx_buf);
982
983 tx_buf++;
984 tx_desc++;
985 i++;
986 if (unlikely(!i)) {
987 i -= tx_ring->count;
988 tx_buf = tx_ring->tx_bi;
989 tx_desc = I40E_TX_DESC(tx_ring, 0);
990 }
991
992 /* unmap any remaining paged data */
993 if (dma_unmap_len(tx_buf, len)) {
994 dma_unmap_page(tx_ring->dev,
995 dma_unmap_addr(tx_buf, dma),
996 dma_unmap_len(tx_buf, len),
997 DMA_TO_DEVICE);
998 dma_unmap_len_set(tx_buf, len, 0);
999 }
1000 }
1001
1002 /* move us one more past the eop_desc for start of next pkt */
1003 tx_buf++;
1004 tx_desc++;
1005 i++;
1006 if (unlikely(!i)) {
1007 i -= tx_ring->count;
1008 tx_buf = tx_ring->tx_bi;
1009 tx_desc = I40E_TX_DESC(tx_ring, 0);
1010 }
1011
1012 prefetch(tx_desc);
1013
1014 /* update budget accounting */
1015 budget--;
1016 } while (likely(budget));
1017
1018 i += tx_ring->count;
1019 tx_ring->next_to_clean = i;
1020 i40e_update_tx_stats(tx_ring, total_packets, total_bytes);
1021 i40e_arm_wb(tx_ring, vsi, budget);
1022
1023 if (ring_is_xdp(tx_ring))
1024 return !!budget;
1025
1026 /* notify netdev of completed buffers */
1027 netdev_tx_completed_queue(txring_txq(tx_ring),
1028 total_packets, total_bytes);
1029
1030#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
1031 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
1032 (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
1033 /* Make sure that anybody stopping the queue after this
1034 * sees the new next_to_clean.
1035 */
1036 smp_mb();
1037 if (__netif_subqueue_stopped(tx_ring->netdev,
1038 tx_ring->queue_index) &&
1039 !test_bit(__I40E_VSI_DOWN, vsi->state)) {
1040 netif_wake_subqueue(tx_ring->netdev,
1041 tx_ring->queue_index);
1042 ++tx_ring->tx_stats.restart_queue;
1043 }
1044 }
1045
1046 *tx_cleaned = total_packets;
1047 return !!budget;
1048}
1049
1050/**
1051 * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
1052 * @vsi: the VSI we care about
1053 * @q_vector: the vector on which to enable writeback
1054 *
1055 **/
1056static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
1057 struct i40e_q_vector *q_vector)
1058{
1059 u16 flags = q_vector->tx.ring[0].flags;
1060 u32 val;
1061
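 /* WB_ON_ITR asks the HW to write back any completed Tx descriptors when
 * the ITR timer expires, without raising an interrupt, so a queue whose
 * interrupts stay masked still gets its completions reported.
 */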
1062 if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
1063 return;
1064
1065 if (q_vector->arm_wb_state)
1066 return;
1067
1068 if (test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags)) {
1069 val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
1070 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */
1071
1072 wr32(&vsi->back->hw,
1073 I40E_PFINT_DYN_CTLN(q_vector->reg_idx),
1074 val);
1075 } else {
1076 val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
1077 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */
1078
1079 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
1080 }
1081 q_vector->arm_wb_state = true;
1082}
1083
1084/**
1085 * i40e_force_wb - Issue SW Interrupt so HW does a wb
1086 * @vsi: the VSI we care about
1087 * @q_vector: the vector on which to force writeback
1088 *
1089 **/
1090void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
1091{
1092 if (test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags)) {
1093 u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
1094 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
1095 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
1096 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
1097 /* allow 00 to be written to the index */
1098
1099 wr32(&vsi->back->hw,
1100 I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val);
1101 } else {
1102 u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
1103 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
1104 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
1105 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
1106 /* allow 00 to be written to the index */
1107
1108 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
1109 }
1110}
1111
1112static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector,
1113 struct i40e_ring_container *rc)
1114{
1115 return &q_vector->rx == rc;
1116}
1117
1118static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)
1119{
1120 unsigned int divisor;
1121
1122 switch (q_vector->vsi->back->hw.phy.link_info.link_speed) {
1123 case I40E_LINK_SPEED_40GB:
1124 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024;
1125 break;
1126 case I40E_LINK_SPEED_25GB:
1127 case I40E_LINK_SPEED_20GB:
1128 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512;
1129 break;
1130 default:
1131 case I40E_LINK_SPEED_10GB:
1132 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256;
1133 break;
1134 case I40E_LINK_SPEED_1GB:
1135 case I40E_LINK_SPEED_100MB:
1136 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32;
1137 break;
1138 }
1139
1140 return divisor;
1141}
1142
1143/**
1144 * i40e_update_itr - update the dynamic ITR value based on statistics
1145 * @q_vector: structure containing interrupt and ring information
1146 * @rc: structure containing ring performance data
1147 *
1148 * Stores a new ITR value based on packets and byte
1149 * counts during the last interrupt. The advantage of per interrupt
1150 * computation is faster updates and more accurate ITR for the current
1151 * traffic pattern. Constants in this function were computed
1152 * based on theoretical maximum wire speed and thresholds were set based
1153 * on testing data as well as attempting to minimize response time
1154 * while increasing bulk throughput.
1155 **/
1156static void i40e_update_itr(struct i40e_q_vector *q_vector,
1157 struct i40e_ring_container *rc)
1158{
1159 unsigned int avg_wire_size, packets, bytes, itr;
1160 unsigned long next_update = jiffies;
1161
1162 /* If we don't have any rings just leave ourselves set for maximum
1163 * possible latency so we take ourselves out of the equation.
1164 */
1165 if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
1166 return;
1167
1168 /* For Rx we want to push the delay up and default to low latency.
1169 * for Tx we want to pull the delay down and default to high latency.
1170 */
1171 itr = i40e_container_is_rx(q_vector, rc) ?
1172 I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY :
1173 I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY;
1174
1175 /* If we didn't update within up to 1 - 2 jiffies we can assume
1176 * that either packets are coming in so slow there hasn't been
1177 * any work, or that there is so much work that NAPI is dealing
1178 * with interrupt moderation and we don't need to do anything.
1179 */
1180 if (time_after(next_update, rc->next_update))
1181 goto clear_counts;
1182
1183 /* If itr_countdown is set it means we programmed an ITR within
1184 * the last 4 interrupt cycles. This has a side effect of us
1185 * potentially firing an early interrupt. In order to work around
1186 * this we need to throw out any data received for a few
1187 * interrupts following the update.
1188 */
1189 if (q_vector->itr_countdown) {
1190 itr = rc->target_itr;
1191 goto clear_counts;
1192 }
1193
1194 packets = rc->total_packets;
1195 bytes = rc->total_bytes;
1196
1197 if (i40e_container_is_rx(q_vector, rc)) {
1198 /* For Rx, if there are 1 to 4 packets and fewer than 9000 bytes,
1199 * assume there is insufficient data to use the bulk rate limiting
1200 * approach unless Tx is already in bulk rate limiting. We are
1201 * likely latency driven.
1202 */
1203 if (packets && packets < 4 && bytes < 9000 &&
1204 (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) {
1205 itr = I40E_ITR_ADAPTIVE_LATENCY;
1206 goto adjust_by_size;
1207 }
1208 } else if (packets < 4) {
1209 /* If we have Tx and Rx ITR maxed and Tx ITR is running in
1210 * bulk mode and we are receiving 4 or fewer packets just
1211 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
1212 * that the Rx can relax.
1213 */
1214 if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS &&
1215 (q_vector->rx.target_itr & I40E_ITR_MASK) ==
1216 I40E_ITR_ADAPTIVE_MAX_USECS)
1217 goto clear_counts;
1218 } else if (packets > 32) {
1219 /* If we have processed over 32 packets in a single interrupt
1220 * for Tx assume we need to switch over to "bulk" mode.
1221 */
1222 rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY;
1223 }
1224
1225 /* We have no packets to actually measure against. This means
1226 * either one of the other queues on this vector is active or
1227 * we are a Tx queue doing TSO with too high of an interrupt rate.
1228 *
1229 * Between 4 and 56 we can assume that our current interrupt delay
1230 * is only slightly too low. As such we should increase it by a small
1231 * fixed amount.
1232 */
1233 if (packets < 56) {
1234 itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC;
1235 if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
1236 itr &= I40E_ITR_ADAPTIVE_LATENCY;
1237 itr += I40E_ITR_ADAPTIVE_MAX_USECS;
1238 }
1239 goto clear_counts;
1240 }
1241
1242 if (packets <= 256) {
1243 itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
1244 itr &= I40E_ITR_MASK;
1245
1246 /* Between 56 and 112 is our "goldilocks" zone where we are
1247 * working out "just right". Just report that our current
1248 * ITR is good for us.
1249 */
1250 if (packets <= 112)
1251 goto clear_counts;
1252
1253 /* If packet count is 128 or greater we are likely looking
1254 * at a slight overrun of the delay we want. Try halving
1255 * our delay to see if that will cut the number of packets
1256 * in half per interrupt.
1257 */
1258 itr /= 2;
1259 itr &= I40E_ITR_MASK;
1260 if (itr < I40E_ITR_ADAPTIVE_MIN_USECS)
1261 itr = I40E_ITR_ADAPTIVE_MIN_USECS;
1262
1263 goto clear_counts;
1264 }
1265
1266 /* The paths below assume we are dealing with a bulk ITR since
1267 * number of packets is greater than 256. We are just going to have
1268 * to compute a value and try to bring the count under control,
1269 * though for smaller packet sizes there isn't much we can do as
1270 * NAPI polling will likely be kicking in sooner rather than later.
1271 */
1272 itr = I40E_ITR_ADAPTIVE_BULK;
1273
1274adjust_by_size:
1275 /* If packet counts are 256 or greater we can assume we have a gross
1276 * overestimation of what the rate should be. Instead of trying to fine
1277 * tune it just use the formula below to try and dial in an exact value
1278 * give the current packet size of the frame.
1279 */
1280 avg_wire_size = bytes / packets;
1281
1282 /* The following is a crude approximation of:
1283 * wmem_default / (size + overhead) = desired_pkts_per_int
1284 * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
1285 * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
1286 *
1287 * Assuming wmem_default is 212992 and overhead is 640 bytes per
1288 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
1289 * formula down to
1290 *
1291 * (170 * (size + 24)) / (size + 640) = ITR
1292 *
1293 * We first do some math on the packet size and then finally bitshift
1294 * by 8 after rounding up. We also have to account for PCIe link speed
1295 * difference as ITR scales based on this.
1296 */
1297 if (avg_wire_size <= 60) {
1298 /* Start at 250k ints/sec */
1299 avg_wire_size = 4096;
1300 } else if (avg_wire_size <= 380) {
1301 /* 250K ints/sec to 60K ints/sec */
1302 avg_wire_size *= 40;
1303 avg_wire_size += 1696;
1304 } else if (avg_wire_size <= 1084) {
1305 /* 60K ints/sec to 36K ints/sec */
1306 avg_wire_size *= 15;
1307 avg_wire_size += 11452;
1308 } else if (avg_wire_size <= 1980) {
1309 /* 36K ints/sec to 30K ints/sec */
1310 avg_wire_size *= 5;
1311 avg_wire_size += 22420;
1312 } else {
1313 /* plateau at a limit of 30K ints/sec */
1314 avg_wire_size = 32256;
1315 }
1316
1317 /* If we are in low latency mode halve our delay which doubles the
1318 * rate to somewhere between 100K to 16K ints/sec
1319 */
1320 if (itr & I40E_ITR_ADAPTIVE_LATENCY)
1321 avg_wire_size /= 2;
1322
1323 /* Resultant value is 256 times larger than it needs to be. This
1324 * gives us room to adjust the value as needed to either increase
1325 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
1326 *
1327 * Use addition as we have already recorded the new latency flag
1328 * for the ITR value.
1329 */
1330 itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) *
1331 I40E_ITR_ADAPTIVE_MIN_INC;
1332
1333 if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
1334 itr &= I40E_ITR_ADAPTIVE_LATENCY;
1335 itr += I40E_ITR_ADAPTIVE_MAX_USECS;
1336 }
1337
1338clear_counts:
1339 /* write back value */
1340 rc->target_itr = itr;
1341
1342 /* next update should occur within next jiffy */
1343 rc->next_update = next_update + 1;
1344
1345 rc->total_bytes = 0;
1346 rc->total_packets = 0;
1347}
1348
1349static struct i40e_rx_buffer *i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
1350{
1351 return &rx_ring->rx_bi[idx];
1352}
1353
1354/**
1355 * i40e_reuse_rx_page - page flip buffer and store it back on the ring
1356 * @rx_ring: rx descriptor ring to store buffers on
1357 * @old_buff: donor buffer to have page reused
1358 *
1359 * Synchronizes page for reuse by the adapter
1360 **/
1361static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
1362 struct i40e_rx_buffer *old_buff)
1363{
1364 struct i40e_rx_buffer *new_buff;
1365 u16 nta = rx_ring->next_to_alloc;
1366
1367 new_buff = i40e_rx_bi(rx_ring, nta);
1368
1369 /* update, and store next to alloc */
1370 nta++;
1371 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1372
1373 /* transfer page from old buffer to new buffer */
1374 new_buff->dma = old_buff->dma;
1375 new_buff->page = old_buff->page;
1376 new_buff->page_offset = old_buff->page_offset;
1377 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
1378
1379 /* clear contents of buffer_info */
1380 old_buff->page = NULL;
1381}
1382
1383/**
1384 * i40e_clean_programming_status - clean the programming status descriptor
1385 * @rx_ring: the rx ring that has this descriptor
1386 * @qword0_raw: qword0
1387 * @qword1: qword1 representing status_error_len in CPU ordering
1388 *
1389 * Flow director should handle FD_FILTER_STATUS to check its filter programming
1390 * status being successful or not and take actions accordingly. FCoE should
1391 * handle its context/filter programming/invalidation status and take actions.
1394 **/
1395void i40e_clean_programming_status(struct i40e_ring *rx_ring, u64 qword0_raw,
1396 u64 qword1)
1397{
1398 u8 id;
1399
1400 id = FIELD_GET(I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK, qword1);
1401
1402 if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
1403 i40e_fd_handle_status(rx_ring, qword0_raw, qword1, id);
1404}
1405
1406/**
1407 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
1408 * @tx_ring: the tx ring to set up
1409 *
1410 * Return 0 on success, negative on error
1411 **/
1412int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
1413{
1414 struct device *dev = tx_ring->dev;
1415 int bi_size;
1416
1417 if (!dev)
1418 return -ENOMEM;
1419
1420 /* warn if we are about to overwrite the pointer */
1421 WARN_ON(tx_ring->tx_bi);
1422 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
1423 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
1424 if (!tx_ring->tx_bi)
1425 goto err;
1426
1427 u64_stats_init(&tx_ring->syncp);
1428
1429 /* round up to nearest 4K */
1430 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
1431 /* add u32 for head writeback, align after this takes care of
1432 * guaranteeing this is at least one cache line in size
1433 */
1434 tx_ring->size += sizeof(u32);
1435 tx_ring->size = ALIGN(tx_ring->size, 4096);
1436 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
1437 &tx_ring->dma, GFP_KERNEL);
1438 if (!tx_ring->desc) {
1439 dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
1440 tx_ring->size);
1441 goto err;
1442 }
1443
1444 tx_ring->next_to_use = 0;
1445 tx_ring->next_to_clean = 0;
1446 tx_ring->tx_stats.prev_pkt_ctr = -1;
1447 return 0;
1448
1449err:
1450 kfree(tx_ring->tx_bi);
1451 tx_ring->tx_bi = NULL;
1452 return -ENOMEM;
1453}
1454
1455static void i40e_clear_rx_bi(struct i40e_ring *rx_ring)
1456{
1457 memset(rx_ring->rx_bi, 0, sizeof(*rx_ring->rx_bi) * rx_ring->count);
1458}
1459
1460/**
1461 * i40e_clean_rx_ring - Free Rx buffers
1462 * @rx_ring: ring to be cleaned
1463 **/
1464void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
1465{
1466 u16 i;
1467
1468 /* ring already cleared, nothing to do */
1469 if (!rx_ring->rx_bi)
1470 return;
1471
1472 if (rx_ring->xsk_pool) {
1473 i40e_xsk_clean_rx_ring(rx_ring);
1474 goto skip_free;
1475 }
1476
1477 /* Free all the Rx ring sk_buffs */
1478 for (i = 0; i < rx_ring->count; i++) {
1479 struct i40e_rx_buffer *rx_bi = i40e_rx_bi(rx_ring, i);
1480
1481 if (!rx_bi->page)
1482 continue;
1483
1484 /* Invalidate cache lines that may have been written to by
1485 * device so that we avoid corrupting memory.
1486 */
1487 dma_sync_single_range_for_cpu(rx_ring->dev,
1488 rx_bi->dma,
1489 rx_bi->page_offset,
1490 rx_ring->rx_buf_len,
1491 DMA_FROM_DEVICE);
1492
1493 /* free resources associated with mapping */
1494 dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
1495 i40e_rx_pg_size(rx_ring),
1496 DMA_FROM_DEVICE,
1497 I40E_RX_DMA_ATTR);
1498
1499 __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
1500
1501 rx_bi->page = NULL;
1502 rx_bi->page_offset = 0;
1503 }
1504
1505skip_free:
1506 if (rx_ring->xsk_pool)
1507 i40e_clear_rx_bi_zc(rx_ring);
1508 else
1509 i40e_clear_rx_bi(rx_ring);
1510
1511 /* Zero out the descriptor ring */
1512 memset(rx_ring->desc, 0, rx_ring->size);
1513
1514 rx_ring->next_to_alloc = 0;
1515 rx_ring->next_to_clean = 0;
1516 rx_ring->next_to_process = 0;
1517 rx_ring->next_to_use = 0;
1518}
1519
1520/**
1521 * i40e_free_rx_resources - Free Rx resources
1522 * @rx_ring: ring to clean the resources from
1523 *
1524 * Free all receive software resources
1525 **/
1526void i40e_free_rx_resources(struct i40e_ring *rx_ring)
1527{
1528 i40e_clean_rx_ring(rx_ring);
1529 if (rx_ring->vsi->type == I40E_VSI_MAIN)
1530 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
1531 rx_ring->xdp_prog = NULL;
1532 kfree(rx_ring->rx_bi);
1533 rx_ring->rx_bi = NULL;
1534
1535 if (rx_ring->desc) {
1536 dma_free_coherent(rx_ring->dev, rx_ring->size,
1537 rx_ring->desc, rx_ring->dma);
1538 rx_ring->desc = NULL;
1539 }
1540}
1541
1542/**
1543 * i40e_setup_rx_descriptors - Allocate Rx descriptors
1544 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
1545 *
1546 * Returns 0 on success, negative on failure
1547 **/
1548int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
1549{
1550 struct device *dev = rx_ring->dev;
1551
1552 u64_stats_init(&rx_ring->syncp);
1553
1554 /* Round up to nearest 4K */
1555 rx_ring->size = rx_ring->count * sizeof(union i40e_rx_desc);
1556 rx_ring->size = ALIGN(rx_ring->size, 4096);
1557 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
1558 &rx_ring->dma, GFP_KERNEL);
1559
1560 if (!rx_ring->desc) {
1561 dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
1562 rx_ring->size);
1563 return -ENOMEM;
1564 }
1565
1566 rx_ring->next_to_alloc = 0;
1567 rx_ring->next_to_clean = 0;
1568 rx_ring->next_to_process = 0;
1569 rx_ring->next_to_use = 0;
1570
1571 rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
1572
1573 rx_ring->rx_bi =
1574 kcalloc(rx_ring->count, sizeof(*rx_ring->rx_bi), GFP_KERNEL);
1575 if (!rx_ring->rx_bi)
1576 return -ENOMEM;
1577
1578 return 0;
1579}
1580
1581/**
1582 * i40e_release_rx_desc - Store the new tail and head values
1583 * @rx_ring: ring to bump
1584 * @val: new tail index
1585 **/
1586void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
1587{
1588 rx_ring->next_to_use = val;
1589
1590 /* update next to alloc since we have filled the ring */
1591 rx_ring->next_to_alloc = val;
1592
1593 /* Force memory writes to complete before letting h/w
1594 * know there are new descriptors to fetch. (Only
1595 * applicable for weak-ordered memory model archs,
1596 * such as IA-64).
1597 */
1598 wmb();
1599 writel(val, rx_ring->tail);
1600}
1601
1602#if (PAGE_SIZE >= 8192)
1603static unsigned int i40e_rx_frame_truesize(struct i40e_ring *rx_ring,
1604 unsigned int size)
1605{
1606 unsigned int truesize;
1607
1608 truesize = rx_ring->rx_offset ?
1609 SKB_DATA_ALIGN(size + rx_ring->rx_offset) +
1610 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
1611 SKB_DATA_ALIGN(size);
1612 return truesize;
1613}
1614#endif
1615
1616/**
1617 * i40e_alloc_mapped_page - recycle or make a new page
1618 * @rx_ring: ring to use
1619 * @bi: rx_buffer struct to modify
1620 *
1621 * Returns true if the page was successfully allocated or
1622 * reused.
1623 **/
1624static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
1625 struct i40e_rx_buffer *bi)
1626{
1627 struct page *page = bi->page;
1628 dma_addr_t dma;
1629
1630 /* since we are recycling buffers we should seldom need to alloc */
1631 if (likely(page)) {
1632 rx_ring->rx_stats.page_reuse_count++;
1633 return true;
1634 }
1635
1636 /* alloc new page for storage */
1637 page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
1638 if (unlikely(!page)) {
1639 rx_ring->rx_stats.alloc_page_failed++;
1640 return false;
1641 }
1642
1643 rx_ring->rx_stats.page_alloc_count++;
1644
1645 /* map page for use */
1646 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
1647 i40e_rx_pg_size(rx_ring),
1648 DMA_FROM_DEVICE,
1649 I40E_RX_DMA_ATTR);
1650
1651 /* if mapping failed free memory back to system since
1652 * there isn't much point in holding memory we can't use
1653 */
1654 if (dma_mapping_error(rx_ring->dev, dma)) {
1655 __free_pages(page, i40e_rx_pg_order(rx_ring));
1656 rx_ring->rx_stats.alloc_page_failed++;
1657 return false;
1658 }
1659
1660 bi->dma = dma;
1661 bi->page = page;
1662 bi->page_offset = rx_ring->rx_offset;
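 /* Take a large page refcount up front; pagecnt_bias counts how many of
 * those references the driver still owns, so handing buffers to the
 * stack and reusing the page only adjust the bias instead of the
 * atomic page refcount.
 */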
1663 page_ref_add(page, USHRT_MAX - 1);
1664 bi->pagecnt_bias = USHRT_MAX;
1665
1666 return true;
1667}
1668
1669/**
1670 * i40e_alloc_rx_buffers - Replace used receive buffers
1671 * @rx_ring: ring to place buffers on
1672 * @cleaned_count: number of buffers to replace
1673 *
1674 * Returns false if all allocations were successful, true if any fail
1675 **/
1676bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
1677{
1678 u16 ntu = rx_ring->next_to_use;
1679 union i40e_rx_desc *rx_desc;
1680 struct i40e_rx_buffer *bi;
1681
1682 /* do nothing if no valid netdev defined */
1683 if (!rx_ring->netdev || !cleaned_count)
1684 return false;
1685
1686 rx_desc = I40E_RX_DESC(rx_ring, ntu);
1687 bi = i40e_rx_bi(rx_ring, ntu);
1688
1689 do {
1690 if (!i40e_alloc_mapped_page(rx_ring, bi))
1691 goto no_buffers;
1692
1693 /* sync the buffer for use by the device */
1694 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
1695 bi->page_offset,
1696 rx_ring->rx_buf_len,
1697 DMA_FROM_DEVICE);
1698
1699 /* Refresh the desc even if buffer_addrs didn't change
1700 * because each write-back erases this info.
1701 */
1702 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
1703
1704 rx_desc++;
1705 bi++;
1706 ntu++;
1707 if (unlikely(ntu == rx_ring->count)) {
1708 rx_desc = I40E_RX_DESC(rx_ring, 0);
1709 bi = i40e_rx_bi(rx_ring, 0);
1710 ntu = 0;
1711 }
1712
1713 /* clear the status bits for the next_to_use descriptor */
1714 rx_desc->wb.qword1.status_error_len = 0;
1715
1716 cleaned_count--;
1717 } while (cleaned_count);
1718
1719 if (rx_ring->next_to_use != ntu)
1720 i40e_release_rx_desc(rx_ring, ntu);
1721
1722 return false;
1723
1724no_buffers:
1725 if (rx_ring->next_to_use != ntu)
1726 i40e_release_rx_desc(rx_ring, ntu);
1727
1728 /* make sure to come back via polling to try again after
1729 * allocation failure
1730 */
1731 return true;
1732}
1733
1734/**
1735 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
1736 * @vsi: the VSI we care about
1737 * @skb: skb currently being received and modified
1738 * @rx_desc: the receive descriptor
1739 **/
1740static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1741 struct sk_buff *skb,
1742 union i40e_rx_desc *rx_desc)
1743{
1744 struct i40e_rx_ptype_decoded decoded;
1745 u32 rx_error, rx_status;
1746 bool ipv4, ipv6;
1747 u8 ptype;
1748 u64 qword;
1749
1750 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1751 ptype = FIELD_GET(I40E_RXD_QW1_PTYPE_MASK, qword);
1752 rx_error = FIELD_GET(I40E_RXD_QW1_ERROR_MASK, qword);
1753 rx_status = FIELD_GET(I40E_RXD_QW1_STATUS_MASK, qword);
1754 decoded = decode_rx_desc_ptype(ptype);
1755
1756 skb->ip_summed = CHECKSUM_NONE;
1757
1758 skb_checksum_none_assert(skb);
1759
1760 /* Rx csum enabled and ip headers found? */
1761 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
1762 return;
1763
1764 /* did the hardware decode the packet and checksum? */
1765 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
1766 return;
1767
1768 /* both known and outer_ip must be set for the below code to work */
1769 if (!(decoded.known && decoded.outer_ip))
1770 return;
1771
1772 ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1773 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
1774 ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1775 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
1776
1777 if (ipv4 &&
1778 (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
1779 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
1780 goto checksum_fail;
1781
1782 /* likely incorrect csum if alternate IP extension headers found */
1783 if (ipv6 &&
1784 rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
1785 /* don't increment checksum err here, non-fatal err */
1786 return;
1787
1788 /* there was some L4 error, count error and punt packet to the stack */
1789 if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
1790 goto checksum_fail;
1791
1792 /* handle packets that were not able to be checksummed due
1793 * to arrival speed, in this case the stack can compute
1794 * the csum.
1795 */
1796 if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
1797 return;
1798
1799 /* If there is an outer header present that might contain a checksum
1800 * we need to bump the checksum level by 1 to reflect the fact that
1801 * we are indicating we validated the inner checksum.
1802 */
1803 if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
1804 skb->csum_level = 1;
1805
1806 /* Only report checksum unnecessary for TCP, UDP, or SCTP */
1807 switch (decoded.inner_prot) {
1808 case I40E_RX_PTYPE_INNER_PROT_TCP:
1809 case I40E_RX_PTYPE_INNER_PROT_UDP:
1810 case I40E_RX_PTYPE_INNER_PROT_SCTP:
1811 skb->ip_summed = CHECKSUM_UNNECESSARY;
1812 fallthrough;
1813 default:
1814 break;
1815 }
1816
1817 return;
1818
1819checksum_fail:
1820 vsi->back->hw_csum_rx_error++;
1821}
1822
1823/**
1824 * i40e_ptype_to_htype - get a hash type
1825 * @ptype: the ptype value from the descriptor
1826 *
1827 * Returns a hash type to be used by skb_set_hash
1828 **/
1829static inline int i40e_ptype_to_htype(u8 ptype)
1830{
1831 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1832
1833 if (!decoded.known)
1834 return PKT_HASH_TYPE_NONE;
1835
1836 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1837 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1838 return PKT_HASH_TYPE_L4;
1839 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1840 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1841 return PKT_HASH_TYPE_L3;
1842 else
1843 return PKT_HASH_TYPE_L2;
1844}
1845
1846/**
1847 * i40e_rx_hash - set the hash value in the skb
1848 * @ring: descriptor ring
1849 * @rx_desc: specific descriptor
1850 * @skb: skb currently being received and modified
1851 * @rx_ptype: Rx packet type
1852 **/
1853static inline void i40e_rx_hash(struct i40e_ring *ring,
1854 union i40e_rx_desc *rx_desc,
1855 struct sk_buff *skb,
1856 u8 rx_ptype)
1857{
1858 u32 hash;
1859 const __le64 rss_mask =
1860 cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
1861 I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
1862
1863 if (!(ring->netdev->features & NETIF_F_RXHASH))
1864 return;
1865
1866 if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
1867 hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1868 skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
1869 }
1870}
1871
1872/**
1873 * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
1874 * @rx_ring: rx descriptor ring packet is being transacted on
1875 * @rx_desc: pointer to the EOP Rx descriptor
1876 * @skb: pointer to current skb being populated
1877 *
1878 * This function checks the ring, descriptor, and packet information in
1879 * order to populate the hash, checksum, VLAN, protocol, and
1880 * other fields within the skb.
1881 **/
1882void i40e_process_skb_fields(struct i40e_ring *rx_ring,
1883 union i40e_rx_desc *rx_desc, struct sk_buff *skb)
1884{
1885 u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1886 u32 rx_status = FIELD_GET(I40E_RXD_QW1_STATUS_MASK, qword);
1887 u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
1888 u32 tsyn = FIELD_GET(I40E_RXD_QW1_STATUS_TSYNINDX_MASK, rx_status);
1889 u8 rx_ptype = FIELD_GET(I40E_RXD_QW1_PTYPE_MASK, qword);
1890
1891 if (unlikely(tsynvalid))
1892 i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
1893
1894 i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
1895
1896 i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
1897
1898 skb_record_rx_queue(skb, rx_ring->queue_index);
1899
1900 if (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
1901 __le16 vlan_tag = rx_desc->wb.qword0.lo_dword.l2tag1;
1902
1903 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1904 le16_to_cpu(vlan_tag));
1905 }
1906
1907 /* modifies the skb - consumes the enet header */
1908 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1909}
1910
1911/**
1912 * i40e_cleanup_headers - Correct empty headers
1913 * @rx_ring: rx descriptor ring packet is being transacted on
1914 * @skb: pointer to current skb being fixed
1915 * @rx_desc: pointer to the EOP Rx descriptor
1916 *
* If the skb is not at least 60 bytes, we also need to pad it so that
* it is large enough to qualify as a valid Ethernet frame.
1919 *
1920 * Returns true if an error was encountered and skb was freed.
1921 **/
1922static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
1923 union i40e_rx_desc *rx_desc)
1924
1925{
1926 /* ERR_MASK will only have valid bits if EOP set, and
1927 * what we are doing here is actually checking
1928 * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
1929 * the error field
1930 */
1931 if (unlikely(i40e_test_staterr(rx_desc,
1932 BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
1933 dev_kfree_skb_any(skb);
1934 return true;
1935 }
1936
1937 /* if eth_skb_pad returns an error the skb was freed */
1938 if (eth_skb_pad(skb))
1939 return true;
1940
1941 return false;
1942}
1943
1944/**
1945 * i40e_can_reuse_rx_page - Determine if page can be reused for another Rx
1946 * @rx_buffer: buffer containing the page
1947 * @rx_stats: rx stats structure for the rx ring
1948 *
1949 * If page is reusable, we have a green light for calling i40e_reuse_rx_page,
1950 * which will assign the current buffer to the buffer that next_to_alloc is
1951 * pointing to; otherwise, the DMA mapping needs to be destroyed and
1952 * page freed.
1953 *
1954 * rx_stats will be updated to indicate whether the page was waived
1955 * or busy if it could not be reused.
1956 */
1957static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
1958 struct i40e_rx_queue_stats *rx_stats)
1959{
1960 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
1961 struct page *page = rx_buffer->page;
1962
1963 /* Is any reuse possible? */
1964 if (!dev_page_is_reusable(page)) {
1965 rx_stats->page_waive_count++;
1966 return false;
1967 }
1968
1969#if (PAGE_SIZE < 8192)
1970 /* if we are only owner of page we can reuse it */
1971 if (unlikely((rx_buffer->page_count - pagecnt_bias) > 1)) {
1972 rx_stats->page_busy_count++;
1973 return false;
1974 }
1975#else
1976#define I40E_LAST_OFFSET \
1977 (SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
1978 if (rx_buffer->page_offset > I40E_LAST_OFFSET) {
1979 rx_stats->page_busy_count++;
1980 return false;
1981 }
1982#endif
1983
1984 /* If we have drained the page fragment pool we need to update
1985 * the pagecnt_bias and page count so that we fully restock the
1986 * number of references the driver holds.
1987 */
1988 if (unlikely(pagecnt_bias == 1)) {
1989 page_ref_add(page, USHRT_MAX - 1);
1990 rx_buffer->pagecnt_bias = USHRT_MAX;
1991 }
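/* For context (illustrative): pre-charging USHRT_MAX page references lets
 * the driver's hot path account for handed-out buffers by decrementing the
 * local pagecnt_bias instead of touching the atomic page refcount; only
 * when the batch is nearly used up (bias == 1) is the refcount bumped again.
 */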
1992
1993 return true;
1994}
1995
1996/**
* i40e_rx_buffer_flip - adjust rx_buffer to point to an unused region
1998 * @rx_buffer: Rx buffer to adjust
1999 * @truesize: Size of adjustment
2000 **/
2001static void i40e_rx_buffer_flip(struct i40e_rx_buffer *rx_buffer,
2002 unsigned int truesize)
2003{
2004#if (PAGE_SIZE < 8192)
2005 rx_buffer->page_offset ^= truesize;
2006#else
2007 rx_buffer->page_offset += truesize;
2008#endif
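/* Illustration (assuming an order-0 page split into two halves, i.e.
 * truesize == 2048 on a 4K page): the XOR above toggles page_offset
 * between 0 and 2048 so the two halves are used alternately, while on
 * larger pages the offset simply advances by truesize each time.
 */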
2009}
2010
2011/**
2012 * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
2013 * @rx_ring: rx descriptor ring to transact packets on
2014 * @size: size of buffer to add to skb
2015 *
2016 * This function will pull an Rx buffer from the ring and synchronize it
2017 * for use by the CPU.
2018 */
2019static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
2020 const unsigned int size)
2021{
2022 struct i40e_rx_buffer *rx_buffer;
2023
2024 rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_process);
2025 rx_buffer->page_count =
2026#if (PAGE_SIZE < 8192)
2027 page_count(rx_buffer->page);
2028#else
2029 0;
2030#endif
2031 prefetch_page_address(rx_buffer->page);
2032
2033 /* we are reusing so sync this buffer for CPU use */
2034 dma_sync_single_range_for_cpu(rx_ring->dev,
2035 rx_buffer->dma,
2036 rx_buffer->page_offset,
2037 size,
2038 DMA_FROM_DEVICE);
2039
2040 /* We have pulled a buffer for use, so decrement pagecnt_bias */
2041 rx_buffer->pagecnt_bias--;
2042
2043 return rx_buffer;
2044}
2045
2046/**
2047 * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
2048 * @rx_ring: rx descriptor ring to transact packets on
2049 * @rx_buffer: rx buffer to pull data from
2050 *
2051 * This function will clean up the contents of the rx_buffer. It will
2052 * either recycle the buffer or unmap it and free the associated resources.
2053 */
2054static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
2055 struct i40e_rx_buffer *rx_buffer)
2056{
2057 if (i40e_can_reuse_rx_page(rx_buffer, &rx_ring->rx_stats)) {
2058 /* hand second half of page back to the ring */
2059 i40e_reuse_rx_page(rx_ring, rx_buffer);
2060 } else {
2061 /* we are not reusing the buffer so unmap it */
2062 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
2063 i40e_rx_pg_size(rx_ring),
2064 DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
2065 __page_frag_cache_drain(rx_buffer->page,
2066 rx_buffer->pagecnt_bias);
2067 /* clear contents of buffer_info */
2068 rx_buffer->page = NULL;
2069 }
2070}
2071
2072/**
* i40e_process_rx_buffs - Process buffers after the XDP program has run or on error
2074 * @rx_ring: Rx descriptor ring to transact packets on
2075 * @xdp_res: Result of the XDP program
2076 * @xdp: xdp_buff pointing to the data
2077 **/
2078static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
2079 struct xdp_buff *xdp)
2080{
2081 u32 nr_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
2082 u32 next = rx_ring->next_to_clean, i = 0;
2083 struct i40e_rx_buffer *rx_buffer;
2084
2085 xdp->flags = 0;
2086
2087 while (1) {
2088 rx_buffer = i40e_rx_bi(rx_ring, next);
2089 if (++next == rx_ring->count)
2090 next = 0;
2091
2092 if (!rx_buffer->page)
2093 continue;
2094
2095 if (xdp_res != I40E_XDP_CONSUMED)
2096 i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz);
2097 else if (i++ <= nr_frags)
2098 rx_buffer->pagecnt_bias++;
2099
2100 /* EOP buffer will be put in i40e_clean_rx_irq() */
2101 if (next == rx_ring->next_to_process)
2102 return;
2103
2104 i40e_put_rx_buffer(rx_ring, rx_buffer);
2105 }
2106}
2107
2108/**
2109 * i40e_construct_skb - Allocate skb and populate it
2110 * @rx_ring: rx descriptor ring to transact packets on
2111 * @xdp: xdp_buff pointing to the data
2112 *
2113 * This function allocates an skb. It then populates it with the page
2114 * data from the current receive descriptor, taking care to set up the
2115 * skb correctly.
2116 */
2117static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
2118 struct xdp_buff *xdp)
2119{
2120 unsigned int size = xdp->data_end - xdp->data;
2121 struct i40e_rx_buffer *rx_buffer;
2122 struct skb_shared_info *sinfo;
2123 unsigned int headlen;
2124 struct sk_buff *skb;
2125 u32 nr_frags = 0;
2126
2127 /* prefetch first cache line of first page */
2128 net_prefetch(xdp->data);
2129
2130 /* Note, we get here by enabling legacy-rx via:
2131 *
2132 * ethtool --set-priv-flags <dev> legacy-rx on
2133 *
2134 * In this mode, we currently get 0 extra XDP headroom as
2135 * opposed to having legacy-rx off, where we process XDP
2136 * packets going to stack via i40e_build_skb(). The latter
2137 * provides us currently with 192 bytes of headroom.
2138 *
2139 * For i40e_construct_skb() mode it means that the
2140 * xdp->data_meta will always point to xdp->data, since
2141 * the helper cannot expand the head. Should this ever
* change in the future for legacy-rx mode on, then let's also
2143 * add xdp->data_meta handling here.
2144 */
2145
2146 /* allocate a skb to store the frags */
2147 skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
2148 I40E_RX_HDR_SIZE,
2149 GFP_ATOMIC | __GFP_NOWARN);
2150 if (unlikely(!skb))
2151 return NULL;
2152
2153 /* Determine available headroom for copy */
2154 headlen = size;
2155 if (headlen > I40E_RX_HDR_SIZE)
2156 headlen = eth_get_headlen(skb->dev, xdp->data,
2157 I40E_RX_HDR_SIZE);
2158
2159 /* align pull length to size of long to optimize memcpy performance */
2160 memcpy(__skb_put(skb, headlen), xdp->data,
2161 ALIGN(headlen, sizeof(long)));
2162
2163 if (unlikely(xdp_buff_has_frags(xdp))) {
2164 sinfo = xdp_get_shared_info_from_buff(xdp);
2165 nr_frags = sinfo->nr_frags;
2166 }
2167 rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
2168 /* update all of the pointers */
2169 size -= headlen;
2170 if (size) {
2171 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2172 dev_kfree_skb(skb);
2173 return NULL;
2174 }
2175 skb_add_rx_frag(skb, 0, rx_buffer->page,
2176 rx_buffer->page_offset + headlen,
2177 size, xdp->frame_sz);
2178 /* buffer is used by skb, update page_offset */
2179 i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz);
2180 } else {
2181 /* buffer is unused, reset bias back to rx_buffer */
2182 rx_buffer->pagecnt_bias++;
2183 }
2184
2185 if (unlikely(xdp_buff_has_frags(xdp))) {
2186 struct skb_shared_info *skinfo = skb_shinfo(skb);
2187
2188 memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0],
2189 sizeof(skb_frag_t) * nr_frags);
2190
2191 xdp_update_skb_shared_info(skb, skinfo->nr_frags + nr_frags,
2192 sinfo->xdp_frags_size,
2193 nr_frags * xdp->frame_sz,
2194 xdp_buff_is_frag_pfmemalloc(xdp));
2195
2196 /* First buffer has already been processed, so bump ntc */
2197 if (++rx_ring->next_to_clean == rx_ring->count)
2198 rx_ring->next_to_clean = 0;
2199
2200 i40e_process_rx_buffs(rx_ring, I40E_XDP_PASS, xdp);
2201 }
2202
2203 return skb;
2204}
2205
2206/**
2207 * i40e_build_skb - Build skb around an existing buffer
2208 * @rx_ring: Rx descriptor ring to transact packets on
2209 * @xdp: xdp_buff pointing to the data
2210 *
2211 * This function builds an skb around an existing Rx buffer, taking care
2212 * to set up the skb correctly and avoid any memcpy overhead.
2213 */
2214static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
2215 struct xdp_buff *xdp)
2216{
2217 unsigned int metasize = xdp->data - xdp->data_meta;
2218 struct skb_shared_info *sinfo;
2219 struct sk_buff *skb;
2220 u32 nr_frags;
2221
2222 /* Prefetch first cache line of first page. If xdp->data_meta
* is unused, this points exactly at xdp->data; otherwise we
* likely have a consumer accessing the first few bytes of meta
* data, and then the actual data.
2226 */
2227 net_prefetch(xdp->data_meta);
2228
2229 if (unlikely(xdp_buff_has_frags(xdp))) {
2230 sinfo = xdp_get_shared_info_from_buff(xdp);
2231 nr_frags = sinfo->nr_frags;
2232 }
2233
2234 /* build an skb around the page buffer */
2235 skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz);
2236 if (unlikely(!skb))
2237 return NULL;
2238
2239 /* update pointers within the skb to store the data */
2240 skb_reserve(skb, xdp->data - xdp->data_hard_start);
2241 __skb_put(skb, xdp->data_end - xdp->data);
2242 if (metasize)
2243 skb_metadata_set(skb, metasize);
2244
2245 if (unlikely(xdp_buff_has_frags(xdp))) {
2246 xdp_update_skb_shared_info(skb, nr_frags,
2247 sinfo->xdp_frags_size,
2248 nr_frags * xdp->frame_sz,
2249 xdp_buff_is_frag_pfmemalloc(xdp));
2250
2251 i40e_process_rx_buffs(rx_ring, I40E_XDP_PASS, xdp);
2252 } else {
2253 struct i40e_rx_buffer *rx_buffer;
2254
2255 rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
2256 /* buffer is used by skb, update page_offset */
2257 i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz);
2258 }
2259
2260 return skb;
2261}
2262
2263/**
2264 * i40e_is_non_eop - process handling of non-EOP buffers
2265 * @rx_ring: Rx ring being processed
2266 * @rx_desc: Rx descriptor for current buffer
2267 *
2268 * If the buffer is an EOP buffer, this function exits returning false,
2269 * otherwise return true indicating that this is in fact a non-EOP buffer.
2270 */
2271bool i40e_is_non_eop(struct i40e_ring *rx_ring,
2272 union i40e_rx_desc *rx_desc)
2273{
2274 /* if we are the last buffer then there is nothing else to do */
2275#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
2276 if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
2277 return false;
2278
2279 rx_ring->rx_stats.non_eop_descs++;
2280
2281 return true;
2282}
2283
2284static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
2285 struct i40e_ring *xdp_ring);
2286
2287int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring)
2288{
2289 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
2290
2291 if (unlikely(!xdpf))
2292 return I40E_XDP_CONSUMED;
2293
2294 return i40e_xmit_xdp_ring(xdpf, xdp_ring);
2295}
2296
2297/**
2298 * i40e_run_xdp - run an XDP program
2299 * @rx_ring: Rx ring being processed
2300 * @xdp: XDP buffer containing the frame
2301 * @xdp_prog: XDP program to run
2302 **/
2303static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
2304{
2305 int err, result = I40E_XDP_PASS;
2306 struct i40e_ring *xdp_ring;
2307 u32 act;
2308
2309 if (!xdp_prog)
2310 goto xdp_out;
2311
2312 prefetchw(xdp->data_hard_start); /* xdp_frame write */
2313
2314 act = bpf_prog_run_xdp(xdp_prog, xdp);
2315 switch (act) {
2316 case XDP_PASS:
2317 break;
2318 case XDP_TX:
2319 xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2320 result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
2321 if (result == I40E_XDP_CONSUMED)
2322 goto out_failure;
2323 break;
2324 case XDP_REDIRECT:
2325 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
2326 if (err)
2327 goto out_failure;
2328 result = I40E_XDP_REDIR;
2329 break;
2330 default:
2331 bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
2332 fallthrough;
2333 case XDP_ABORTED:
2334out_failure:
2335 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
2336 fallthrough; /* handle aborts by dropping packet */
2337 case XDP_DROP:
2338 result = I40E_XDP_CONSUMED;
2339 break;
2340 }
2341xdp_out:
2342 return result;
2343}
2344
2345/**
2346 * i40e_xdp_ring_update_tail - Updates the XDP Tx ring tail register
2347 * @xdp_ring: XDP Tx ring
2348 *
2349 * This function updates the XDP Tx ring tail register.
2350 **/
2351void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring)
2352{
2353 /* Force memory writes to complete before letting h/w
2354 * know there are new descriptors to fetch.
2355 */
2356 wmb();
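/* Note: the relaxed MMIO write below is sufficient because the wmb()
 * above already orders the descriptor writes ahead of the tail bump.
 */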
2357 writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
2358}
2359
2360/**
2361 * i40e_update_rx_stats - Update Rx ring statistics
2362 * @rx_ring: rx descriptor ring
2363 * @total_rx_bytes: number of bytes received
2364 * @total_rx_packets: number of packets received
2365 *
2366 * This function updates the Rx ring statistics.
2367 **/
2368void i40e_update_rx_stats(struct i40e_ring *rx_ring,
2369 unsigned int total_rx_bytes,
2370 unsigned int total_rx_packets)
2371{
2372 u64_stats_update_begin(&rx_ring->syncp);
2373 rx_ring->stats.packets += total_rx_packets;
2374 rx_ring->stats.bytes += total_rx_bytes;
2375 u64_stats_update_end(&rx_ring->syncp);
2376 rx_ring->q_vector->rx.total_packets += total_rx_packets;
2377 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
2378}
2379
2380/**
2381 * i40e_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
2382 * @rx_ring: Rx ring
2383 * @xdp_res: Result of the receive batch
2384 *
* This function bumps the XDP Tx tail and/or flushes the redirect map, and
2386 * should be called when a batch of packets has been processed in the
2387 * napi loop.
2388 **/
2389void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res)
2390{
2391 if (xdp_res & I40E_XDP_REDIR)
2392 xdp_do_flush();
2393
2394 if (xdp_res & I40E_XDP_TX) {
2395 struct i40e_ring *xdp_ring =
2396 rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2397
2398 i40e_xdp_ring_update_tail(xdp_ring);
2399 }
2400}
2401
2402/**
* i40e_inc_ntp - Advance the next_to_process index
2404 * @rx_ring: Rx ring
2405 **/
2406static void i40e_inc_ntp(struct i40e_ring *rx_ring)
2407{
2408 u32 ntp = rx_ring->next_to_process + 1;
2409
2410 ntp = (ntp < rx_ring->count) ? ntp : 0;
2411 rx_ring->next_to_process = ntp;
2412 prefetch(I40E_RX_DESC(rx_ring, ntp));
2413}
2414
2415/**
* i40e_add_xdp_frag - Add a frag to xdp_buff
* @xdp: xdp_buff pointing to the data
* @nr_frags: out parameter returning the number of frags for the packet
2419 * @rx_buffer: rx_buffer holding data of the current frag
2420 * @size: size of data of current frag
2421 */
2422static int i40e_add_xdp_frag(struct xdp_buff *xdp, u32 *nr_frags,
2423 struct i40e_rx_buffer *rx_buffer, u32 size)
2424{
2425 struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
2426
2427 if (!xdp_buff_has_frags(xdp)) {
2428 sinfo->nr_frags = 0;
2429 sinfo->xdp_frags_size = 0;
2430 xdp_buff_set_frags_flag(xdp);
2431 } else if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS)) {
2432 /* Overflowing packet: All frags need to be dropped */
2433 return -ENOMEM;
2434 }
2435
2436 __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buffer->page,
2437 rx_buffer->page_offset, size);
2438
2439 sinfo->xdp_frags_size += size;
2440
2441 if (page_is_pfmemalloc(rx_buffer->page))
2442 xdp_buff_set_frag_pfmemalloc(xdp);
2443 *nr_frags = sinfo->nr_frags;
2444
2445 return 0;
2446}
2447
2448/**
2449 * i40e_consume_xdp_buff - Consume all the buffers of the packet and update ntc
2450 * @rx_ring: rx descriptor ring to transact packets on
2451 * @xdp: xdp_buff pointing to the data
2452 * @rx_buffer: rx_buffer of eop desc
2453 */
2454static void i40e_consume_xdp_buff(struct i40e_ring *rx_ring,
2455 struct xdp_buff *xdp,
2456 struct i40e_rx_buffer *rx_buffer)
2457{
2458 i40e_process_rx_buffs(rx_ring, I40E_XDP_CONSUMED, xdp);
2459 i40e_put_rx_buffer(rx_ring, rx_buffer);
2460 rx_ring->next_to_clean = rx_ring->next_to_process;
2461 xdp->data = NULL;
2462}
2463
2464/**
2465 * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
2466 * @rx_ring: rx descriptor ring to transact packets on
2467 * @budget: Total limit on number of packets to process
2468 * @rx_cleaned: Out parameter of the number of packets processed
2469 *
2470 * This function provides a "bounce buffer" approach to Rx interrupt
* processing. The advantage of this is that, on systems with expensive
* overhead for IOMMU access, it provides a means of avoiding that cost
* by keeping the page's DMA mapping in place.
2474 *
2475 * Returns amount of work completed
2476 **/
2477static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
2478 unsigned int *rx_cleaned)
2479{
2480 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
2481 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
2482 u16 clean_threshold = rx_ring->count / 2;
2483 unsigned int offset = rx_ring->rx_offset;
2484 struct xdp_buff *xdp = &rx_ring->xdp;
2485 unsigned int xdp_xmit = 0;
2486 struct bpf_prog *xdp_prog;
2487 bool failure = false;
2488 int xdp_res = 0;
2489
2490 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
2491
2492 while (likely(total_rx_packets < (unsigned int)budget)) {
2493 u16 ntp = rx_ring->next_to_process;
2494 struct i40e_rx_buffer *rx_buffer;
2495 union i40e_rx_desc *rx_desc;
2496 struct sk_buff *skb;
2497 unsigned int size;
2498 u32 nfrags = 0;
2499 bool neop;
2500 u64 qword;
2501
2502 /* return some buffers to hardware, one at a time is too slow */
2503 if (cleaned_count >= clean_threshold) {
2504 failure = failure ||
2505 i40e_alloc_rx_buffers(rx_ring, cleaned_count);
2506 cleaned_count = 0;
2507 }
2508
2509 rx_desc = I40E_RX_DESC(rx_ring, ntp);
2510
/* status_error_len will always be zero for unused descriptors
* because it's cleared in cleanup and overlaps with hdr_addr,
* which is always zero because packet split isn't used. If the
* hardware wrote DD then the length will be non-zero.
2515 */
2516 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
2517
2518 /* This memory barrier is needed to keep us from reading
2519 * any other fields out of the rx_desc until we have
2520 * verified the descriptor has been written back.
2521 */
2522 dma_rmb();
2523
2524 if (i40e_rx_is_programming_status(qword)) {
2525 i40e_clean_programming_status(rx_ring,
2526 rx_desc->raw.qword[0],
2527 qword);
2528 rx_buffer = i40e_rx_bi(rx_ring, ntp);
2529 i40e_inc_ntp(rx_ring);
2530 i40e_reuse_rx_page(rx_ring, rx_buffer);
/* Update ntc and bump the cleaned count if we are not in the
* middle of a multi-buffer packet.
2533 */
2534 if (rx_ring->next_to_clean == ntp) {
2535 rx_ring->next_to_clean =
2536 rx_ring->next_to_process;
2537 cleaned_count++;
2538 }
2539 continue;
2540 }
2541
2542 size = FIELD_GET(I40E_RXD_QW1_LENGTH_PBUF_MASK, qword);
2543 if (!size)
2544 break;
2545
2546 i40e_trace(clean_rx_irq, rx_ring, rx_desc, xdp);
2547 /* retrieve a buffer from the ring */
2548 rx_buffer = i40e_get_rx_buffer(rx_ring, size);
2549
2550 neop = i40e_is_non_eop(rx_ring, rx_desc);
2551 i40e_inc_ntp(rx_ring);
2552
2553 if (!xdp->data) {
2554 unsigned char *hard_start;
2555
2556 hard_start = page_address(rx_buffer->page) +
2557 rx_buffer->page_offset - offset;
2558 xdp_prepare_buff(xdp, hard_start, offset, size, true);
2559#if (PAGE_SIZE > 4096)
/* At larger PAGE_SIZE, frame_sz depends on the frame length */
2561 xdp->frame_sz = i40e_rx_frame_truesize(rx_ring, size);
2562#endif
2563 } else if (i40e_add_xdp_frag(xdp, &nfrags, rx_buffer, size) &&
2564 !neop) {
2565 /* Overflowing packet: Drop all frags on EOP */
2566 i40e_consume_xdp_buff(rx_ring, xdp, rx_buffer);
2567 break;
2568 }
2569
2570 if (neop)
2571 continue;
2572
2573 xdp_res = i40e_run_xdp(rx_ring, xdp, xdp_prog);
2574
2575 if (xdp_res) {
2576 xdp_xmit |= xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR);
2577
2578 if (unlikely(xdp_buff_has_frags(xdp))) {
2579 i40e_process_rx_buffs(rx_ring, xdp_res, xdp);
2580 size = xdp_get_buff_len(xdp);
2581 } else if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
2582 i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz);
2583 } else {
2584 rx_buffer->pagecnt_bias++;
2585 }
2586 total_rx_bytes += size;
2587 } else {
2588 if (ring_uses_build_skb(rx_ring))
2589 skb = i40e_build_skb(rx_ring, xdp);
2590 else
2591 skb = i40e_construct_skb(rx_ring, xdp);
2592
2593 /* drop if we failed to retrieve a buffer */
2594 if (!skb) {
2595 rx_ring->rx_stats.alloc_buff_failed++;
2596 i40e_consume_xdp_buff(rx_ring, xdp, rx_buffer);
2597 break;
2598 }
2599
2600 if (i40e_cleanup_headers(rx_ring, skb, rx_desc))
2601 goto process_next;
2602
2603 /* probably a little skewed due to removing CRC */
2604 total_rx_bytes += skb->len;
2605
2606 /* populate checksum, VLAN, and protocol */
2607 i40e_process_skb_fields(rx_ring, rx_desc, skb);
2608
2609 i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, xdp);
2610 napi_gro_receive(&rx_ring->q_vector->napi, skb);
2611 }
2612
2613 /* update budget accounting */
2614 total_rx_packets++;
2615process_next:
2616 cleaned_count += nfrags + 1;
2617 i40e_put_rx_buffer(rx_ring, rx_buffer);
2618 rx_ring->next_to_clean = rx_ring->next_to_process;
2619
2620 xdp->data = NULL;
2621 }
2622
2623 i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
2624
2625 i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
2626
2627 *rx_cleaned = total_rx_packets;
2628
2629 /* guarantee a trip back through this routine if there was a failure */
2630 return failure ? budget : (int)total_rx_packets;
2631}
2632
2633/**
2634 * i40e_buildreg_itr - build a value for writing to I40E_PFINT_DYN_CTLN register
2635 * @itr_idx: interrupt throttling index
2636 * @interval: interrupt throttling interval value in usecs
2637 * @force_swint: force software interrupt
2638 *
2639 * The function builds a value for I40E_PFINT_DYN_CTLN register that
2640 * is used to update interrupt throttling interval for specified ITR index
2641 * and optionally enforces a software interrupt. If the @itr_idx is equal
2642 * to I40E_ITR_NONE then no interval change is applied and only @force_swint
2643 * parameter is taken into account. If the interval change and enforced
2644 * software interrupt are not requested then the built value just enables
2645 * appropriate vector interrupt.
2646 **/
2647static u32 i40e_buildreg_itr(enum i40e_dyn_idx itr_idx, u16 interval,
2648 bool force_swint)
2649{
2650 u32 val;
2651
2652 /* We don't bother with setting the CLEARPBA bit as the data sheet
2653 * points out doing so is "meaningless since it was already
2654 * auto-cleared". The auto-clearing happens when the interrupt is
2655 * asserted.
2656 *
* Hardware errata 28 also indicates that writing to a
* xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear
* an event in the PBA anyway, so we need to rely on the automask
* to hold pending events for us until the interrupt is re-enabled.
2661 *
2662 * We have to shift the given value as it is reported in microseconds
2663 * and the register value is recorded in 2 microsecond units.
2664 */
2665 interval >>= 1;
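/* e.g. a requested interval of 50 usecs is written as 25 register units */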
2666
2667 /* 1. Enable vector interrupt
2668 * 2. Update the interval for the specified ITR index
2669 * (I40E_ITR_NONE in the register is used to indicate that
2670 * no interval update is requested)
2671 */
2672 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2673 FIELD_PREP(I40E_PFINT_DYN_CTLN_ITR_INDX_MASK, itr_idx) |
2674 FIELD_PREP(I40E_PFINT_DYN_CTLN_INTERVAL_MASK, interval);
2675
2676 /* 3. Enforce software interrupt trigger if requested
* (The rate of these software interrupts is limited by ITR2, which is
* set to 20K interrupts per second)
2679 */
2680 if (force_swint)
2681 val |= I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
2682 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK |
2683 FIELD_PREP(I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK,
2684 I40E_SW_ITR);
2685
2686 return val;
2687}
2688
2689/* The act of updating the ITR will cause it to immediately trigger. In order
2690 * to prevent this from throwing off adaptive update statistics we defer the
2691 * update so that it can only happen so often. So after either Tx or Rx are
2692 * updated we make the adaptive scheme wait until either the ITR completely
2693 * expires via the next_update expiration or we have been through at least
2694 * 3 interrupts.
2695 */
2696#define ITR_COUNTDOWN_START 3
2697
2698/**
2699 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
2700 * @vsi: the VSI we care about
2701 * @q_vector: q_vector for which itr is being updated and interrupt enabled
2702 *
2703 **/
2704static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
2705 struct i40e_q_vector *q_vector)
2706{
2707 enum i40e_dyn_idx itr_idx = I40E_ITR_NONE;
2708 struct i40e_hw *hw = &vsi->back->hw;
2709 u16 interval = 0;
2710 u32 itr_val;
2711
2712 /* If we don't have MSIX, then we only need to re-enable icr0 */
2713 if (!test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags)) {
2714 i40e_irq_dynamic_enable_icr0(vsi->back);
2715 return;
2716 }
2717
2718 /* These will do nothing if dynamic updates are not enabled */
2719 i40e_update_itr(q_vector, &q_vector->tx);
2720 i40e_update_itr(q_vector, &q_vector->rx);
2721
2722 /* This block of logic allows us to get away with only updating
2723 * one ITR value with each interrupt. The idea is to perform a
2724 * pseudo-lazy update with the following criteria.
2725 *
* 1. Rx is given higher priority than Tx if both are in the same state
* 2. If we must reduce an ITR, that is given the highest priority.
* 3. We then give priority to increasing the ITR based on amount.
2729 */
2730 if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
2731 /* Rx ITR needs to be reduced, this is highest priority */
2732 itr_idx = I40E_RX_ITR;
2733 interval = q_vector->rx.target_itr;
2734 q_vector->rx.current_itr = q_vector->rx.target_itr;
2735 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2736 } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
2737 ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
2738 (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
2739 /* Tx ITR needs to be reduced, this is second priority
2740 * Tx ITR needs to be increased more than Rx, fourth priority
2741 */
2742 itr_idx = I40E_TX_ITR;
2743 interval = q_vector->tx.target_itr;
2744 q_vector->tx.current_itr = q_vector->tx.target_itr;
2745 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2746 } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
2747 /* Rx ITR needs to be increased, third priority */
2748 itr_idx = I40E_RX_ITR;
2749 interval = q_vector->rx.target_itr;
2750 q_vector->rx.current_itr = q_vector->rx.target_itr;
2751 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2752 } else {
2753 /* No ITR update, lowest priority */
2754 if (q_vector->itr_countdown)
2755 q_vector->itr_countdown--;
2756 }
2757
2758 /* Do not update interrupt control register if VSI is down */
2759 if (test_bit(__I40E_VSI_DOWN, vsi->state))
2760 return;
2761
2762 /* Update ITR interval if necessary and enforce software interrupt
2763 * if we are exiting busy poll.
2764 */
2765 if (q_vector->in_busy_poll) {
2766 itr_val = i40e_buildreg_itr(itr_idx, interval, true);
2767 q_vector->in_busy_poll = false;
2768 } else {
2769 itr_val = i40e_buildreg_itr(itr_idx, interval, false);
2770 }
2771 wr32(hw, I40E_PFINT_DYN_CTLN(q_vector->reg_idx), itr_val);
2772}
2773
2774/**
2775 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
2776 * @napi: napi struct with our devices info in it
2777 * @budget: amount of work driver is allowed to do this pass, in packets
2778 *
2779 * This function will clean all queues associated with a q_vector.
2780 *
2781 * Returns the amount of work done
2782 **/
2783int i40e_napi_poll(struct napi_struct *napi, int budget)
2784{
2785 struct i40e_q_vector *q_vector =
2786 container_of(napi, struct i40e_q_vector, napi);
2787 struct i40e_vsi *vsi = q_vector->vsi;
2788 struct i40e_ring *ring;
2789 bool tx_clean_complete = true;
2790 bool rx_clean_complete = true;
2791 unsigned int tx_cleaned = 0;
2792 unsigned int rx_cleaned = 0;
2793 bool clean_complete = true;
2794 bool arm_wb = false;
2795 int budget_per_ring;
2796 int work_done = 0;
2797
2798 if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
2799 napi_complete(napi);
2800 return 0;
2801 }
2802
2803 /* Since the actual Tx work is minimal, we can give the Tx a larger
2804 * budget and be more aggressive about cleaning up the Tx descriptors.
2805 */
2806 i40e_for_each_ring(ring, q_vector->tx) {
2807 bool wd = ring->xsk_pool ?
2808 i40e_clean_xdp_tx_irq(vsi, ring) :
2809 i40e_clean_tx_irq(vsi, ring, budget, &tx_cleaned);
2810
2811 if (!wd) {
2812 clean_complete = tx_clean_complete = false;
2813 continue;
2814 }
2815 arm_wb |= ring->arm_wb;
2816 ring->arm_wb = false;
2817 }
2818
2819 /* Handle case where we are called by netpoll with a budget of 0 */
2820 if (budget <= 0)
2821 goto tx_only;
2822
2823 /* normally we have 1 Rx ring per q_vector */
2824 if (unlikely(q_vector->num_ringpairs > 1))
2825 /* We attempt to distribute budget to each Rx queue fairly, but
2826 * don't allow the budget to go below 1 because that would exit
2827 * polling early.
2828 */
2829 budget_per_ring = max_t(int, budget / q_vector->num_ringpairs, 1);
2830 else
2831 /* Max of 1 Rx ring in this q_vector so give it the budget */
2832 budget_per_ring = budget;
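/* Illustration: with a budget of 64 and 4 ring pairs each Rx ring is
 * polled with a budget of 16; with a budget of 3 and 8 ring pairs the
 * max_t() clamp above keeps it at 1 per ring so polling is not exited
 * prematurely.
 */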
2833
2834 i40e_for_each_ring(ring, q_vector->rx) {
2835 int cleaned = ring->xsk_pool ?
2836 i40e_clean_rx_irq_zc(ring, budget_per_ring) :
2837 i40e_clean_rx_irq(ring, budget_per_ring, &rx_cleaned);
2838
2839 work_done += cleaned;
2840 /* if we clean as many as budgeted, we must not be done */
2841 if (cleaned >= budget_per_ring)
2842 clean_complete = rx_clean_complete = false;
2843 }
2844
2845 if (!i40e_enabled_xdp_vsi(vsi))
2846 trace_i40e_napi_poll(napi, q_vector, budget, budget_per_ring, rx_cleaned,
2847 tx_cleaned, rx_clean_complete, tx_clean_complete);
2848
2849 /* If work not completed, return budget and polling will return */
2850 if (!clean_complete) {
2851 int cpu_id = smp_processor_id();
2852
2853 /* It is possible that the interrupt affinity has changed but,
2854 * if the cpu is pegged at 100%, polling will never exit while
2855 * traffic continues and the interrupt will be stuck on this
2856 * cpu. We check to make sure affinity is correct before we
2857 * continue to poll, otherwise we must stop polling so the
2858 * interrupt can move to the correct cpu.
2859 */
2860 if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
2861 /* Tell napi that we are done polling */
2862 napi_complete_done(napi, work_done);
2863
2864 /* Force an interrupt */
2865 i40e_force_wb(vsi, q_vector);
2866
2867 /* Return budget-1 so that polling stops */
2868 return budget - 1;
2869 }
2870tx_only:
2871 if (arm_wb) {
2872 q_vector->tx.ring[0].tx_stats.tx_force_wb++;
2873 i40e_enable_wb_on_itr(vsi, q_vector);
2874 }
2875 return budget;
2876 }
2877
2878 if (q_vector->tx.ring[0].flags & I40E_TXR_FLAGS_WB_ON_ITR)
2879 q_vector->arm_wb_state = false;
2880
2881 /* Exit the polling mode, but don't re-enable interrupts if stack might
2882 * poll us due to busy-polling
2883 */
2884 if (likely(napi_complete_done(napi, work_done)))
2885 i40e_update_enable_itr(vsi, q_vector);
2886 else
2887 q_vector->in_busy_poll = true;
2888
2889 return min(work_done, budget - 1);
2890}
2891
2892/**
2893 * i40e_atr - Add a Flow Director ATR filter
2894 * @tx_ring: ring to add programming descriptor to
2895 * @skb: send buffer
2896 * @tx_flags: send tx flags
2897 **/
2898static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
2899 u32 tx_flags)
2900{
2901 struct i40e_filter_program_desc *fdir_desc;
2902 struct i40e_pf *pf = tx_ring->vsi->back;
2903 union {
2904 unsigned char *network;
2905 struct iphdr *ipv4;
2906 struct ipv6hdr *ipv6;
2907 } hdr;
2908 struct tcphdr *th;
2909 unsigned int hlen;
2910 u32 flex_ptype, dtype_cmd;
2911 int l4_proto;
2912 u16 i;
2913
2914 /* make sure ATR is enabled */
2915 if (!test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags))
2916 return;
2917
2918 if (test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
2919 return;
2920
2921 /* if sampling is disabled do nothing */
2922 if (!tx_ring->atr_sample_rate)
2923 return;
2924
2925 /* Currently only IPv4/IPv6 with TCP is supported */
2926 if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
2927 return;
2928
2929 /* snag network header to get L4 type and address */
2930 hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ?
2931 skb_inner_network_header(skb) : skb_network_header(skb);
2932
2933 /* Note: tx_flags gets modified to reflect inner protocols in
2934 * tx_enable_csum function if encap is enabled.
2935 */
2936 if (tx_flags & I40E_TX_FLAGS_IPV4) {
2937 /* access ihl as u8 to avoid unaligned access on ia64 */
2938 hlen = (hdr.network[0] & 0x0F) << 2;
2939 l4_proto = hdr.ipv4->protocol;
2940 } else {
2941 /* find the start of the innermost ipv6 header */
2942 unsigned int inner_hlen = hdr.network - skb->data;
2943 unsigned int h_offset = inner_hlen;
2944
2945 /* this function updates h_offset to the end of the header */
2946 l4_proto =
2947 ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL);
/* hlen will contain our best estimate of the offset from the network header to the TCP header */
2949 hlen = h_offset - inner_hlen;
2950 }
2951
2952 if (l4_proto != IPPROTO_TCP)
2953 return;
2954
2955 th = (struct tcphdr *)(hdr.network + hlen);
2956
2957 /* Due to lack of space, no more new filters can be programmed */
2958 if (th->syn && test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
2959 return;
2960 if (test_bit(I40E_FLAG_HW_ATR_EVICT_ENA, pf->flags)) {
2961 /* HW ATR eviction will take care of removing filters on FIN
2962 * and RST packets.
2963 */
2964 if (th->fin || th->rst)
2965 return;
2966 }
2967
2968 tx_ring->atr_count++;
2969
2970 /* sample on all syn/fin/rst packets or once every atr sample rate */
2971 if (!th->fin &&
2972 !th->syn &&
2973 !th->rst &&
2974 (tx_ring->atr_count < tx_ring->atr_sample_rate))
2975 return;
2976
2977 tx_ring->atr_count = 0;
2978
2979 /* grab the next descriptor */
2980 i = tx_ring->next_to_use;
2981 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
2982
2983 i++;
2984 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2985
2986 flex_ptype = FIELD_PREP(I40E_TXD_FLTR_QW0_QINDEX_MASK,
2987 tx_ring->queue_index);
2988 flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
2989 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
2990 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
2991 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
2992 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
2993
2994 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
2995
2996 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
2997
2998 dtype_cmd |= (th->fin || th->rst) ?
2999 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
3000 I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
3001 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
3002 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
3003
3004 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
3005 I40E_TXD_FLTR_QW1_DEST_SHIFT;
3006
3007 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
3008 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
3009
3010 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
3011 if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
3012 dtype_cmd |=
3013 FIELD_PREP(I40E_TXD_FLTR_QW1_CNTINDEX_MASK,
3014 I40E_FD_ATR_STAT_IDX(pf->hw.pf_id));
3015 else
3016 dtype_cmd |=
3017 FIELD_PREP(I40E_TXD_FLTR_QW1_CNTINDEX_MASK,
3018 I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id));
3019
3020 if (test_bit(I40E_FLAG_HW_ATR_EVICT_ENA, pf->flags))
3021 dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
3022
3023 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
3024 fdir_desc->rsvd = cpu_to_le32(0);
3025 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
3026 fdir_desc->fd_id = cpu_to_le32(0);
3027}
3028
3029/**
3030 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
3031 * @skb: send buffer
3032 * @tx_ring: ring to send buffer on
3033 * @flags: the tx flags to be set
3034 *
* Checks the skb and correspondingly sets up several generic transmit flags
3036 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
3037 *
* Returns an error code to indicate the frame should be dropped upon error,
* and otherwise returns 0 to indicate the flags have been set properly.
3040 **/
3041static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
3042 struct i40e_ring *tx_ring,
3043 u32 *flags)
3044{
3045 __be16 protocol = skb->protocol;
3046 u32 tx_flags = 0;
3047
3048 if (protocol == htons(ETH_P_8021Q) &&
3049 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
3050 /* When HW VLAN acceleration is turned off by the user the
3051 * stack sets the protocol to 8021q so that the driver
3052 * can take any steps required to support the SW only
3053 * VLAN handling. In our case the driver doesn't need
3054 * to take any further steps so just set the protocol
3055 * to the encapsulated ethertype.
3056 */
3057 skb->protocol = vlan_get_protocol(skb);
3058 goto out;
3059 }
3060
3061 /* if we have a HW VLAN tag being added, default to the HW one */
3062 if (skb_vlan_tag_present(skb)) {
3063 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
3064 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
3065 /* else if it is a SW VLAN, check the next protocol and store the tag */
3066 } else if (protocol == htons(ETH_P_8021Q)) {
3067 struct vlan_hdr *vhdr, _vhdr;
3068
3069 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
3070 if (!vhdr)
3071 return -EINVAL;
3072
3073 protocol = vhdr->h_vlan_encapsulated_proto;
3074 tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
3075 tx_flags |= I40E_TX_FLAGS_SW_VLAN;
3076 }
3077
3078 if (!test_bit(I40E_FLAG_DCB_ENA, tx_ring->vsi->back->flags))
3079 goto out;
3080
3081 /* Insert 802.1p priority into VLAN header */
3082 if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
3083 (skb->priority != TC_PRIO_CONTROL)) {
3084 tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
3085 tx_flags |= (skb->priority & 0x7) <<
3086 I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
3087 if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
3088 struct vlan_ethhdr *vhdr;
3089 int rc;
3090
3091 rc = skb_cow_head(skb, 0);
3092 if (rc < 0)
3093 return rc;
3094 vhdr = skb_vlan_eth_hdr(skb);
3095 vhdr->h_vlan_TCI = htons(tx_flags >>
3096 I40E_TX_FLAGS_VLAN_SHIFT);
3097 } else {
3098 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
3099 }
3100 }
3101
3102out:
3103 *flags = tx_flags;
3104 return 0;
3105}
3106
3107/**
3108 * i40e_tso - set up the tso context descriptor
3109 * @first: pointer to first Tx buffer for xmit
3110 * @hdr_len: ptr to the size of the packet header
3111 * @cd_type_cmd_tso_mss: Quad Word 1
3112 *
* Returns 0 if no TSO can happen, 1 if TSO is set up, or a negative error code
3114 **/
3115static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
3116 u64 *cd_type_cmd_tso_mss)
3117{
3118 struct sk_buff *skb = first->skb;
3119 u64 cd_cmd, cd_tso_len, cd_mss;
3120 __be16 protocol;
3121 union {
3122 struct iphdr *v4;
3123 struct ipv6hdr *v6;
3124 unsigned char *hdr;
3125 } ip;
3126 union {
3127 struct tcphdr *tcp;
3128 struct udphdr *udp;
3129 unsigned char *hdr;
3130 } l4;
3131 u32 paylen, l4_offset;
3132 u16 gso_size;
3133 int err;
3134
3135 if (skb->ip_summed != CHECKSUM_PARTIAL)
3136 return 0;
3137
3138 if (!skb_is_gso(skb))
3139 return 0;
3140
3141 err = skb_cow_head(skb, 0);
3142 if (err < 0)
3143 return err;
3144
3145 protocol = vlan_get_protocol(skb);
3146
3147 if (eth_p_mpls(protocol))
3148 ip.hdr = skb_inner_network_header(skb);
3149 else
3150 ip.hdr = skb_network_header(skb);
3151 l4.hdr = skb_checksum_start(skb);
3152
3153 /* initialize outer IP header fields */
3154 if (ip.v4->version == 4) {
3155 ip.v4->tot_len = 0;
3156 ip.v4->check = 0;
3157
3158 first->tx_flags |= I40E_TX_FLAGS_TSO;
3159 } else {
3160 ip.v6->payload_len = 0;
3161 first->tx_flags |= I40E_TX_FLAGS_TSO;
3162 }
3163
3164 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
3165 SKB_GSO_GRE_CSUM |
3166 SKB_GSO_IPXIP4 |
3167 SKB_GSO_IPXIP6 |
3168 SKB_GSO_UDP_TUNNEL |
3169 SKB_GSO_UDP_TUNNEL_CSUM)) {
3170 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
3171 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
3172 l4.udp->len = 0;
3173
3174 /* determine offset of outer transport header */
3175 l4_offset = l4.hdr - skb->data;
3176
3177 /* remove payload length from outer checksum */
3178 paylen = skb->len - l4_offset;
3179 csum_replace_by_diff(&l4.udp->check,
3180 (__force __wsum)htonl(paylen));
3181 }
3182
3183 /* reset pointers to inner headers */
3184 ip.hdr = skb_inner_network_header(skb);
3185 l4.hdr = skb_inner_transport_header(skb);
3186
3187 /* initialize inner IP header fields */
3188 if (ip.v4->version == 4) {
3189 ip.v4->tot_len = 0;
3190 ip.v4->check = 0;
3191 } else {
3192 ip.v6->payload_len = 0;
3193 }
3194 }
3195
3196 /* determine offset of inner transport header */
3197 l4_offset = l4.hdr - skb->data;
3198
3199 /* remove payload length from inner checksum */
3200 paylen = skb->len - l4_offset;
3201
3202 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
3203 csum_replace_by_diff(&l4.udp->check, (__force __wsum)htonl(paylen));
3204 /* compute length of segmentation header */
3205 *hdr_len = sizeof(*l4.udp) + l4_offset;
3206 } else {
3207 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
3208 /* compute length of segmentation header */
3209 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
3210 }
3211
3212 /* pull values out of skb_shinfo */
3213 gso_size = skb_shinfo(skb)->gso_size;
3214
3215 /* update GSO size and bytecount with header size */
3216 first->gso_segs = skb_shinfo(skb)->gso_segs;
3217 first->bytecount += (first->gso_segs - 1) * *hdr_len;
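/* Worked example (illustrative): for gso_segs == 5 and a 66-byte header
 * this adds 4 * 66 = 264 bytes, since skb->len already counts one copy
 * of the header and the other four segments each repeat it on the wire.
 */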
3218
3219 /* find the field values */
3220 cd_cmd = I40E_TX_CTX_DESC_TSO;
3221 cd_tso_len = skb->len - *hdr_len;
3222 cd_mss = gso_size;
3223 *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
3224 (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
3225 (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
3226 return 1;
3227}
3228
3229/**
3230 * i40e_tsyn - set up the tsyn context descriptor
3231 * @tx_ring: ptr to the ring to send
3232 * @skb: ptr to the skb we're sending
3233 * @tx_flags: the collected send information
3234 * @cd_type_cmd_tso_mss: Quad Word 1
3235 *
3236 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
3237 **/
3238static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
3239 u32 tx_flags, u64 *cd_type_cmd_tso_mss)
3240{
3241 struct i40e_pf *pf;
3242
3243 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
3244 return 0;
3245
3246 /* Tx timestamps cannot be sampled when doing TSO */
3247 if (tx_flags & I40E_TX_FLAGS_TSO)
3248 return 0;
3249
3250 /* only timestamp the outbound packet if the user has requested it and
3251 * we are not already transmitting a packet to be timestamped
3252 */
3253 pf = i40e_netdev_to_pf(tx_ring->netdev);
3254 if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags))
3255 return 0;
3256
3257 if (pf->ptp_tx &&
3258 !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) {
3259 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3260 pf->ptp_tx_start = jiffies;
3261 pf->ptp_tx_skb = skb_get(skb);
3262 } else {
3263 pf->tx_hwtstamp_skipped++;
3264 return 0;
3265 }
3266
3267 *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
3268 I40E_TXD_CTX_QW1_CMD_SHIFT;
3269
3270 return 1;
3271}
3272
3273/**
3274 * i40e_tx_enable_csum - Enable Tx checksum offloads
3275 * @skb: send buffer
3276 * @tx_flags: pointer to Tx flags currently set
3277 * @td_cmd: Tx descriptor command bits to set
3278 * @td_offset: Tx descriptor header offsets to set
3279 * @tx_ring: Tx descriptor ring
3280 * @cd_tunneling: ptr to context desc bits
3281 **/
3282static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
3283 u32 *td_cmd, u32 *td_offset,
3284 struct i40e_ring *tx_ring,
3285 u32 *cd_tunneling)
3286{
3287 union {
3288 struct iphdr *v4;
3289 struct ipv6hdr *v6;
3290 unsigned char *hdr;
3291 } ip;
3292 union {
3293 struct tcphdr *tcp;
3294 struct udphdr *udp;
3295 unsigned char *hdr;
3296 } l4;
3297 unsigned char *exthdr;
3298 u32 offset, cmd = 0;
3299 __be16 frag_off;
3300 __be16 protocol;
3301 u8 l4_proto = 0;
3302
3303 if (skb->ip_summed != CHECKSUM_PARTIAL)
3304 return 0;
3305
3306 protocol = vlan_get_protocol(skb);
3307
3308 if (eth_p_mpls(protocol)) {
3309 ip.hdr = skb_inner_network_header(skb);
3310 l4.hdr = skb_checksum_start(skb);
3311 } else {
3312 ip.hdr = skb_network_header(skb);
3313 l4.hdr = skb_transport_header(skb);
3314 }
3315
/* Set the tx_flags to indicate the IP protocol type. This is
* required so that the checksum header computation below is accurate.
3318 */
3319 if (ip.v4->version == 4)
3320 *tx_flags |= I40E_TX_FLAGS_IPV4;
3321 else
3322 *tx_flags |= I40E_TX_FLAGS_IPV6;
3323
3324 /* compute outer L2 header size */
3325 offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
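/* e.g. a plain 14-byte Ethernet header encodes as a MACLEN of 7, since
 * the descriptor field counts the L2 header length in 2-byte words
 */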
3326
3327 if (skb->encapsulation) {
3328 u32 tunnel = 0;
3329 /* define outer network header type */
3330 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
3331 tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
3332 I40E_TX_CTX_EXT_IP_IPV4 :
3333 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
3334
3335 l4_proto = ip.v4->protocol;
3336 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
3337 int ret;
3338
3339 tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
3340
3341 exthdr = ip.hdr + sizeof(*ip.v6);
3342 l4_proto = ip.v6->nexthdr;
3343 ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
3344 &l4_proto, &frag_off);
3345 if (ret < 0)
3346 return -1;
3347 }
3348
3349 /* define outer transport */
3350 switch (l4_proto) {
3351 case IPPROTO_UDP:
3352 tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
3353 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3354 break;
3355 case IPPROTO_GRE:
3356 tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
3357 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3358 break;
3359 case IPPROTO_IPIP:
3360 case IPPROTO_IPV6:
3361 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3362 l4.hdr = skb_inner_network_header(skb);
3363 break;
3364 default:
3365 if (*tx_flags & I40E_TX_FLAGS_TSO)
3366 return -1;
3367
3368 skb_checksum_help(skb);
3369 return 0;
3370 }
3371
3372 /* compute outer L3 header size */
3373 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
3374 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
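/* e.g. a bare 20-byte outer IPv4 header encodes as 5 here, since the
 * outer IP length field counts 4-byte words
 */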
3375
3376 /* switch IP header pointer from outer to inner header */
3377 ip.hdr = skb_inner_network_header(skb);
3378
3379 /* compute tunnel header size */
3380 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
3381 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
3382
3383 /* indicate if we need to offload outer UDP header */
3384 if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
3385 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
3386 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
3387 tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
3388
3389 /* record tunnel offload values */
3390 *cd_tunneling |= tunnel;
3391
3392 /* switch L4 header pointer from outer to inner */
3393 l4.hdr = skb_inner_transport_header(skb);
3394 l4_proto = 0;
3395
3396 /* reset type as we transition from outer to inner headers */
3397 *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
3398 if (ip.v4->version == 4)
3399 *tx_flags |= I40E_TX_FLAGS_IPV4;
3400 if (ip.v6->version == 6)
3401 *tx_flags |= I40E_TX_FLAGS_IPV6;
3402 }
3403
3404 /* Enable IP checksum offloads */
3405 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
3406 l4_proto = ip.v4->protocol;
/* the stack computes the IP header checksum already; the only time we
3408 * need the hardware to recompute it is in the case of TSO.
3409 */
3410 cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
3411 I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
3412 I40E_TX_DESC_CMD_IIPT_IPV4;
3413 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
3414 cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
3415
3416 exthdr = ip.hdr + sizeof(*ip.v6);
3417 l4_proto = ip.v6->nexthdr;
3418 if (l4.hdr != exthdr)
3419 ipv6_skip_exthdr(skb, exthdr - skb->data,
3420 &l4_proto, &frag_off);
3421 }
3422
3423 /* compute inner L3 header size */
3424 offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
3425
3426 /* Enable L4 checksum offloads */
3427 switch (l4_proto) {
3428 case IPPROTO_TCP:
3429 /* enable checksum offloads */
3430 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
3431 offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
3432 break;
3433 case IPPROTO_SCTP:
3434 /* enable SCTP checksum offload */
3435 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
3436 offset |= (sizeof(struct sctphdr) >> 2) <<
3437 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
3438 break;
3439 case IPPROTO_UDP:
3440 /* enable UDP checksum offload */
3441 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
3442 offset |= (sizeof(struct udphdr) >> 2) <<
3443 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
3444 break;
3445 default:
3446 if (*tx_flags & I40E_TX_FLAGS_TSO)
3447 return -1;
3448 skb_checksum_help(skb);
3449 return 0;
3450 }
3451
3452 *td_cmd |= cmd;
3453 *td_offset |= offset;
3454
3455 return 1;
3456}
3457
3458/**
3459 * i40e_create_tx_ctx - Build the Tx context descriptor
3460 * @tx_ring: ring to create the descriptor on
3461 * @cd_type_cmd_tso_mss: Quad Word 1
3462 * @cd_tunneling: Quad Word 0 - bits 0-31
3463 * @cd_l2tag2: Quad Word 0 - bits 32-63
3464 **/
3465static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
3466 const u64 cd_type_cmd_tso_mss,
3467 const u32 cd_tunneling, const u32 cd_l2tag2)
3468{
3469 struct i40e_tx_context_desc *context_desc;
3470 int i = tx_ring->next_to_use;
3471
3472 if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
3473 !cd_tunneling && !cd_l2tag2)
3474 return;
3475
3476 /* grab the next descriptor */
3477 context_desc = I40E_TX_CTXTDESC(tx_ring, i);
3478
3479 i++;
3480 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
3481
3482 /* cpu_to_le32 and assign to struct fields */
3483 context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
3484 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
3485 context_desc->rsvd = cpu_to_le16(0);
3486 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
3487}
3488
3489/**
3490 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
3491 * @tx_ring: the ring to be checked
* @size: the number of descriptors we want to assure is available
3493 *
3494 * Returns -EBUSY if a stop is needed, else 0
3495 **/
3496int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
3497{
3498 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
3499 /* Memory barrier before checking head and tail */
3500 smp_mb();
3501
3502 ++tx_ring->tx_stats.tx_stopped;
3503
3504 /* Check again in a case another CPU has just made room available. */
3505 if (likely(I40E_DESC_UNUSED(tx_ring) < size))
3506 return -EBUSY;
3507
3508 /* A reprieve! - use start_queue because it doesn't call schedule */
3509 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
3510 ++tx_ring->tx_stats.restart_queue;
3511 return 0;
3512}
3513
3514/**
3515 * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
3516 * @skb: send buffer
3517 *
3518 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
3519 * and so we need to figure out the cases where we need to linearize the skb.
3520 *
3521 * For TSO we need to count the TSO header and segment payload separately.
3522 * As such we need to check cases where we have 7 fragments or more as we
3523 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
3524 * the segment payload in the first descriptor, and another 7 for the
3525 * fragments.
3526 **/
3527bool __i40e_chk_linearize(struct sk_buff *skb)
3528{
3529 const skb_frag_t *frag, *stale;
3530 int nr_frags, sum;
3531
3532 /* no need to check if number of frags is less than 7 */
3533 nr_frags = skb_shinfo(skb)->nr_frags;
3534 if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
3535 return false;
3536
3537 /* We need to walk through the list and validate that each group
3538 * of 6 fragments totals at least gso_size.
3539 */
3540 nr_frags -= I40E_MAX_BUFFER_TXD - 2;
3541 frag = &skb_shinfo(skb)->frags[0];
3542
/* Initialize sum to the negative value of gso_size minus 1. We
* use this as the worst case scenario in which the frag ahead
3545 * of us only provides one byte which is why we are limited to 6
3546 * descriptors for a single transmit as the header and previous
3547 * fragment are already consuming 2 descriptors.
3548 */
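/* Worked example (illustrative): with gso_size == 6000, if the six
 * fragments currently in the window only supply 5000 bytes then
 * sum == 1 - 6000 + 5000 == -999, so the skb must be linearized;
 * if they supply the full 6000 bytes, sum stays positive and we
 * keep walking the fragment list.
 */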
3549 sum = 1 - skb_shinfo(skb)->gso_size;
3550
3551 /* Add size of frags 0 through 4 to create our initial sum */
3552 sum += skb_frag_size(frag++);
3553 sum += skb_frag_size(frag++);
3554 sum += skb_frag_size(frag++);
3555 sum += skb_frag_size(frag++);
3556 sum += skb_frag_size(frag++);
3557
3558 /* Walk through fragments adding latest fragment, testing it, and
3559 * then removing stale fragments from the sum.
3560 */
3561 for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
3562 int stale_size = skb_frag_size(stale);
3563
3564 sum += skb_frag_size(frag++);
3565
3566 /* The stale fragment may present us with a smaller
3567 * descriptor than the actual fragment size. To account
3568 * for that we need to remove all the data on the front and
3569 * figure out what the remainder would be in the last
3570 * descriptor associated with the fragment.
3571 */
3572 if (stale_size > I40E_MAX_DATA_PER_TXD) {
3573 int align_pad = -(skb_frag_off(stale)) &
3574 (I40E_MAX_READ_REQ_SIZE - 1);
3575
3576 sum -= align_pad;
3577 stale_size -= align_pad;
3578
3579 do {
3580 sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
3581 stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
3582 } while (stale_size > I40E_MAX_DATA_PER_TXD);
3583 }
3584
3585 /* if sum is negative we failed to make sufficient progress */
3586 if (sum < 0)
3587 return true;
3588
3589 if (!nr_frags--)
3590 break;
3591
3592 sum -= stale_size;
3593 }
3594
3595 return false;
3596}
3597
3598/**
3599 * i40e_tx_map - Build the Tx descriptor
3600 * @tx_ring: ring to send buffer on
3601 * @skb: send buffer
3602 * @first: first buffer info buffer to use
3603 * @tx_flags: collected send information
3604 * @hdr_len: size of the packet header
3605 * @td_cmd: the command field in the descriptor
3606 * @td_offset: offset for checksum or crc
3607 *
3608 * Returns 0 on success, -1 on failure to DMA
3609 **/
3610static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
3611 struct i40e_tx_buffer *first, u32 tx_flags,
3612 const u8 hdr_len, u32 td_cmd, u32 td_offset)
3613{
3614 unsigned int data_len = skb->data_len;
3615 unsigned int size = skb_headlen(skb);
3616 skb_frag_t *frag;
3617 struct i40e_tx_buffer *tx_bi;
3618 struct i40e_tx_desc *tx_desc;
3619 u16 i = tx_ring->next_to_use;
3620 u32 td_tag = 0;
3621 dma_addr_t dma;
3622 u16 desc_count = 1;
3623
3624 if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
3625 td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
3626 td_tag = FIELD_GET(I40E_TX_FLAGS_VLAN_MASK, tx_flags);
3627 }
3628
3629 first->tx_flags = tx_flags;
3630
3631 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
3632
3633 tx_desc = I40E_TX_DESC(tx_ring, i);
3634 tx_bi = first;
3635
3636 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
3637 unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
3638
3639 if (dma_mapping_error(tx_ring->dev, dma))
3640 goto dma_error;
3641
3642 /* record length, and DMA address */
3643 dma_unmap_len_set(tx_bi, len, size);
3644 dma_unmap_addr_set(tx_bi, dma, dma);
3645
3646 /* align size to end of page */
3647 max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
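		/* Illustration, assuming the usual constants (I40E_MAX_DATA_PER_TXD
		 * of 16K - 1, I40E_MAX_READ_REQ_SIZE of 4K): max_data starts at the
		 * 12K aligned value and "-dma & 4095" adds the distance from dma up
		 * to the next 4K boundary, so each oversized chunk ends on a read
		 * request boundary.  E.g. a dma ending in 0xF00 adds 256 bytes and
		 * dma + max_data is then 4K aligned.
		 */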
3648 tx_desc->buffer_addr = cpu_to_le64(dma);
3649
3650 while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
3651 tx_desc->cmd_type_offset_bsz =
3652 build_ctob(td_cmd, td_offset,
3653 max_data, td_tag);
3654
3655 tx_desc++;
3656 i++;
3657 desc_count++;
3658
3659 if (i == tx_ring->count) {
3660 tx_desc = I40E_TX_DESC(tx_ring, 0);
3661 i = 0;
3662 }
3663
3664 dma += max_data;
3665 size -= max_data;
3666
3667 max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
3668 tx_desc->buffer_addr = cpu_to_le64(dma);
3669 }
3670
3671 if (likely(!data_len))
3672 break;
3673
3674 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
3675 size, td_tag);
3676
3677 tx_desc++;
3678 i++;
3679 desc_count++;
3680
3681 if (i == tx_ring->count) {
3682 tx_desc = I40E_TX_DESC(tx_ring, 0);
3683 i = 0;
3684 }
3685
3686 size = skb_frag_size(frag);
3687 data_len -= size;
3688
3689 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
3690 DMA_TO_DEVICE);
3691
3692 tx_bi = &tx_ring->tx_bi[i];
3693 }
3694
3695 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
3696
3697 i++;
3698 if (i == tx_ring->count)
3699 i = 0;
3700
3701 tx_ring->next_to_use = i;
3702
3703 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
3704
3705 /* write last descriptor with EOP bit */
3706 td_cmd |= I40E_TX_DESC_CMD_EOP;
3707
3708 /* We OR these values together to check both against 4 (WB_STRIDE)
3709 * below. This is safe since we don't re-use desc_count afterwards.
3710 */
3711 desc_count |= ++tx_ring->packet_stride;
3712
3713 if (desc_count >= WB_STRIDE) {
3714 /* write last descriptor with RS bit set */
3715 td_cmd |= I40E_TX_DESC_CMD_RS;
3716 tx_ring->packet_stride = 0;
3717 }
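	/* The net effect is roughly that the RS (report status) bit is
	 * requested either for packets that span several descriptors or at
	 * least once every WB_STRIDE packets, so descriptor write-backs are
	 * batched instead of costing one per transmitted packet.
	 */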
3718
3719 tx_desc->cmd_type_offset_bsz =
3720 build_ctob(td_cmd, td_offset, size, td_tag);
3721
3722 skb_tx_timestamp(skb);
3723
3724 /* Force memory writes to complete before letting h/w know there
3725 * are new descriptors to fetch.
3726 *
3727 * We also use this memory barrier to make certain all of the
3728 * status bits have been updated before next_to_watch is written.
3729 */
3730 wmb();
3731
3732 /* set next_to_watch value indicating a packet is present */
3733 first->next_to_watch = tx_desc;
3734
3735 /* notify HW of packet */
3736 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
3737 writel(i, tx_ring->tail);
3738 }
3739
3740 return 0;
3741
3742dma_error:
3743 dev_info(tx_ring->dev, "TX DMA map failed\n");
3744
3745 /* clear dma mappings for failed tx_bi map */
3746 for (;;) {
3747 tx_bi = &tx_ring->tx_bi[i];
3748 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
3749 if (tx_bi == first)
3750 break;
3751 if (i == 0)
3752 i = tx_ring->count;
3753 i--;
3754 }
3755
3756 tx_ring->next_to_use = i;
3757
3758 return -1;
3759}
3760
3761static u16 i40e_swdcb_skb_tx_hash(struct net_device *dev,
3762 const struct sk_buff *skb,
3763 u16 num_tx_queues)
3764{
3765 u32 jhash_initval_salt = 0xd631614b;
3766 u32 hash;
3767
3768 if (skb->sk && skb->sk->sk_hash)
3769 hash = skb->sk->sk_hash;
3770 else
3771 hash = (__force u16)skb->protocol ^ skb->hash;
3772
3773 hash = jhash_1word(hash, jhash_initval_salt);
3774
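	/* Multiplying the 32-bit hash by num_tx_queues and taking the high
	 * 32 bits maps the hash uniformly onto [0, num_tx_queues) without a
	 * modulo; e.g. with 8 queues a hash of 0x80000000 selects queue 4.
	 */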
3775 return (u16)(((u64)hash * num_tx_queues) >> 32);
3776}
3777
3778u16 i40e_lan_select_queue(struct net_device *netdev,
3779 struct sk_buff *skb,
3780 struct net_device __always_unused *sb_dev)
3781{
3782 struct i40e_netdev_priv *np = netdev_priv(netdev);
3783 struct i40e_vsi *vsi = np->vsi;
3784 struct i40e_hw *hw;
3785 u16 qoffset;
3786 u16 qcount;
3787 u8 tclass;
3788 u16 hash;
3789 u8 prio;
3790
3791 /* is DCB enabled at all? */
3792 if (vsi->tc_config.numtc == 1 ||
3793 i40e_is_tc_mqprio_enabled(vsi->back))
3794 return netdev_pick_tx(netdev, skb, sb_dev);
3795
3796 prio = skb->priority;
3797 hw = &vsi->back->hw;
3798 tclass = hw->local_dcbx_config.etscfg.prioritytable[prio];
3799 /* sanity check */
3800 if (unlikely(!(vsi->tc_config.enabled_tc & BIT(tclass))))
3801 tclass = 0;
3802
3803 /* select a queue assigned for the given TC */
3804 qcount = vsi->tc_config.tc_info[tclass].qcount;
3805 hash = i40e_swdcb_skb_tx_hash(netdev, skb, qcount);
3806
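	/* Illustration with assumed values: if priority 5 maps to traffic
	 * class 2 and that TC owns queues 16..23 (qoffset 16, qcount 8),
	 * the hash above picks one of queues 16..23 for this skb.
	 */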
3807 qoffset = vsi->tc_config.tc_info[tclass].qoffset;
3808 return qoffset + hash;
3809}
3810
3811/**
3812 * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
3813 * @xdpf: data to transmit
3814 * @xdp_ring: XDP Tx ring
3815 **/
3816static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
3817 struct i40e_ring *xdp_ring)
3818{
3819 struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
3820 u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
3821 u16 i = 0, index = xdp_ring->next_to_use;
3822 struct i40e_tx_buffer *tx_head = &xdp_ring->tx_bi[index];
3823 struct i40e_tx_buffer *tx_bi = tx_head;
3824 struct i40e_tx_desc *tx_desc = I40E_TX_DESC(xdp_ring, index);
3825 void *data = xdpf->data;
3826 u32 size = xdpf->len;
3827
3828 if (unlikely(I40E_DESC_UNUSED(xdp_ring) < 1 + nr_frags)) {
3829 xdp_ring->tx_stats.tx_busy++;
3830 return I40E_XDP_CONSUMED;
3831 }
3832
3833 tx_head->bytecount = xdp_get_frame_len(xdpf);
3834 tx_head->gso_segs = 1;
3835 tx_head->xdpf = xdpf;
3836
3837 for (;;) {
3838 dma_addr_t dma;
3839
3840 dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
3841 if (dma_mapping_error(xdp_ring->dev, dma))
3842 goto unmap;
3843
3844 /* record length, and DMA address */
3845 dma_unmap_len_set(tx_bi, len, size);
3846 dma_unmap_addr_set(tx_bi, dma, dma);
3847
3848 tx_desc->buffer_addr = cpu_to_le64(dma);
3849 tx_desc->cmd_type_offset_bsz =
3850 build_ctob(I40E_TX_DESC_CMD_ICRC, 0, size, 0);
3851
3852 if (++index == xdp_ring->count)
3853 index = 0;
3854
3855 if (i == nr_frags)
3856 break;
3857
3858 tx_bi = &xdp_ring->tx_bi[index];
3859 tx_desc = I40E_TX_DESC(xdp_ring, index);
3860
3861 data = skb_frag_address(&sinfo->frags[i]);
3862 size = skb_frag_size(&sinfo->frags[i]);
3863 i++;
3864 }
3865
3866 tx_desc->cmd_type_offset_bsz |=
3867 cpu_to_le64(I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);
3868
3869 /* Make certain all of the status bits have been updated
3870 * before next_to_watch is written.
3871 */
3872 smp_wmb();
3873
3874 xdp_ring->xdp_tx_active++;
3875
3876 tx_head->next_to_watch = tx_desc;
3877 xdp_ring->next_to_use = index;
3878
3879 return I40E_XDP_TX;
3880
3881unmap:
3882 for (;;) {
3883 tx_bi = &xdp_ring->tx_bi[index];
3884 if (dma_unmap_len(tx_bi, len))
3885 dma_unmap_page(xdp_ring->dev,
3886 dma_unmap_addr(tx_bi, dma),
3887 dma_unmap_len(tx_bi, len),
3888 DMA_TO_DEVICE);
3889 dma_unmap_len_set(tx_bi, len, 0);
3890 if (tx_bi == tx_head)
3891 break;
3892
3893 if (!index)
3894 index += xdp_ring->count;
3895 index--;
3896 }
3897
3898 return I40E_XDP_CONSUMED;
3899}
3900
3901/**
3902 * i40e_xmit_frame_ring - Sends buffer on Tx ring
3903 * @skb: send buffer
3904 * @tx_ring: ring to send buffer on
3905 *
3906 * Returns NETDEV_TX_OK if sent, else an error code
3907 **/
3908static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
3909 struct i40e_ring *tx_ring)
3910{
3911 u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
3912 u32 cd_tunneling = 0, cd_l2tag2 = 0;
3913 struct i40e_tx_buffer *first;
3914 u32 td_offset = 0;
3915 u32 tx_flags = 0;
3916 u32 td_cmd = 0;
3917 u8 hdr_len = 0;
3918 int tso, count;
3919 int tsyn;
3920
3921 /* prefetch the data, we'll need it later */
3922 prefetch(skb->data);
3923
3924 i40e_trace(xmit_frame_ring, skb, tx_ring);
3925
3926 count = i40e_xmit_descriptor_count(skb);
3927 if (i40e_chk_linearize(skb, count)) {
3928 if (__skb_linearize(skb)) {
3929 dev_kfree_skb_any(skb);
3930 return NETDEV_TX_OK;
3931 }
3932 count = i40e_txd_use_count(skb->len);
3933 tx_ring->tx_stats.tx_linearize++;
3934 }
3935
3936 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
3937 * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
3938 * + 4 desc gap to avoid the cache line where head is,
3939 * + 1 desc for context descriptor,
3940 * otherwise try next time
3941 */
3942 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
3943 tx_ring->tx_stats.tx_busy++;
3944 return NETDEV_TX_BUSY;
3945 }
3946
3947 /* record the location of the first descriptor for this packet */
3948 first = &tx_ring->tx_bi[tx_ring->next_to_use];
3949 first->skb = skb;
3950 first->bytecount = skb->len;
3951 first->gso_segs = 1;
3952
3953 /* prepare the xmit flags */
3954 if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
3955 goto out_drop;
3956
3957 tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
3958
3959 if (tso < 0)
3960 goto out_drop;
3961 else if (tso)
3962 tx_flags |= I40E_TX_FLAGS_TSO;
3963
3964 /* Always offload the checksum, since it's in the data descriptor */
3965 tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
3966 tx_ring, &cd_tunneling);
3967 if (tso < 0)
3968 goto out_drop;
3969
3970 tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
3971
3972 if (tsyn)
3973 tx_flags |= I40E_TX_FLAGS_TSYN;
3974
3975 /* always enable CRC insertion offload */
3976 td_cmd |= I40E_TX_DESC_CMD_ICRC;
3977
3978 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
3979 cd_tunneling, cd_l2tag2);
3980
3981 /* Add Flow Director ATR if it's enabled.
3982 *
3983 * NOTE: this must always be directly before the data descriptor.
3984 */
3985 i40e_atr(tx_ring, skb, tx_flags);
3986
3987 if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
3988 td_cmd, td_offset))
3989 goto cleanup_tx_tstamp;
3990
3991 return NETDEV_TX_OK;
3992
3993out_drop:
3994 i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
3995 dev_kfree_skb_any(first->skb);
3996 first->skb = NULL;
3997cleanup_tx_tstamp:
3998 if (unlikely(tx_flags & I40E_TX_FLAGS_TSYN)) {
3999 struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev);
4000
4001 dev_kfree_skb_any(pf->ptp_tx_skb);
4002 pf->ptp_tx_skb = NULL;
4003 clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
4004 }
4005
4006 return NETDEV_TX_OK;
4007}
4008
4009/**
4010 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
4011 * @skb: send buffer
4012 * @netdev: network interface device structure
4013 *
4014 * Returns NETDEV_TX_OK if sent, else an error code
4015 **/
4016netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
4017{
4018 struct i40e_netdev_priv *np = netdev_priv(netdev);
4019 struct i40e_vsi *vsi = np->vsi;
4020 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
4021
4022 /* hardware can't handle really short frames, hardware padding works
4023 * beyond this point
4024 */
4025 if (skb_put_padto(skb, I40E_MIN_TX_LEN))
4026 return NETDEV_TX_OK;
4027
4028 return i40e_xmit_frame_ring(skb, tx_ring);
4029}
4030
4031/**
4032 * i40e_xdp_xmit - Implements ndo_xdp_xmit
4033 * @dev: netdev
4034 * @n: number of frames
4035 * @frames: array of XDP buffer pointers
4036 * @flags: XDP extra info
4037 *
4038 * Returns number of frames successfully sent. Failed frames
4039 * will be freed by the XDP core.
4040 *
4041 * For error cases, a negative errno code is returned and no frames
4042 * are transmitted (caller must handle freeing frames).
4043 **/
4044int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
4045 u32 flags)
4046{
4047 struct i40e_netdev_priv *np = netdev_priv(dev);
4048 unsigned int queue_index = smp_processor_id();
4049 struct i40e_vsi *vsi = np->vsi;
4050 struct i40e_pf *pf = vsi->back;
4051 struct i40e_ring *xdp_ring;
4052 int nxmit = 0;
4053 int i;
4054
4055 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4056 return -ENETDOWN;
4057
4058 if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs ||
4059 test_bit(__I40E_CONFIG_BUSY, pf->state))
4060 return -ENXIO;
4061
4062 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
4063 return -EINVAL;
4064
4065 xdp_ring = vsi->xdp_rings[queue_index];
4066
4067 for (i = 0; i < n; i++) {
4068 struct xdp_frame *xdpf = frames[i];
4069 int err;
4070
4071 err = i40e_xmit_xdp_ring(xdpf, xdp_ring);
4072 if (err != I40E_XDP_TX)
4073 break;
4074 nxmit++;
4075 }
4076
4077 if (unlikely(flags & XDP_XMIT_FLUSH))
4078 i40e_xdp_ring_update_tail(xdp_ring);
4079
4080 return nxmit;
4081}
1// SPDX-License-Identifier: GPL-2.0
2/*******************************************************************************
3 *
4 * Intel Ethernet Controller XL710 Family Linux Driver
5 * Copyright(c) 2013 - 2016 Intel Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 *
19 * The full GNU General Public License is included in this distribution in
20 * the file called "COPYING".
21 *
22 * Contact Information:
23 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *
26 ******************************************************************************/
27
28#include <linux/prefetch.h>
29#include <net/busy_poll.h>
30#include <linux/bpf_trace.h>
31#include <net/xdp.h>
32#include "i40e.h"
33#include "i40e_trace.h"
34#include "i40e_prototype.h"
35
36static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
37 u32 td_tag)
38{
39 return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
40 ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
41 ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
42 ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
43 ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
44}
45
46#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
47/**
48 * i40e_fdir - Generate a Flow Director descriptor based on fdata
49 * @tx_ring: Tx ring to send buffer on
50 * @fdata: Flow director filter data
51 * @add: Indicate if we are adding a rule or deleting one
52 *
53 **/
54static void i40e_fdir(struct i40e_ring *tx_ring,
55 struct i40e_fdir_filter *fdata, bool add)
56{
57 struct i40e_filter_program_desc *fdir_desc;
58 struct i40e_pf *pf = tx_ring->vsi->back;
59 u32 flex_ptype, dtype_cmd;
60 u16 i;
61
62 /* grab the next descriptor */
63 i = tx_ring->next_to_use;
64 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
65
66 i++;
67 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
68
69 flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
70 (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);
71
72 flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
73 (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);
74
75 flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
76 (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
77
81 /* Use LAN VSI Id if not programmed by user */
82 flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
83 ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
84 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);
85
86 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
87
88 dtype_cmd |= add ?
89 I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
90 I40E_TXD_FLTR_QW1_PCMD_SHIFT :
91 I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
92 I40E_TXD_FLTR_QW1_PCMD_SHIFT;
93
94 dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK &
95 (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT);
96
97 dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK &
98 (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT);
99
100 if (fdata->cnt_index) {
101 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
102 dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK &
103 ((u32)fdata->cnt_index <<
104 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT);
105 }
106
107 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
108 fdir_desc->rsvd = cpu_to_le32(0);
109 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
110 fdir_desc->fd_id = cpu_to_le32(fdata->fd_id);
111}
112
113#define I40E_FD_CLEAN_DELAY 10
114/**
115 * i40e_program_fdir_filter - Program a Flow Director filter
116 * @fdir_data: Packet data that will be filter parameters
117 * @raw_packet: the pre-allocated packet buffer for FDir
118 * @pf: The PF pointer
119 * @add: True for add/update, False for remove
120 **/
121static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
122 u8 *raw_packet, struct i40e_pf *pf,
123 bool add)
124{
125 struct i40e_tx_buffer *tx_buf, *first;
126 struct i40e_tx_desc *tx_desc;
127 struct i40e_ring *tx_ring;
128 struct i40e_vsi *vsi;
129 struct device *dev;
130 dma_addr_t dma;
131 u32 td_cmd = 0;
132 u16 i;
133
134 /* find existing FDIR VSI */
135 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
136 if (!vsi)
137 return -ENOENT;
138
139 tx_ring = vsi->tx_rings[0];
140 dev = tx_ring->dev;
141
142 /* we need two descriptors to add/del a filter and we can wait */
143 for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
144 if (!i)
145 return -EAGAIN;
146 msleep_interruptible(1);
147 }
148
149 dma = dma_map_single(dev, raw_packet,
150 I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
151 if (dma_mapping_error(dev, dma))
152 goto dma_fail;
153
154 /* grab the next descriptor */
155 i = tx_ring->next_to_use;
156 first = &tx_ring->tx_bi[i];
157 i40e_fdir(tx_ring, fdir_data, add);
158
159 /* Now program a dummy descriptor */
160 i = tx_ring->next_to_use;
161 tx_desc = I40E_TX_DESC(tx_ring, i);
162 tx_buf = &tx_ring->tx_bi[i];
163
164 tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
165
166 memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));
167
168 /* record length, and DMA address */
169 dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
170 dma_unmap_addr_set(tx_buf, dma, dma);
171
172 tx_desc->buffer_addr = cpu_to_le64(dma);
173 td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
174
175 tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
176 tx_buf->raw_buf = (void *)raw_packet;
177
178 tx_desc->cmd_type_offset_bsz =
179 build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
180
181 /* Force memory writes to complete before letting h/w
182 * know there are new descriptors to fetch.
183 */
184 wmb();
185
186 /* Mark the data descriptor to be watched */
187 first->next_to_watch = tx_desc;
188
189 writel(tx_ring->next_to_use, tx_ring->tail);
190 return 0;
191
192dma_fail:
193 return -1;
194}
195
196#define IP_HEADER_OFFSET 14
197#define I40E_UDPIP_DUMMY_PACKET_LEN 42
198/**
199 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
200 * @vsi: pointer to the targeted VSI
201 * @fd_data: the flow director data required for the FDir descriptor
202 * @add: true adds a filter, false removes it
203 *
204 * Returns 0 if the filters were successfully added or removed
205 **/
206static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
207 struct i40e_fdir_filter *fd_data,
208 bool add)
209{
210 struct i40e_pf *pf = vsi->back;
211 struct udphdr *udp;
212 struct iphdr *ip;
213 u8 *raw_packet;
214 int ret;
215 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
216 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
217 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
218
219 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
220 if (!raw_packet)
221 return -ENOMEM;
222 memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
223
224 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
225 udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
226 + sizeof(struct iphdr));
227
228 ip->daddr = fd_data->dst_ip;
229 udp->dest = fd_data->dst_port;
230 ip->saddr = fd_data->src_ip;
231 udp->source = fd_data->src_port;
232
233 if (fd_data->flex_filter) {
234 u8 *payload = raw_packet + I40E_UDPIP_DUMMY_PACKET_LEN;
235 __be16 pattern = fd_data->flex_word;
236 u16 off = fd_data->flex_offset;
237
238 *((__force __be16 *)(payload + off)) = pattern;
239 }
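	/* Example with assumed filter values: a flex_word of 0xABCD at
	 * flex_offset 6 overwrites bytes 6-7 of the dummy UDP payload, so
	 * the programmed filter keys on that pattern at that payload offset
	 * in addition to the address/port fields above.
	 */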
240
241 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
242 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
243 if (ret) {
244 dev_info(&pf->pdev->dev,
245 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
246 fd_data->pctype, fd_data->fd_id, ret);
247 /* Free the packet buffer since it wasn't added to the ring */
248 kfree(raw_packet);
249 return -EOPNOTSUPP;
250 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
251 if (add)
252 dev_info(&pf->pdev->dev,
253 "Filter OK for PCTYPE %d loc = %d\n",
254 fd_data->pctype, fd_data->fd_id);
255 else
256 dev_info(&pf->pdev->dev,
257 "Filter deleted for PCTYPE %d loc = %d\n",
258 fd_data->pctype, fd_data->fd_id);
259 }
260
261 if (add)
262 pf->fd_udp4_filter_cnt++;
263 else
264 pf->fd_udp4_filter_cnt--;
265
266 return 0;
267}
268
269#define I40E_TCPIP_DUMMY_PACKET_LEN 54
270/**
271 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
272 * @vsi: pointer to the targeted VSI
273 * @fd_data: the flow director data required for the FDir descriptor
274 * @add: true adds a filter, false removes it
275 *
276 * Returns 0 if the filters were successfully added or removed
277 **/
278static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
279 struct i40e_fdir_filter *fd_data,
280 bool add)
281{
282 struct i40e_pf *pf = vsi->back;
283 struct tcphdr *tcp;
284 struct iphdr *ip;
285 u8 *raw_packet;
286 int ret;
287 /* Dummy packet */
288 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
289 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
290 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
291 0x0, 0x72, 0, 0, 0, 0};
292
293 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
294 if (!raw_packet)
295 return -ENOMEM;
296 memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
297
298 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
299 tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
300 + sizeof(struct iphdr));
301
302 ip->daddr = fd_data->dst_ip;
303 tcp->dest = fd_data->dst_port;
304 ip->saddr = fd_data->src_ip;
305 tcp->source = fd_data->src_port;
306
307 if (fd_data->flex_filter) {
308 u8 *payload = raw_packet + I40E_TCPIP_DUMMY_PACKET_LEN;
309 __be16 pattern = fd_data->flex_word;
310 u16 off = fd_data->flex_offset;
311
312 *((__force __be16 *)(payload + off)) = pattern;
313 }
314
315 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
316 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
317 if (ret) {
318 dev_info(&pf->pdev->dev,
319 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
320 fd_data->pctype, fd_data->fd_id, ret);
321 /* Free the packet buffer since it wasn't added to the ring */
322 kfree(raw_packet);
323 return -EOPNOTSUPP;
324 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
325 if (add)
326 dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d)\n",
327 fd_data->pctype, fd_data->fd_id);
328 else
329 dev_info(&pf->pdev->dev,
330 "Filter deleted for PCTYPE %d loc = %d\n",
331 fd_data->pctype, fd_data->fd_id);
332 }
333
334 if (add) {
335 pf->fd_tcp4_filter_cnt++;
336 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
337 I40E_DEBUG_FD & pf->hw.debug_mask)
338 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
339 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
340 } else {
341 pf->fd_tcp4_filter_cnt--;
342 }
343
344 return 0;
345}
346
347#define I40E_SCTPIP_DUMMY_PACKET_LEN 46
348/**
349 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
350 * a specific flow spec
351 * @vsi: pointer to the targeted VSI
352 * @fd_data: the flow director data required for the FDir descriptor
353 * @add: true adds a filter, false removes it
354 *
355 * Returns 0 if the filters were successfully added or removed
356 **/
357static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
358 struct i40e_fdir_filter *fd_data,
359 bool add)
360{
361 struct i40e_pf *pf = vsi->back;
362 struct sctphdr *sctp;
363 struct iphdr *ip;
364 u8 *raw_packet;
365 int ret;
366 /* Dummy packet */
367 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
368 0x45, 0, 0, 0x20, 0, 0, 0x40, 0, 0x40, 0x84, 0, 0, 0, 0, 0, 0,
369 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
370
371 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
372 if (!raw_packet)
373 return -ENOMEM;
374 memcpy(raw_packet, packet, I40E_SCTPIP_DUMMY_PACKET_LEN);
375
376 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
377 sctp = (struct sctphdr *)(raw_packet + IP_HEADER_OFFSET
378 + sizeof(struct iphdr));
379
380 ip->daddr = fd_data->dst_ip;
381 sctp->dest = fd_data->dst_port;
382 ip->saddr = fd_data->src_ip;
383 sctp->source = fd_data->src_port;
384
385 if (fd_data->flex_filter) {
386 u8 *payload = raw_packet + I40E_SCTPIP_DUMMY_PACKET_LEN;
387 __be16 pattern = fd_data->flex_word;
388 u16 off = fd_data->flex_offset;
389
390 *((__force __be16 *)(payload + off)) = pattern;
391 }
392
393 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
394 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
395 if (ret) {
396 dev_info(&pf->pdev->dev,
397 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
398 fd_data->pctype, fd_data->fd_id, ret);
399 /* Free the packet buffer since it wasn't added to the ring */
400 kfree(raw_packet);
401 return -EOPNOTSUPP;
402 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
403 if (add)
404 dev_info(&pf->pdev->dev,
405 "Filter OK for PCTYPE %d loc = %d\n",
406 fd_data->pctype, fd_data->fd_id);
407 else
408 dev_info(&pf->pdev->dev,
409 "Filter deleted for PCTYPE %d loc = %d\n",
410 fd_data->pctype, fd_data->fd_id);
411 }
412
413 if (add)
414 pf->fd_sctp4_filter_cnt++;
415 else
416 pf->fd_sctp4_filter_cnt--;
417
418 return 0;
419}
420
421#define I40E_IP_DUMMY_PACKET_LEN 34
422/**
423 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
424 * a specific flow spec
425 * @vsi: pointer to the targeted VSI
426 * @fd_data: the flow director data required for the FDir descriptor
427 * @add: true adds a filter, false removes it
428 *
429 * Returns 0 if the filters were successfully added or removed
430 **/
431static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
432 struct i40e_fdir_filter *fd_data,
433 bool add)
434{
435 struct i40e_pf *pf = vsi->back;
436 struct iphdr *ip;
437 u8 *raw_packet;
438 int ret;
439 int i;
440 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
441 0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
442 0, 0, 0, 0};
443
444 for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
445 i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
446 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
447 if (!raw_packet)
448 return -ENOMEM;
449 memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
450 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
451
452 ip->saddr = fd_data->src_ip;
453 ip->daddr = fd_data->dst_ip;
454 ip->protocol = 0;
455
456 if (fd_data->flex_filter) {
457 u8 *payload = raw_packet + I40E_IP_DUMMY_PACKET_LEN;
458 __be16 pattern = fd_data->flex_word;
459 u16 off = fd_data->flex_offset;
460
461 *((__force __be16 *)(payload + off)) = pattern;
462 }
463
464 fd_data->pctype = i;
465 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
466 if (ret) {
467 dev_info(&pf->pdev->dev,
468 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
469 fd_data->pctype, fd_data->fd_id, ret);
470 /* The packet buffer wasn't added to the ring so we
471 * need to free it now.
472 */
473 kfree(raw_packet);
474 return -EOPNOTSUPP;
475 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
476 if (add)
477 dev_info(&pf->pdev->dev,
478 "Filter OK for PCTYPE %d loc = %d\n",
479 fd_data->pctype, fd_data->fd_id);
480 else
481 dev_info(&pf->pdev->dev,
482 "Filter deleted for PCTYPE %d loc = %d\n",
483 fd_data->pctype, fd_data->fd_id);
484 }
485 }
486
487 if (add)
488 pf->fd_ip4_filter_cnt++;
489 else
490 pf->fd_ip4_filter_cnt--;
491
492 return 0;
493}
494
495/**
496 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
497 * @vsi: pointer to the targeted VSI
498 * @input: flow director filter entry to add or delete
499 * @add: true adds a filter, false removes it
500 *
501 **/
502int i40e_add_del_fdir(struct i40e_vsi *vsi,
503 struct i40e_fdir_filter *input, bool add)
504{
505 struct i40e_pf *pf = vsi->back;
506 int ret;
507
508 switch (input->flow_type & ~FLOW_EXT) {
509 case TCP_V4_FLOW:
510 ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
511 break;
512 case UDP_V4_FLOW:
513 ret = i40e_add_del_fdir_udpv4(vsi, input, add);
514 break;
515 case SCTP_V4_FLOW:
516 ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
517 break;
518 case IP_USER_FLOW:
519 switch (input->ip4_proto) {
520 case IPPROTO_TCP:
521 ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
522 break;
523 case IPPROTO_UDP:
524 ret = i40e_add_del_fdir_udpv4(vsi, input, add);
525 break;
526 case IPPROTO_SCTP:
527 ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
528 break;
529 case IPPROTO_IP:
530 ret = i40e_add_del_fdir_ipv4(vsi, input, add);
531 break;
532 default:
533 /* We cannot support masking based on protocol */
534 dev_info(&pf->pdev->dev, "Unsupported IPv4 protocol 0x%02x\n",
535 input->ip4_proto);
536 return -EINVAL;
537 }
538 break;
539 default:
540 dev_info(&pf->pdev->dev, "Unsupported flow type 0x%02x\n",
541 input->flow_type);
542 return -EINVAL;
543 }
544
545 /* The buffer allocated here will normally be freed by
546 * i40e_clean_fdir_tx_irq() as it reclaims resources after transmit
547 * completion. In the event of an error adding the buffer to the FDIR
548 * ring, it will immediately be freed. It may also be freed by
549 * i40e_clean_tx_ring() when closing the VSI.
550 */
551 return ret;
552}
553
554/**
555 * i40e_fd_handle_status - check the Programming Status for FD
556 * @rx_ring: the Rx ring for this descriptor
557 * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
558 * @prog_id: the id originally used for programming
559 *
560 * This is used to verify whether the FD programming or invalidation
561 * requested by SW succeeded in HW, and to take action accordingly.
562 **/
563static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
564 union i40e_rx_desc *rx_desc, u8 prog_id)
565{
566 struct i40e_pf *pf = rx_ring->vsi->back;
567 struct pci_dev *pdev = pf->pdev;
568 u32 fcnt_prog, fcnt_avail;
569 u32 error;
570 u64 qw;
571
572 qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
573 error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
574 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
575
576 if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
577 pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
578 if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
579 (I40E_DEBUG_FD & pf->hw.debug_mask))
580 dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
581 pf->fd_inv);
582
583 /* Check if the programming error is for ATR.
584 * If so, auto disable ATR and set a state for
585 * flush in progress. Next time we come here if flush is in
586 * progress do nothing, once flush is complete the state will
587 * be cleared.
588 */
589 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
590 return;
591
592 pf->fd_add_err++;
593 /* store the current atr filter count */
594 pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
595
596 if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
597 test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) {
598 /* These set_bit() calls aren't atomic with the
599 * test_bit() here, but worse case we potentially
600 * disable ATR and queue a flush right after SB
601 * support is re-enabled. That shouldn't cause an
602 * issue in practice
603 */
604 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
605 set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
606 }
607
608 /* filter programming failed most likely due to table full */
609 fcnt_prog = i40e_get_global_fd_count(pf);
610 fcnt_avail = pf->fdir_pf_filter_count;
611 /* If ATR is running fcnt_prog can quickly change,
612 * if we are very close to full, it makes sense to disable
613 * FD ATR/SB and then re-enable it when there is room.
614 */
615 if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
616 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
617 !test_and_set_bit(__I40E_FD_SB_AUTO_DISABLED,
618 pf->state))
619 if (I40E_DEBUG_FD & pf->hw.debug_mask)
620 dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
621 }
622 } else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
623 if (I40E_DEBUG_FD & pf->hw.debug_mask)
624 dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
625 rx_desc->wb.qword0.hi_dword.fd_id);
626 }
627}
628
629/**
630 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
631 * @ring: the ring that owns the buffer
632 * @tx_buffer: the buffer to free
633 **/
634static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
635 struct i40e_tx_buffer *tx_buffer)
636{
637 if (tx_buffer->skb) {
638 if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
639 kfree(tx_buffer->raw_buf);
640 else if (ring_is_xdp(ring))
641 page_frag_free(tx_buffer->raw_buf);
642 else
643 dev_kfree_skb_any(tx_buffer->skb);
644 if (dma_unmap_len(tx_buffer, len))
645 dma_unmap_single(ring->dev,
646 dma_unmap_addr(tx_buffer, dma),
647 dma_unmap_len(tx_buffer, len),
648 DMA_TO_DEVICE);
649 } else if (dma_unmap_len(tx_buffer, len)) {
650 dma_unmap_page(ring->dev,
651 dma_unmap_addr(tx_buffer, dma),
652 dma_unmap_len(tx_buffer, len),
653 DMA_TO_DEVICE);
654 }
655
656 tx_buffer->next_to_watch = NULL;
657 tx_buffer->skb = NULL;
658 dma_unmap_len_set(tx_buffer, len, 0);
659 /* tx_buffer must be completely set up in the transmit path */
660}
661
662/**
663 * i40e_clean_tx_ring - Free any empty Tx buffers
664 * @tx_ring: ring to be cleaned
665 **/
666void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
667{
668 unsigned long bi_size;
669 u16 i;
670
671 /* ring already cleared, nothing to do */
672 if (!tx_ring->tx_bi)
673 return;
674
675 /* Free all the Tx ring sk_buffs */
676 for (i = 0; i < tx_ring->count; i++)
677 i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
678
679 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
680 memset(tx_ring->tx_bi, 0, bi_size);
681
682 /* Zero out the descriptor ring */
683 memset(tx_ring->desc, 0, tx_ring->size);
684
685 tx_ring->next_to_use = 0;
686 tx_ring->next_to_clean = 0;
687
688 if (!tx_ring->netdev)
689 return;
690
691 /* cleanup Tx queue statistics */
692 netdev_tx_reset_queue(txring_txq(tx_ring));
693}
694
695/**
696 * i40e_free_tx_resources - Free Tx resources per queue
697 * @tx_ring: Tx descriptor ring for a specific queue
698 *
699 * Free all transmit software resources
700 **/
701void i40e_free_tx_resources(struct i40e_ring *tx_ring)
702{
703 i40e_clean_tx_ring(tx_ring);
704 kfree(tx_ring->tx_bi);
705 tx_ring->tx_bi = NULL;
706
707 if (tx_ring->desc) {
708 dma_free_coherent(tx_ring->dev, tx_ring->size,
709 tx_ring->desc, tx_ring->dma);
710 tx_ring->desc = NULL;
711 }
712}
713
714/**
715 * i40e_get_tx_pending - how many Tx descriptors are not yet processed
716 * @ring: the ring of descriptors
717 * @in_sw: use SW variables
718 *
719 * Since there is no access to the ring head register
720 * in XL710, we need to use our local copies
721 **/
722u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
723{
724 u32 head, tail;
725
726 if (!in_sw) {
727 head = i40e_get_head(ring);
728 tail = readl(ring->tail);
729 } else {
730 head = ring->next_to_clean;
731 tail = ring->next_to_use;
732 }
733
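	/* The subtraction handles ring wrap; e.g. with a 512 descriptor
	 * ring, head = 500 and tail = 10 gives 10 + 512 - 500 = 22 pending
	 * descriptors.
	 */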
734 if (head != tail)
735 return (head < tail) ?
736 tail - head : (tail + ring->count - head);
737
738 return 0;
739}
740
741/**
742 * i40e_detect_recover_hung - Detect hung Tx queues and recover them
743 * @vsi: pointer to vsi struct with tx queues
744 *
745 * VSI has netdev and netdev has TX queues. This function checks each of
746 * those TX queues; if a queue is hung, it triggers recovery by issuing a SW interrupt.
747 **/
748void i40e_detect_recover_hung(struct i40e_vsi *vsi)
749{
750 struct i40e_ring *tx_ring = NULL;
751 struct net_device *netdev;
752 unsigned int i;
753 int packets;
754
755 if (!vsi)
756 return;
757
758 if (test_bit(__I40E_VSI_DOWN, vsi->state))
759 return;
760
761 netdev = vsi->netdev;
762 if (!netdev)
763 return;
764
765 if (!netif_carrier_ok(netdev))
766 return;
767
768 for (i = 0; i < vsi->num_queue_pairs; i++) {
769 tx_ring = vsi->tx_rings[i];
770 if (tx_ring && tx_ring->desc) {
771 /* If packet counter has not changed the queue is
772 * likely stalled, so force an interrupt for this
773 * queue.
774 *
775 * prev_pkt_ctr would be negative if there was no
776 * pending work.
777 */
778 packets = tx_ring->stats.packets & INT_MAX;
779 if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
780 i40e_force_wb(vsi, tx_ring->q_vector);
781 continue;
782 }
783
784 /* Memory barrier between read of packet count and call
785 * to i40e_get_tx_pending()
786 */
787 smp_rmb();
788 tx_ring->tx_stats.prev_pkt_ctr =
789 i40e_get_tx_pending(tx_ring, true) ? packets : -1;
790 }
791 }
792}
793
794#define WB_STRIDE 4
795
796/**
797 * i40e_clean_tx_irq - Reclaim resources after transmit completes
798 * @vsi: the VSI we care about
799 * @tx_ring: Tx ring to clean
800 * @napi_budget: Used to determine if we are in netpoll
801 *
802 * Returns true if there's any budget left (e.g. the clean is finished)
803 **/
804static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
805 struct i40e_ring *tx_ring, int napi_budget)
806{
807 u16 i = tx_ring->next_to_clean;
808 struct i40e_tx_buffer *tx_buf;
809 struct i40e_tx_desc *tx_head;
810 struct i40e_tx_desc *tx_desc;
811 unsigned int total_bytes = 0, total_packets = 0;
812 unsigned int budget = vsi->work_limit;
813
814 tx_buf = &tx_ring->tx_bi[i];
815 tx_desc = I40E_TX_DESC(tx_ring, i);
816 i -= tx_ring->count;
817
818 tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
819
820 do {
821 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
822
823 /* if next_to_watch is not set then there is no work pending */
824 if (!eop_desc)
825 break;
826
827 /* prevent any other reads prior to eop_desc */
828 smp_rmb();
829
830 i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
831 /* we have caught up to head, no work left to do */
832 if (tx_head == tx_desc)
833 break;
834
835 /* clear next_to_watch to prevent false hangs */
836 tx_buf->next_to_watch = NULL;
837
838 /* update the statistics for this packet */
839 total_bytes += tx_buf->bytecount;
840 total_packets += tx_buf->gso_segs;
841
842 /* free the skb/XDP data */
843 if (ring_is_xdp(tx_ring))
844 page_frag_free(tx_buf->raw_buf);
845 else
846 napi_consume_skb(tx_buf->skb, napi_budget);
847
848 /* unmap skb header data */
849 dma_unmap_single(tx_ring->dev,
850 dma_unmap_addr(tx_buf, dma),
851 dma_unmap_len(tx_buf, len),
852 DMA_TO_DEVICE);
853
854 /* clear tx_buffer data */
855 tx_buf->skb = NULL;
856 dma_unmap_len_set(tx_buf, len, 0);
857
858 /* unmap remaining buffers */
859 while (tx_desc != eop_desc) {
860 i40e_trace(clean_tx_irq_unmap,
861 tx_ring, tx_desc, tx_buf);
862
863 tx_buf++;
864 tx_desc++;
865 i++;
866 if (unlikely(!i)) {
867 i -= tx_ring->count;
868 tx_buf = tx_ring->tx_bi;
869 tx_desc = I40E_TX_DESC(tx_ring, 0);
870 }
871
872 /* unmap any remaining paged data */
873 if (dma_unmap_len(tx_buf, len)) {
874 dma_unmap_page(tx_ring->dev,
875 dma_unmap_addr(tx_buf, dma),
876 dma_unmap_len(tx_buf, len),
877 DMA_TO_DEVICE);
878 dma_unmap_len_set(tx_buf, len, 0);
879 }
880 }
881
882 /* move us one more past the eop_desc for start of next pkt */
883 tx_buf++;
884 tx_desc++;
885 i++;
886 if (unlikely(!i)) {
887 i -= tx_ring->count;
888 tx_buf = tx_ring->tx_bi;
889 tx_desc = I40E_TX_DESC(tx_ring, 0);
890 }
891
892 prefetch(tx_desc);
893
894 /* update budget accounting */
895 budget--;
896 } while (likely(budget));
897
898 i += tx_ring->count;
899 tx_ring->next_to_clean = i;
900 u64_stats_update_begin(&tx_ring->syncp);
901 tx_ring->stats.bytes += total_bytes;
902 tx_ring->stats.packets += total_packets;
903 u64_stats_update_end(&tx_ring->syncp);
904 tx_ring->q_vector->tx.total_bytes += total_bytes;
905 tx_ring->q_vector->tx.total_packets += total_packets;
906
907 if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
908 /* check to see if there are < 4 descriptors
909 * waiting to be written back, then kick the hardware to force
910 * them to be written back in case we stay in NAPI.
911 * In this mode on X722 we do not enable Interrupt.
912 */
913 unsigned int j = i40e_get_tx_pending(tx_ring, false);
914
915 if (budget &&
916 ((j / WB_STRIDE) == 0) && (j > 0) &&
917 !test_bit(__I40E_VSI_DOWN, vsi->state) &&
918 (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
919 tx_ring->arm_wb = true;
920 }
921
922 if (ring_is_xdp(tx_ring))
923 return !!budget;
924
925 /* notify netdev of completed buffers */
926 netdev_tx_completed_queue(txring_txq(tx_ring),
927 total_packets, total_bytes);
928
929#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
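	/* Only restart the queue once roughly twice the worst-case
	 * descriptor count for a single frame is free again (DESC_NEEDED is
	 * sized for a maximally fragmented skb), which keeps the queue from
	 * bouncing between stopped and started around the full mark.
	 */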
930 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
931 (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
932 /* Make sure that anybody stopping the queue after this
933 * sees the new next_to_clean.
934 */
935 smp_mb();
936 if (__netif_subqueue_stopped(tx_ring->netdev,
937 tx_ring->queue_index) &&
938 !test_bit(__I40E_VSI_DOWN, vsi->state)) {
939 netif_wake_subqueue(tx_ring->netdev,
940 tx_ring->queue_index);
941 ++tx_ring->tx_stats.restart_queue;
942 }
943 }
944
945 return !!budget;
946}
947
948/**
949 * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
950 * @vsi: the VSI we care about
951 * @q_vector: the vector on which to enable writeback
952 *
953 **/
954static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
955 struct i40e_q_vector *q_vector)
956{
957 u16 flags = q_vector->tx.ring[0].flags;
958 u32 val;
959
960 if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
961 return;
962
963 if (q_vector->arm_wb_state)
964 return;
965
966 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
967 val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
968 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */
969
970 wr32(&vsi->back->hw,
971 I40E_PFINT_DYN_CTLN(q_vector->reg_idx),
972 val);
973 } else {
974 val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
975 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */
976
977 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
978 }
979 q_vector->arm_wb_state = true;
980}
981
982/**
983 * i40e_force_wb - Issue SW Interrupt so HW does a wb
984 * @vsi: the VSI we care about
985 * @q_vector: the vector on which to force writeback
986 *
987 **/
988void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
989{
990 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
991 u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
992 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
993 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
994 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
995 /* allow 00 to be written to the index */
996
997 wr32(&vsi->back->hw,
998 I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val);
999 } else {
1000 u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
1001 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
1002 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
1003 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
1004 /* allow 00 to be written to the index */
1005
1006 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
1007 }
1008}
1009
1010static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector,
1011 struct i40e_ring_container *rc)
1012{
1013 return &q_vector->rx == rc;
1014}
1015
1016static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)
1017{
1018 unsigned int divisor;
1019
1020 switch (q_vector->vsi->back->hw.phy.link_info.link_speed) {
1021 case I40E_LINK_SPEED_40GB:
1022 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024;
1023 break;
1024 case I40E_LINK_SPEED_25GB:
1025 case I40E_LINK_SPEED_20GB:
1026 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512;
1027 break;
1028 default:
1029 case I40E_LINK_SPEED_10GB:
1030 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256;
1031 break;
1032 case I40E_LINK_SPEED_1GB:
1033 case I40E_LINK_SPEED_100MB:
1034 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32;
1035 break;
1036 }
1037
1038 return divisor;
1039}
1040
1041/**
1042 * i40e_update_itr - update the dynamic ITR value based on statistics
1043 * @q_vector: structure containing interrupt and ring information
1044 * @rc: structure containing ring performance data
1045 *
1046 * Stores a new ITR value based on packets and byte
1047 * counts during the last interrupt. The advantage of per interrupt
1048 * computation is faster updates and more accurate ITR for the current
1049 * traffic pattern. Constants in this function were computed
1050 * based on theoretical maximum wire speed and thresholds were set based
1051 * on testing data as well as attempting to minimize response time
1052 * while increasing bulk throughput.
1053 **/
1054static void i40e_update_itr(struct i40e_q_vector *q_vector,
1055 struct i40e_ring_container *rc)
1056{
1057 unsigned int avg_wire_size, packets, bytes, itr;
1058 unsigned long next_update = jiffies;
1059
1060 /* If we don't have any rings just leave ourselves set for maximum
1061 * possible latency so we take ourselves out of the equation.
1062 */
1063 if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
1064 return;
1065
1066 /* For Rx we want to push the delay up and default to low latency.
1067 * For Tx we want to pull the delay down and default to high latency.
1068 */
1069 itr = i40e_container_is_rx(q_vector, rc) ?
1070 I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY :
1071 I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY;
1072
1073 /* If we didn't update within up to 1 - 2 jiffies we can assume
1074 * that either packets are coming in so slow there hasn't been
1075 * any work, or that there is so much work that NAPI is dealing
1076 * with interrupt moderation and we don't need to do anything.
1077 */
1078 if (time_after(next_update, rc->next_update))
1079 goto clear_counts;
1080
1081 /* If itr_countdown is set it means we programmed an ITR within
1082 * the last 4 interrupt cycles. This has a side effect of us
1083 * potentially firing an early interrupt. In order to work around
1084 * this we need to throw out any data received for a few
1085 * interrupts following the update.
1086 */
1087 if (q_vector->itr_countdown) {
1088 itr = rc->target_itr;
1089 goto clear_counts;
1090 }
1091
1092 packets = rc->total_packets;
1093 bytes = rc->total_bytes;
1094
1095 if (i40e_container_is_rx(q_vector, rc)) {
1096 /* If Rx there are 1 to 4 packets and bytes are less than
1097 * 9000 assume insufficient data to use bulk rate limiting
1098 * approach unless Tx is already in bulk rate limiting. We
1099 * are likely latency driven.
1100 */
1101 if (packets && packets < 4 && bytes < 9000 &&
1102 (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) {
1103 itr = I40E_ITR_ADAPTIVE_LATENCY;
1104 goto adjust_by_size;
1105 }
1106 } else if (packets < 4) {
1107 /* If we have Tx and Rx ITR maxed and Tx ITR is running in
1108 * bulk mode and we are receiving 4 or fewer packets just
1109 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
1110 * that the Rx can relax.
1111 */
1112 if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS &&
1113 (q_vector->rx.target_itr & I40E_ITR_MASK) ==
1114 I40E_ITR_ADAPTIVE_MAX_USECS)
1115 goto clear_counts;
1116 } else if (packets > 32) {
1117 /* If we have processed over 32 packets in a single interrupt
1118 * for Tx assume we need to switch over to "bulk" mode.
1119 */
1120 rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY;
1121 }
1122
1123 /* We have no packets to actually measure against. This means
1124 * either one of the other queues on this vector is active or
1125 * we are a Tx queue doing TSO with too high of an interrupt rate.
1126 *
1127 * Between 4 and 56 we can assume that our current interrupt delay
1128 * is only slightly too low. As such we should increase it by a small
1129 * fixed amount.
1130 */
1131 if (packets < 56) {
1132 itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC;
1133 if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
1134 itr &= I40E_ITR_ADAPTIVE_LATENCY;
1135 itr += I40E_ITR_ADAPTIVE_MAX_USECS;
1136 }
1137 goto clear_counts;
1138 }
1139
1140 if (packets <= 256) {
1141 itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
1142 itr &= I40E_ITR_MASK;
1143
1144 /* Between 56 and 112 is our "goldilocks" zone where we are
1145 * working out "just right". Just report that our current
1146 * ITR is good for us.
1147 */
1148 if (packets <= 112)
1149 goto clear_counts;
1150
1151 /* If packet count is 128 or greater we are likely looking
1152 * at a slight overrun of the delay we want. Try halving
1153 * our delay to see if that will cut the number of packets
1154 * in half per interrupt.
1155 */
1156 itr /= 2;
1157 itr &= I40E_ITR_MASK;
1158 if (itr < I40E_ITR_ADAPTIVE_MIN_USECS)
1159 itr = I40E_ITR_ADAPTIVE_MIN_USECS;
1160
1161 goto clear_counts;
1162 }
1163
1164 /* The paths below assume we are dealing with a bulk ITR since
1165 * number of packets is greater than 256. We are just going to have
1166 * to compute a value and try to bring the count under control,
1167 * though for smaller packet sizes there isn't much we can do as
1168 * NAPI polling will likely be kicking in sooner rather than later.
1169 */
1170 itr = I40E_ITR_ADAPTIVE_BULK;
1171
1172adjust_by_size:
1173 /* If packet counts are 256 or greater we can assume we have a gross
1174 * overestimation of what the rate should be. Instead of trying to fine
1175 * tune it just use the formula below to try and dial in an exact value
1176 * give the current packet size of the frame.
1177 */
1178 avg_wire_size = bytes / packets;
1179
1180 /* The following is a crude approximation of:
1181 * wmem_default / (size + overhead) = desired_pkts_per_int
1182 * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
1183 * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
1184 *
1185 * Assuming wmem_default is 212992 and overhead is 640 bytes per
1186 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
1187 * formula down to
1188 *
1189 * (170 * (size + 24)) / (size + 640) = ITR
1190 *
1191 * We first do some math on the packet size and then finally bitshift
1192 * by 8 after rounding up. We also have to account for PCIe link speed
1193 * difference as ITR scales based on this.
1194 */
1195 if (avg_wire_size <= 60) {
1196 /* Start at 250k ints/sec */
1197 avg_wire_size = 4096;
1198 } else if (avg_wire_size <= 380) {
1199 /* 250K ints/sec to 60K ints/sec */
1200 avg_wire_size *= 40;
1201 avg_wire_size += 1696;
1202 } else if (avg_wire_size <= 1084) {
1203 /* 60K ints/sec to 36K ints/sec */
1204 avg_wire_size *= 15;
1205 avg_wire_size += 11452;
1206 } else if (avg_wire_size <= 1980) {
1207 /* 36K ints/sec to 30K ints/sec */
1208 avg_wire_size *= 5;
1209 avg_wire_size += 22420;
1210 } else {
1211 /* plateau at a limit of 30K ints/sec */
1212 avg_wire_size = 32256;
1213 }
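	/* Worked example of the scaling above: an average wire size of 300
	 * bytes falls in the second bucket, giving 300 * 40 + 1696 = 13696
	 * before the link-speed divisor and rounding below turn it into the
	 * final ITR value.
	 */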
1214
1215 /* If we are in low latency mode halve our delay which doubles the
1216 * rate to somewhere between 100K to 16K ints/sec
1217 */
1218 if (itr & I40E_ITR_ADAPTIVE_LATENCY)
1219 avg_wire_size /= 2;
1220
1221 /* Resultant value is 256 times larger than it needs to be. This
1222 * gives us room to adjust the value as needed to either increase
1223 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
1224 *
1225 * Use addition as we have already recorded the new latency flag
1226 * for the ITR value.
1227 */
1228 itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) *
1229 I40E_ITR_ADAPTIVE_MIN_INC;
1230
1231 if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
1232 itr &= I40E_ITR_ADAPTIVE_LATENCY;
1233 itr += I40E_ITR_ADAPTIVE_MAX_USECS;
1234 }
1235
1236clear_counts:
1237 /* write back value */
1238 rc->target_itr = itr;
1239
1240 /* next update should occur within next jiffy */
1241 rc->next_update = next_update + 1;
1242
1243 rc->total_bytes = 0;
1244 rc->total_packets = 0;
1245}
1246
1247/**
1248 * i40e_reuse_rx_page - page flip buffer and store it back on the ring
1249 * @rx_ring: rx descriptor ring to store buffers on
1250 * @old_buff: donor buffer to have page reused
1251 *
1252 * Synchronizes page for reuse by the adapter
1253 **/
1254static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
1255 struct i40e_rx_buffer *old_buff)
1256{
1257 struct i40e_rx_buffer *new_buff;
1258 u16 nta = rx_ring->next_to_alloc;
1259
1260 new_buff = &rx_ring->rx_bi[nta];
1261
1262 /* update, and store next to alloc */
1263 nta++;
1264 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1265
1266 /* transfer page from old buffer to new buffer */
1267 new_buff->dma = old_buff->dma;
1268 new_buff->page = old_buff->page;
1269 new_buff->page_offset = old_buff->page_offset;
1270 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
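
	/* Handing the donor page straight back to the ring avoids a page
	 * allocation and a fresh DMA mapping in the hot path; carrying
	 * pagecnt_bias over keeps the page reference accounting intact.
	 */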
1271}
1272
1273/**
1274 * i40e_rx_is_programming_status - check for programming status descriptor
1275 * @qw: qword representing status_error_len in CPU ordering
1276 *
1277 * The value in the descriptor length field indicates whether this
1278 * is a programming status descriptor for flow director or FCoE:
1279 * a length of I40E_RX_PROG_STATUS_DESC_LENGTH marks a programming
1280 * status descriptor, otherwise it is a packet descriptor.
1281 **/
1282static inline bool i40e_rx_is_programming_status(u64 qw)
1283{
1284 /* The Rx filter programming status and SPH bit occupy the same
1285 * spot in the descriptor. Since we don't support packet split we
1286 * can just reuse the bit as an indication that this is a
1287 * programming status descriptor.
1288 */
1289 return qw & I40E_RXD_QW1_LENGTH_SPH_MASK;
1290}
1291
1292/**
1293 * i40e_clean_programming_status - clean the programming status descriptor
1294 * @rx_ring: the rx ring that has this descriptor
1295 * @rx_desc: the rx descriptor written back by HW
1296 * @qw: qword representing status_error_len in CPU ordering
1297 *
1298 * Flow director should handle FD_FILTER_STATUS to check its filter programming
1299 * status being successful or not and take actions accordingly. FCoE should
1300 * handle its context/filter programming/invalidation status and take actions.
1301 *
1302 **/
1303static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
1304 union i40e_rx_desc *rx_desc,
1305 u64 qw)
1306{
1307 struct i40e_rx_buffer *rx_buffer;
1308 u32 ntc = rx_ring->next_to_clean;
1309 u8 id;
1310
1311 /* fetch, update, and store next to clean */
1312 rx_buffer = &rx_ring->rx_bi[ntc++];
1313 ntc = (ntc < rx_ring->count) ? ntc : 0;
1314 rx_ring->next_to_clean = ntc;
1315
1316 prefetch(I40E_RX_DESC(rx_ring, ntc));
1317
1318 /* place unused page back on the ring */
1319 i40e_reuse_rx_page(rx_ring, rx_buffer);
1320 rx_ring->rx_stats.page_reuse_count++;
1321
1322 /* clear contents of buffer_info */
1323 rx_buffer->page = NULL;
1324
1325 id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
1326 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
1327
1328 if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
1329 i40e_fd_handle_status(rx_ring, rx_desc, id);
1330}
1331
1332/**
1333 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
1334 * @tx_ring: the tx ring to set up
1335 *
1336 * Return 0 on success, negative on error
1337 **/
1338int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
1339{
1340 struct device *dev = tx_ring->dev;
1341 int bi_size;
1342
1343 if (!dev)
1344 return -ENOMEM;
1345
1346 /* warn if we are about to overwrite the pointer */
1347 WARN_ON(tx_ring->tx_bi);
1348 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
1349 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
1350 if (!tx_ring->tx_bi)
1351 goto err;
1352
1353 u64_stats_init(&tx_ring->syncp);
1354
1355 /* round up to nearest 4K */
1356 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
1357 /* add a u32 for head writeback; the alignment below guarantees
1358 * this is at least one cache line in size
1359 */
1360 tx_ring->size += sizeof(u32);
1361 tx_ring->size = ALIGN(tx_ring->size, 4096);
1362 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
1363 &tx_ring->dma, GFP_KERNEL);
1364 if (!tx_ring->desc) {
1365 dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
1366 tx_ring->size);
1367 goto err;
1368 }
1369
1370 tx_ring->next_to_use = 0;
1371 tx_ring->next_to_clean = 0;
1372 tx_ring->tx_stats.prev_pkt_ctr = -1;
1373 return 0;
1374
1375err:
1376 kfree(tx_ring->tx_bi);
1377 tx_ring->tx_bi = NULL;
1378 return -ENOMEM;
1379}
1380
1381/**
1382 * i40e_clean_rx_ring - Free Rx buffers
1383 * @rx_ring: ring to be cleaned
1384 **/
1385void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
1386{
1387 unsigned long bi_size;
1388 u16 i;
1389
1390 /* ring already cleared, nothing to do */
1391 if (!rx_ring->rx_bi)
1392 return;
1393
1394 if (rx_ring->skb) {
1395 dev_kfree_skb(rx_ring->skb);
1396 rx_ring->skb = NULL;
1397 }
1398
1399 /* Free all the Rx ring sk_buffs */
1400 for (i = 0; i < rx_ring->count; i++) {
1401 struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
1402
1403 if (!rx_bi->page)
1404 continue;
1405
1406 /* Invalidate cache lines that may have been written to by
1407 * device so that we avoid corrupting memory.
1408 */
1409 dma_sync_single_range_for_cpu(rx_ring->dev,
1410 rx_bi->dma,
1411 rx_bi->page_offset,
1412 rx_ring->rx_buf_len,
1413 DMA_FROM_DEVICE);
1414
1415 /* free resources associated with mapping */
1416 dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
1417 i40e_rx_pg_size(rx_ring),
1418 DMA_FROM_DEVICE,
1419 I40E_RX_DMA_ATTR);
1420
1421 __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
1422
1423 rx_bi->page = NULL;
1424 rx_bi->page_offset = 0;
1425 }
1426
1427 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1428 memset(rx_ring->rx_bi, 0, bi_size);
1429
1430 /* Zero out the descriptor ring */
1431 memset(rx_ring->desc, 0, rx_ring->size);
1432
1433 rx_ring->next_to_alloc = 0;
1434 rx_ring->next_to_clean = 0;
1435 rx_ring->next_to_use = 0;
1436}
1437
1438/**
1439 * i40e_free_rx_resources - Free Rx resources
1440 * @rx_ring: ring to clean the resources from
1441 *
1442 * Free all receive software resources
1443 **/
1444void i40e_free_rx_resources(struct i40e_ring *rx_ring)
1445{
1446 i40e_clean_rx_ring(rx_ring);
1447 if (rx_ring->vsi->type == I40E_VSI_MAIN)
1448 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
1449 rx_ring->xdp_prog = NULL;
1450 kfree(rx_ring->rx_bi);
1451 rx_ring->rx_bi = NULL;
1452
1453 if (rx_ring->desc) {
1454 dma_free_coherent(rx_ring->dev, rx_ring->size,
1455 rx_ring->desc, rx_ring->dma);
1456 rx_ring->desc = NULL;
1457 }
1458}
1459
1460/**
1461 * i40e_setup_rx_descriptors - Allocate Rx descriptors
1462 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
1463 *
1464 * Returns 0 on success, negative on failure
1465 **/
1466int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
1467{
1468 struct device *dev = rx_ring->dev;
1469 int err = -ENOMEM;
1470 int bi_size;
1471
1472 /* warn if we are about to overwrite the pointer */
1473 WARN_ON(rx_ring->rx_bi);
1474 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1475 rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
1476 if (!rx_ring->rx_bi)
1477 goto err;
1478
1479 u64_stats_init(&rx_ring->syncp);
1480
1481 /* Round up to nearest 4K */
1482 rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
1483 rx_ring->size = ALIGN(rx_ring->size, 4096);
1484 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
1485 &rx_ring->dma, GFP_KERNEL);
1486
1487 if (!rx_ring->desc) {
1488 dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
1489 rx_ring->size);
1490 goto err;
1491 }
1492
1493 rx_ring->next_to_alloc = 0;
1494 rx_ring->next_to_clean = 0;
1495 rx_ring->next_to_use = 0;
1496
1497 /* XDP RX-queue info only needed for RX rings exposed to XDP */
1498 if (rx_ring->vsi->type == I40E_VSI_MAIN) {
1499 err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
1500 rx_ring->queue_index);
1501 if (err < 0)
1502 goto err;
1503 }
1504
1505 rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
1506
1507 return 0;
1508err:
1509 kfree(rx_ring->rx_bi);
1510 rx_ring->rx_bi = NULL;
1511 return err;
1512}
1513
1514/**
1515 * i40e_release_rx_desc - Store the new tail value and notify hardware
1516 * @rx_ring: ring to bump
1517 * @val: new next_to_use value to write to the tail register
1518 **/
1519static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
1520{
1521 rx_ring->next_to_use = val;
1522
1523 /* update next to alloc since we have filled the ring */
1524 rx_ring->next_to_alloc = val;
1525
1526 /* Force memory writes to complete before letting h/w
1527 * know there are new descriptors to fetch. (Only
1528 * applicable for weak-ordered memory model archs,
1529 * such as IA-64).
1530 */
1531 wmb();
1532 writel(val, rx_ring->tail);
1533}
1534
1535/**
1536 * i40e_rx_offset - Return expected offset into page to access data
1537 * @rx_ring: Ring we are requesting offset of
1538 *
1539 * Returns the offset value for ring into the data buffer.
1540 */
1541static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
1542{
1543 return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
1544}
1545
1546/**
1547 * i40e_alloc_mapped_page - recycle or make a new page
1548 * @rx_ring: ring to use
1549 * @bi: rx_buffer struct to modify
1550 *
1551 * Returns true if the page was successfully allocated or
1552 * reused.
1553 **/
1554static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
1555 struct i40e_rx_buffer *bi)
1556{
1557 struct page *page = bi->page;
1558 dma_addr_t dma;
1559
1560 /* since we are recycling buffers we should seldom need to alloc */
1561 if (likely(page)) {
1562 rx_ring->rx_stats.page_reuse_count++;
1563 return true;
1564 }
1565
1566 /* alloc new page for storage */
1567 page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
1568 if (unlikely(!page)) {
1569 rx_ring->rx_stats.alloc_page_failed++;
1570 return false;
1571 }
1572
1573 /* map page for use */
1574 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
1575 i40e_rx_pg_size(rx_ring),
1576 DMA_FROM_DEVICE,
1577 I40E_RX_DMA_ATTR);
1578
1579 /* if mapping failed free memory back to system since
1580 * there isn't much point in holding memory we can't use
1581 */
1582 if (dma_mapping_error(rx_ring->dev, dma)) {
1583 __free_pages(page, i40e_rx_pg_order(rx_ring));
1584 rx_ring->rx_stats.alloc_page_failed++;
1585 return false;
1586 }
1587
1588 bi->dma = dma;
1589 bi->page = page;
1590 bi->page_offset = i40e_rx_offset(rx_ring);
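	/* Take a large up-front page reference and track usage through
	 * pagecnt_bias so that buffer recycling only needs cheap non-atomic
	 * updates; the bias is replenished in i40e_can_reuse_rx_page()
	 * once it has been drained.
	 */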
1591 page_ref_add(page, USHRT_MAX - 1);
1592 bi->pagecnt_bias = USHRT_MAX;
1593
1594 return true;
1595}
1596
1597/**
1598 * i40e_receive_skb - Send a completed packet up the stack
1599 * @rx_ring: rx ring in play
1600 * @skb: packet to send up
1601 * @vlan_tag: vlan tag for packet
1602 **/
1603static void i40e_receive_skb(struct i40e_ring *rx_ring,
1604 struct sk_buff *skb, u16 vlan_tag)
1605{
1606 struct i40e_q_vector *q_vector = rx_ring->q_vector;
1607
1608 if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1609 (vlan_tag & VLAN_VID_MASK))
1610 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
1611
1612 napi_gro_receive(&q_vector->napi, skb);
1613}
1614
1615/**
1616 * i40e_alloc_rx_buffers - Replace used receive buffers
1617 * @rx_ring: ring to place buffers on
1618 * @cleaned_count: number of buffers to replace
1619 *
1620 * Returns false if all allocations were successful, true if any fail
1621 **/
1622bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
1623{
1624 u16 ntu = rx_ring->next_to_use;
1625 union i40e_rx_desc *rx_desc;
1626 struct i40e_rx_buffer *bi;
1627
1628 /* do nothing if no valid netdev defined */
1629 if (!rx_ring->netdev || !cleaned_count)
1630 return false;
1631
1632 rx_desc = I40E_RX_DESC(rx_ring, ntu);
1633 bi = &rx_ring->rx_bi[ntu];
1634
1635 do {
1636 if (!i40e_alloc_mapped_page(rx_ring, bi))
1637 goto no_buffers;
1638
1639 /* sync the buffer for use by the device */
1640 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
1641 bi->page_offset,
1642 rx_ring->rx_buf_len,
1643 DMA_FROM_DEVICE);
1644
1645 /* Refresh the desc even if buffer_addrs didn't change
1646 * because each write-back erases this info.
1647 */
1648 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
1649
1650 rx_desc++;
1651 bi++;
1652 ntu++;
1653 if (unlikely(ntu == rx_ring->count)) {
1654 rx_desc = I40E_RX_DESC(rx_ring, 0);
1655 bi = rx_ring->rx_bi;
1656 ntu = 0;
1657 }
1658
1659 /* clear the status bits for the next_to_use descriptor */
1660 rx_desc->wb.qword1.status_error_len = 0;
1661
1662 cleaned_count--;
1663 } while (cleaned_count);
1664
1665 if (rx_ring->next_to_use != ntu)
1666 i40e_release_rx_desc(rx_ring, ntu);
1667
1668 return false;
1669
1670no_buffers:
1671 if (rx_ring->next_to_use != ntu)
1672 i40e_release_rx_desc(rx_ring, ntu);
1673
1674 /* make sure to come back via polling to try again after
1675 * allocation failure
1676 */
1677 return true;
1678}
1679
1680/**
1681 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
1682 * @vsi: the VSI we care about
1683 * @skb: skb currently being received and modified
1684 * @rx_desc: the receive descriptor
1685 **/
1686static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1687 struct sk_buff *skb,
1688 union i40e_rx_desc *rx_desc)
1689{
1690 struct i40e_rx_ptype_decoded decoded;
1691 u32 rx_error, rx_status;
1692 bool ipv4, ipv6;
1693 u8 ptype;
1694 u64 qword;
1695
1696 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1697 ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
1698 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1699 I40E_RXD_QW1_ERROR_SHIFT;
1700 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1701 I40E_RXD_QW1_STATUS_SHIFT;
1702 decoded = decode_rx_desc_ptype(ptype);
1703
1704 skb->ip_summed = CHECKSUM_NONE;
1705
1706 skb_checksum_none_assert(skb);
1707
1708 /* Rx csum enabled and ip headers found? */
1709 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
1710 return;
1711
1712 /* did the hardware decode the packet and checksum? */
1713 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
1714 return;
1715
1716 /* both known and outer_ip must be set for the below code to work */
1717 if (!(decoded.known && decoded.outer_ip))
1718 return;
1719
1720 ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1721 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
1722 ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1723 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
1724
1725 if (ipv4 &&
1726 (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
1727 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
1728 goto checksum_fail;
1729
1730 /* likely incorrect csum if alternate IP extension headers found */
1731 if (ipv6 &&
1732 rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
1733 /* don't increment checksum err here, non-fatal err */
1734 return;
1735
1736 /* there was some L4 error, count error and punt packet to the stack */
1737 if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
1738 goto checksum_fail;
1739
1740 /* handle packets that were not able to be checksummed due
1741 * to arrival speed, in this case the stack can compute
1742 * the csum.
1743 */
1744 if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
1745 return;
1746
1747 /* If there is an outer header present that might contain a checksum
1748 * we need to bump the checksum level by 1 to reflect the fact that
1749 * we are indicating we validated the inner checksum.
1750 */
1751 if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
1752 skb->csum_level = 1;
1753
1754 /* Only report checksum unnecessary for TCP, UDP, or SCTP */
1755 switch (decoded.inner_prot) {
1756 case I40E_RX_PTYPE_INNER_PROT_TCP:
1757 case I40E_RX_PTYPE_INNER_PROT_UDP:
1758 case I40E_RX_PTYPE_INNER_PROT_SCTP:
1759 skb->ip_summed = CHECKSUM_UNNECESSARY;
1760 /* fall through */
1761 default:
1762 break;
1763 }
1764
1765 return;
1766
1767checksum_fail:
1768 vsi->back->hw_csum_rx_error++;
1769}
1770
1771/**
1772 * i40e_ptype_to_htype - get a hash type
1773 * @ptype: the ptype value from the descriptor
1774 *
1775 * Returns a hash type to be used by skb_set_hash
1776 **/
1777static inline int i40e_ptype_to_htype(u8 ptype)
1778{
1779 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1780
1781 if (!decoded.known)
1782 return PKT_HASH_TYPE_NONE;
1783
1784 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1785 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1786 return PKT_HASH_TYPE_L4;
1787 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1788 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1789 return PKT_HASH_TYPE_L3;
1790 else
1791 return PKT_HASH_TYPE_L2;
1792}
1793
1794/**
1795 * i40e_rx_hash - set the hash value in the skb
1796 * @ring: descriptor ring
1797 * @rx_desc: specific descriptor
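 * @skb: skb currently being received and modified
 * @rx_ptype: Rx packet type decoded from the descriptor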
1798 **/
1799static inline void i40e_rx_hash(struct i40e_ring *ring,
1800 union i40e_rx_desc *rx_desc,
1801 struct sk_buff *skb,
1802 u8 rx_ptype)
1803{
1804 u32 hash;
1805 const __le64 rss_mask =
1806 cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
1807 I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
1808
1809 if (!(ring->netdev->features & NETIF_F_RXHASH))
1810 return;
1811
1812 if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
1813 hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1814 skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
1815 }
1816}
1817
1818/**
1819 * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
1820 * @rx_ring: rx descriptor ring packet is being transacted on
1821 * @rx_desc: pointer to the EOP Rx descriptor
1822 * @skb: pointer to current skb being populated
1823 * @rx_ptype: the packet type decoded by hardware
1824 *
1825 * This function checks the ring, descriptor, and packet information in
1826 * order to populate the hash, checksum, VLAN, protocol, and
1827 * other fields within the skb.
1828 **/
1829static inline
1830void i40e_process_skb_fields(struct i40e_ring *rx_ring,
1831 union i40e_rx_desc *rx_desc, struct sk_buff *skb,
1832 u8 rx_ptype)
1833{
1834 u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1835 u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1836 I40E_RXD_QW1_STATUS_SHIFT;
1837 u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
1838 u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
1839 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
1840
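	/* if hardware captured an Rx timestamp for this packet, pass the
	 * timestamp index to the PTP code so it can stamp the skb
	 */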
1841 if (unlikely(tsynvalid))
1842 i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
1843
1844 i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
1845
1846 i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
1847
1848 skb_record_rx_queue(skb, rx_ring->queue_index);
1849
1850 /* modifies the skb - consumes the enet header */
1851 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1852}
1853
1854/**
1855 * i40e_cleanup_headers - Correct empty headers
1856 * @rx_ring: rx descriptor ring packet is being transacted on
1857 * @skb: pointer to current skb being fixed
1858 * @rx_desc: pointer to the EOP Rx descriptor
1859 *
1860 * Also address the case where we are pulling data in on pages only
1861 * and as such no data is present in the skb header.
1862 *
1863 * In addition if skb is not at least 60 bytes we need to pad it so that
1864 * it is large enough to qualify as a valid Ethernet frame.
1865 *
1866 * Returns true if an error was encountered and skb was freed.
1867 **/
1868static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
1869 union i40e_rx_desc *rx_desc)
1870
1871{
1872 /* XDP packets use error pointer so abort at this point */
1873 if (IS_ERR(skb))
1874 return true;
1875
1876 /* ERR_MASK will only have valid bits if EOP set, and
1877 * what we are doing here is actually checking
1878 * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
1879 * the error field
1880 */
1881 if (unlikely(i40e_test_staterr(rx_desc,
1882 BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
1883 dev_kfree_skb_any(skb);
1884 return true;
1885 }
1886
1887 /* if eth_skb_pad returns an error the skb was freed */
1888 if (eth_skb_pad(skb))
1889 return true;
1890
1891 return false;
1892}
1893
1894/**
1895 * i40e_page_is_reusable - check if any reuse is possible
1896 * @page: page struct to check
1897 *
1898 * A page is not reusable if it was allocated under low memory
1899 * conditions, or it's not in the same NUMA node as this CPU.
1900 */
1901static inline bool i40e_page_is_reusable(struct page *page)
1902{
1903 return (page_to_nid(page) == numa_mem_id()) &&
1904 !page_is_pfmemalloc(page);
1905}
1906
1907/**
1908 * i40e_can_reuse_rx_page - Determine if this page can be reused by
1909 * the adapter for another receive
1910 *
1911 * @rx_buffer: buffer containing the page
1912 *
1913 * If page is reusable, rx_buffer->page_offset is adjusted to point to
1914 * an unused region in the page.
1915 *
1916 * For small pages, @truesize will be a constant value, half the size
1917 * of the memory at page. We'll attempt to alternate between high and
1918 * low halves of the page, with one half ready for use by the hardware
1919 * and the other half being consumed by the stack. We use the page
1920 * ref count to determine whether the stack has finished consuming the
1921 * portion of this page that was passed up with a previous packet. If
1922 * the page ref count is >1, we'll assume the "other" half page is
1923 * still busy, and this page cannot be reused.
1924 *
1925 * For larger pages, @truesize will be the actual space used by the
1926 * received packet (adjusted upward to an even multiple of the cache
1927 * line size). This will advance through the page by the amount
1928 * actually consumed by the received packets while there is still
1929 * space for a buffer. Each region of larger pages will be used at
1930 * most once, after which the page will not be reused.
1931 *
1932 * In either case, if the page is reusable its refcount is increased.
1933 **/
1934static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
1935{
1936 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
1937 struct page *page = rx_buffer->page;
1938
1939 /* Is any reuse possible? */
1940 if (unlikely(!i40e_page_is_reusable(page)))
1941 return false;
1942
1943#if (PAGE_SIZE < 8192)
1944 /* if we are only owner of page we can reuse it */
1945 if (unlikely((page_count(page) - pagecnt_bias) > 1))
1946 return false;
1947#else
1948#define I40E_LAST_OFFSET \
1949 (SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
1950 if (rx_buffer->page_offset > I40E_LAST_OFFSET)
1951 return false;
1952#endif
1953
1954 /* If we have drained the page fragment pool we need to update
1955 * the pagecnt_bias and page count so that we fully restock the
1956 * number of references the driver holds.
1957 */
1958 if (unlikely(pagecnt_bias == 1)) {
1959 page_ref_add(page, USHRT_MAX - 1);
1960 rx_buffer->pagecnt_bias = USHRT_MAX;
1961 }
1962
1963 return true;
1964}
1965
1966/**
1967 * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
1968 * @rx_ring: rx descriptor ring to transact packets on
1969 * @rx_buffer: buffer containing page to add
1970 * @skb: sk_buff to place the data into
1971 * @size: packet length from rx_desc
1972 *
1973 * This function will add the data contained in rx_buffer->page to the skb.
1974 * It will just attach the page as a frag to the skb.
1975 *
1976 * The function will then update the page offset.
1977 **/
1978static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
1979 struct i40e_rx_buffer *rx_buffer,
1980 struct sk_buff *skb,
1981 unsigned int size)
1982{
1983#if (PAGE_SIZE < 8192)
1984 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
1985#else
1986 unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring));
1987#endif
1988
1989 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
1990 rx_buffer->page_offset, size, truesize);
1991
1992 /* page is being used so we must update the page offset */
1993#if (PAGE_SIZE < 8192)
1994 rx_buffer->page_offset ^= truesize;
1995#else
1996 rx_buffer->page_offset += truesize;
1997#endif
1998}
1999
2000/**
2001 * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
2002 * @rx_ring: rx descriptor ring to transact packets on
2003 * @size: size of buffer to add to skb
2004 *
2005 * This function will pull an Rx buffer from the ring and synchronize it
2006 * for use by the CPU.
2007 */
2008static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
2009 const unsigned int size)
2010{
2011 struct i40e_rx_buffer *rx_buffer;
2012
2013 rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
2014 prefetchw(rx_buffer->page);
2015
2016 /* we are reusing so sync this buffer for CPU use */
2017 dma_sync_single_range_for_cpu(rx_ring->dev,
2018 rx_buffer->dma,
2019 rx_buffer->page_offset,
2020 size,
2021 DMA_FROM_DEVICE);
2022
2023 /* We have pulled a buffer for use, so decrement pagecnt_bias */
2024 rx_buffer->pagecnt_bias--;
2025
2026 return rx_buffer;
2027}
2028
2029/**
2030 * i40e_construct_skb - Allocate skb and populate it
2031 * @rx_ring: rx descriptor ring to transact packets on
2032 * @rx_buffer: rx buffer to pull data from
2033 * @xdp: xdp_buff pointing to the data
2034 *
2035 * This function allocates an skb. It then populates it with the page
2036 * data from the current receive descriptor, taking care to set up the
2037 * skb correctly.
2038 */
2039static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
2040 struct i40e_rx_buffer *rx_buffer,
2041 struct xdp_buff *xdp)
2042{
2043 unsigned int size = xdp->data_end - xdp->data;
2044#if (PAGE_SIZE < 8192)
2045 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
2046#else
2047 unsigned int truesize = SKB_DATA_ALIGN(size);
2048#endif
2049 unsigned int headlen;
2050 struct sk_buff *skb;
2051
2052 /* prefetch first cache line of first page */
2053 prefetch(xdp->data);
2054#if L1_CACHE_BYTES < 128
2055 prefetch(xdp->data + L1_CACHE_BYTES);
2056#endif
2057
2058 /* allocate a skb to store the frags */
2059 skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
2060 I40E_RX_HDR_SIZE,
2061 GFP_ATOMIC | __GFP_NOWARN);
2062 if (unlikely(!skb))
2063 return NULL;
2064
2065 /* Determine available headroom for copy */
2066 headlen = size;
2067 if (headlen > I40E_RX_HDR_SIZE)
2068 headlen = eth_get_headlen(xdp->data, I40E_RX_HDR_SIZE);
2069
2070 /* align pull length to size of long to optimize memcpy performance */
2071 memcpy(__skb_put(skb, headlen), xdp->data,
2072 ALIGN(headlen, sizeof(long)));
2073
2074 /* update all of the pointers */
2075 size -= headlen;
2076 if (size) {
2077 skb_add_rx_frag(skb, 0, rx_buffer->page,
2078 rx_buffer->page_offset + headlen,
2079 size, truesize);
2080
2081 /* buffer is used by skb, update page_offset */
2082#if (PAGE_SIZE < 8192)
2083 rx_buffer->page_offset ^= truesize;
2084#else
2085 rx_buffer->page_offset += truesize;
2086#endif
2087 } else {
2088 /* buffer is unused, reset bias back to rx_buffer */
2089 rx_buffer->pagecnt_bias++;
2090 }
2091
2092 return skb;
2093}
2094
2095/**
2096 * i40e_build_skb - Build skb around an existing buffer
2097 * @rx_ring: Rx descriptor ring to transact packets on
2098 * @rx_buffer: Rx buffer to pull data from
2099 * @xdp: xdp_buff pointing to the data
2100 *
2101 * This function builds an skb around an existing Rx buffer, taking care
2102 * to set up the skb correctly and avoid any memcpy overhead.
2103 */
2104static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
2105 struct i40e_rx_buffer *rx_buffer,
2106 struct xdp_buff *xdp)
2107{
2108 unsigned int size = xdp->data_end - xdp->data;
2109#if (PAGE_SIZE < 8192)
2110 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
2111#else
2112 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
2113 SKB_DATA_ALIGN(I40E_SKB_PAD + size);
2114#endif
2115 struct sk_buff *skb;
2116
2117 /* prefetch first cache line of first page */
2118 prefetch(xdp->data);
2119#if L1_CACHE_BYTES < 128
2120 prefetch(xdp->data + L1_CACHE_BYTES);
2121#endif
2122 /* build an skb around the page buffer */
2123 skb = build_skb(xdp->data_hard_start, truesize);
2124 if (unlikely(!skb))
2125 return NULL;
2126
2127 /* update pointers within the skb to store the data */
2128 skb_reserve(skb, I40E_SKB_PAD);
2129 __skb_put(skb, size);
2130
2131 /* buffer is used by skb, update page_offset */
2132#if (PAGE_SIZE < 8192)
2133 rx_buffer->page_offset ^= truesize;
2134#else
2135 rx_buffer->page_offset += truesize;
2136#endif
2137
2138 return skb;
2139}
2140
2141/**
2142 * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
2143 * @rx_ring: rx descriptor ring to transact packets on
2144 * @rx_buffer: rx buffer to pull data from
2145 *
2146 * This function will clean up the contents of the rx_buffer. It will
2147 * either recycle the buffer or unmap it and free the associated resources.
2148 */
2149static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
2150 struct i40e_rx_buffer *rx_buffer)
2151{
2152 if (i40e_can_reuse_rx_page(rx_buffer)) {
2153 /* hand second half of page back to the ring */
2154 i40e_reuse_rx_page(rx_ring, rx_buffer);
2155 rx_ring->rx_stats.page_reuse_count++;
2156 } else {
2157 /* we are not reusing the buffer so unmap it */
2158 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
2159 i40e_rx_pg_size(rx_ring),
2160 DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
2161 __page_frag_cache_drain(rx_buffer->page,
2162 rx_buffer->pagecnt_bias);
2163 }
2164
2165 /* clear contents of buffer_info */
2166 rx_buffer->page = NULL;
2167}
2168
2169/**
2170 * i40e_is_non_eop - process handling of non-EOP buffers
2171 * @rx_ring: Rx ring being processed
2172 * @rx_desc: Rx descriptor for current buffer
2173 * @skb: Current socket buffer containing buffer in progress
2174 *
2175 * This function updates next to clean. If the buffer is an EOP buffer
2176 * this function exits returning false, otherwise it will place the
2177 * sk_buff in the next buffer to be chained and return true indicating
2178 * that this is in fact a non-EOP buffer.
2179 **/
2180static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
2181 union i40e_rx_desc *rx_desc,
2182 struct sk_buff *skb)
2183{
2184 u32 ntc = rx_ring->next_to_clean + 1;
2185
2186 /* fetch, update, and store next to clean */
2187 ntc = (ntc < rx_ring->count) ? ntc : 0;
2188 rx_ring->next_to_clean = ntc;
2189
2190 prefetch(I40E_RX_DESC(rx_ring, ntc));
2191
2192 /* if we are the last buffer then there is nothing else to do */
2193#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
2194 if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
2195 return false;
2196
2197 rx_ring->rx_stats.non_eop_descs++;
2198
2199 return true;
2200}
2201
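/* Verdict codes returned by i40e_run_xdp(); they are encoded into the skb
 * pointer via ERR_PTR(-result) and decoded again in i40e_clean_rx_irq().
 */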
2202#define I40E_XDP_PASS 0
2203#define I40E_XDP_CONSUMED 1
2204#define I40E_XDP_TX 2
2205
2206static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
2207 struct i40e_ring *xdp_ring);
2208
2209/**
2210 * i40e_run_xdp - run an XDP program
2211 * @rx_ring: Rx ring being processed
2212 * @xdp: XDP buffer containing the frame
2213 **/
2214static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
2215 struct xdp_buff *xdp)
2216{
2217 int err, result = I40E_XDP_PASS;
2218 struct i40e_ring *xdp_ring;
2219 struct bpf_prog *xdp_prog;
2220 u32 act;
2221
2222 rcu_read_lock();
2223 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
2224
2225 if (!xdp_prog)
2226 goto xdp_out;
2227
2228 act = bpf_prog_run_xdp(xdp_prog, xdp);
2229 switch (act) {
2230 case XDP_PASS:
2231 break;
2232 case XDP_TX:
2233 xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2234 result = i40e_xmit_xdp_ring(xdp, xdp_ring);
2235 break;
2236 case XDP_REDIRECT:
2237 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
2238 result = !err ? I40E_XDP_TX : I40E_XDP_CONSUMED;
2239 break;
2240 default:
2241 bpf_warn_invalid_xdp_action(act);
2242 case XDP_ABORTED:
2243 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
2244 /* fallthrough -- handle aborts by dropping packet */
2245 case XDP_DROP:
2246 result = I40E_XDP_CONSUMED;
2247 break;
2248 }
2249xdp_out:
2250 rcu_read_unlock();
2251 return ERR_PTR(-result);
2252}
2253
2254/**
2255 * i40e_rx_buffer_flip - adjust rx_buffer to point to an unused region
2256 * @rx_ring: Rx ring
2257 * @rx_buffer: Rx buffer to adjust
2258 * @size: Size of adjustment
2259 **/
2260static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring,
2261 struct i40e_rx_buffer *rx_buffer,
2262 unsigned int size)
2263{
2264#if (PAGE_SIZE < 8192)
2265 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
2266
2267 rx_buffer->page_offset ^= truesize;
2268#else
2269 unsigned int truesize = SKB_DATA_ALIGN(i40e_rx_offset(rx_ring) + size);
2270
2271 rx_buffer->page_offset += truesize;
2272#endif
2273}
2274
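/**
 * i40e_xdp_ring_update_tail - Updates the XDP Tx ring tail register
 * @xdp_ring: XDP Tx ring
 **/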
2275static inline void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring)
2276{
2277 /* Force memory writes to complete before letting h/w
2278 * know there are new descriptors to fetch.
2279 */
2280 wmb();
2281 writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
2282}
2283
2284/**
2285 * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
2286 * @rx_ring: rx descriptor ring to transact packets on
2287 * @budget: Total limit on number of packets to process
2288 *
2289 * This function provides a "bounce buffer" approach to Rx interrupt
2290 * processing. The advantage to this is that on systems that have
2291 * expensive overhead for IOMMU access this provides a means of avoiding
2292 * it by maintaining the mapping of the page to the system.
2293 *
2294 * Returns amount of work completed
2295 **/
2296static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
2297{
2298 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
2299 struct sk_buff *skb = rx_ring->skb;
2300 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
2301 bool failure = false, xdp_xmit = false;
2302 struct xdp_buff xdp;
2303
2304 xdp.rxq = &rx_ring->xdp_rxq;
2305
2306 while (likely(total_rx_packets < (unsigned int)budget)) {
2307 struct i40e_rx_buffer *rx_buffer;
2308 union i40e_rx_desc *rx_desc;
2309 unsigned int size;
2310 u16 vlan_tag;
2311 u8 rx_ptype;
2312 u64 qword;
2313
2314 /* return some buffers to hardware, one at a time is too slow */
2315 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
2316 failure = failure ||
2317 i40e_alloc_rx_buffers(rx_ring, cleaned_count);
2318 cleaned_count = 0;
2319 }
2320
2321 rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
2322
2323 /* status_error_len will always be zero for unused descriptors
2324 * because it's cleared in cleanup, and overlaps with hdr_addr
2325 * which is always zero because packet split isn't used. If the
2326 * hardware wrote DD then the length will be non-zero.
2327 */
2328 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
2329
2330 /* This memory barrier is needed to keep us from reading
2331 * any other fields out of the rx_desc until we have
2332 * verified the descriptor has been written back.
2333 */
2334 dma_rmb();
2335
2336 if (unlikely(i40e_rx_is_programming_status(qword))) {
2337 i40e_clean_programming_status(rx_ring, rx_desc, qword);
2338 cleaned_count++;
2339 continue;
2340 }
2341 size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
2342 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
2343 if (!size)
2344 break;
2345
2346 i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
2347 rx_buffer = i40e_get_rx_buffer(rx_ring, size);
2348
2349 /* retrieve a buffer from the ring */
2350 if (!skb) {
2351 xdp.data = page_address(rx_buffer->page) +
2352 rx_buffer->page_offset;
2353 xdp_set_data_meta_invalid(&xdp);
2354 xdp.data_hard_start = xdp.data -
2355 i40e_rx_offset(rx_ring);
2356 xdp.data_end = xdp.data + size;
2357
2358 skb = i40e_run_xdp(rx_ring, &xdp);
2359 }
2360
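		/* an ERR_PTR() skb means the frame was consumed by XDP
		 * (dropped, sent via XDP_TX, or redirected), so only the
		 * byte and packet counters need updating here
		 */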
2361 if (IS_ERR(skb)) {
2362 if (PTR_ERR(skb) == -I40E_XDP_TX) {
2363 xdp_xmit = true;
2364 i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
2365 } else {
2366 rx_buffer->pagecnt_bias++;
2367 }
2368 total_rx_bytes += size;
2369 total_rx_packets++;
2370 } else if (skb) {
2371 i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
2372 } else if (ring_uses_build_skb(rx_ring)) {
2373 skb = i40e_build_skb(rx_ring, rx_buffer, &xdp);
2374 } else {
2375 skb = i40e_construct_skb(rx_ring, rx_buffer, &xdp);
2376 }
2377
2378 /* exit if we failed to retrieve a buffer */
2379 if (!skb) {
2380 rx_ring->rx_stats.alloc_buff_failed++;
2381 rx_buffer->pagecnt_bias++;
2382 break;
2383 }
2384
2385 i40e_put_rx_buffer(rx_ring, rx_buffer);
2386 cleaned_count++;
2387
2388 if (i40e_is_non_eop(rx_ring, rx_desc, skb))
2389 continue;
2390
2391 if (i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
2392 skb = NULL;
2393 continue;
2394 }
2395
2396 /* probably a little skewed due to removing CRC */
2397 total_rx_bytes += skb->len;
2398
2399 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
2400 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
2401 I40E_RXD_QW1_PTYPE_SHIFT;
2402
2403 /* populate checksum, VLAN, and protocol */
2404 i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
2405
2406 vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
2407 le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
2408
2409 i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
2410 i40e_receive_skb(rx_ring, skb, vlan_tag);
2411 skb = NULL;
2412
2413 /* update budget accounting */
2414 total_rx_packets++;
2415 }
2416
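	/* if any frames were sent via XDP_TX or redirected during this
	 * poll, kick the XDP Tx ring tail once and flush the redirect
	 * maps now
	 */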
2417 if (xdp_xmit) {
2418 struct i40e_ring *xdp_ring =
2419 rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2420
2421 i40e_xdp_ring_update_tail(xdp_ring);
2422 xdp_do_flush_map();
2423 }
2424
2425 rx_ring->skb = skb;
2426
2427 u64_stats_update_begin(&rx_ring->syncp);
2428 rx_ring->stats.packets += total_rx_packets;
2429 rx_ring->stats.bytes += total_rx_bytes;
2430 u64_stats_update_end(&rx_ring->syncp);
2431 rx_ring->q_vector->rx.total_packets += total_rx_packets;
2432 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
2433
2434 /* guarantee a trip back through this routine if there was a failure */
2435 return failure ? budget : (int)total_rx_packets;
2436}
2437
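/**
 * i40e_buildreg_itr - build a value for writing to I40E_PFINT_DYN_CTLN
 * @type: ITR index to program (I40E_RX_ITR, I40E_TX_ITR or I40E_ITR_NONE)
 * @itr: ITR value, in usecs, to encode into the register
 **/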
2438static inline u32 i40e_buildreg_itr(const int type, u16 itr)
2439{
2440 u32 val;
2441
2442 /* We don't bother with setting the CLEARPBA bit as the data sheet
2443 * points out doing so is "meaningless since it was already
2444 * auto-cleared". The auto-clearing happens when the interrupt is
2445 * asserted.
2446 *
2447 * Hardware errata 28 also indicates that writing to an
2448 * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear
2449 * an event in the PBA anyway, so we need to rely on the automask
2450 * to hold pending events for us until the interrupt is re-enabled.
2451 *
2452 * The itr value is reported in microseconds, and the register
2453 * value is recorded in 2 microsecond units. For this reason we
2454 * only need to shift by the interval shift - 1 instead of the
2455 * full value.
2456 */
2457 itr &= I40E_ITR_MASK;
2458
2459 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2460 (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
2461 (itr << (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1));
2462
2463 return val;
2464}
2465
2466/* a small macro to shorten up some long lines */
2467#define INTREG I40E_PFINT_DYN_CTLN
2468
2469/* The act of updating the ITR will cause it to immediately trigger. In order
2470 * to prevent this from throwing off adaptive update statistics we defer the
2471 * update so that it can only happen so often. So after either Tx or Rx are
2472 * updated we make the adaptive scheme wait until either the ITR completely
2473 * expires via the next_update expiration or we have been through at least
2474 * 3 interrupts.
2475 */
2476#define ITR_COUNTDOWN_START 3
2477
2478/**
2479 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
2480 * @vsi: the VSI we care about
2481 * @q_vector: q_vector for which itr is being updated and interrupt enabled
2482 *
2483 **/
2484static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
2485 struct i40e_q_vector *q_vector)
2486{
2487 struct i40e_hw *hw = &vsi->back->hw;
2488 u32 intval;
2489
2490 /* If we don't have MSIX, then we only need to re-enable icr0 */
2491 if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) {
2492 i40e_irq_dynamic_enable_icr0(vsi->back);
2493 return;
2494 }
2495
2496 /* These will do nothing if dynamic updates are not enabled */
2497 i40e_update_itr(q_vector, &q_vector->tx);
2498 i40e_update_itr(q_vector, &q_vector->rx);
2499
2500 /* This block of logic allows us to get away with only updating
2501 * one ITR value with each interrupt. The idea is to perform a
2502 * pseudo-lazy update with the following criteria.
2503 *
2504 * 1. Rx is given higher priority than Tx if both are in same state
2505 * 2. If we must reduce an ITR, that reduction is given highest priority.
2506 * 3. We then give priority to increasing ITR based on amount.
2507 */
2508 if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
2509 /* Rx ITR needs to be reduced, this is highest priority */
2510 intval = i40e_buildreg_itr(I40E_RX_ITR,
2511 q_vector->rx.target_itr);
2512 q_vector->rx.current_itr = q_vector->rx.target_itr;
2513 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2514 } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
2515 ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
2516 (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
2517 /* Tx ITR needs to be reduced, this is second priority
2518 * Tx ITR needs to be increased more than Rx, fourth priority
2519 */
2520 intval = i40e_buildreg_itr(I40E_TX_ITR,
2521 q_vector->tx.target_itr);
2522 q_vector->tx.current_itr = q_vector->tx.target_itr;
2523 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2524 } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
2525 /* Rx ITR needs to be increased, third priority */
2526 intval = i40e_buildreg_itr(I40E_RX_ITR,
2527 q_vector->rx.target_itr);
2528 q_vector->rx.current_itr = q_vector->rx.target_itr;
2529 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2530 } else {
2531 /* No ITR update, lowest priority */
2532 intval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
2533 if (q_vector->itr_countdown)
2534 q_vector->itr_countdown--;
2535 }
2536
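	/* only write the new ITR value and re-enable the interrupt if the
	 * VSI is not being brought down
	 */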
2537 if (!test_bit(__I40E_VSI_DOWN, vsi->state))
2538 wr32(hw, INTREG(q_vector->reg_idx), intval);
2539}
2540
2541/**
2542 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
2543 * @napi: napi struct with our devices info in it
2544 * @budget: amount of work driver is allowed to do this pass, in packets
2545 *
2546 * This function will clean all queues associated with a q_vector.
2547 *
2548 * Returns the amount of work done
2549 **/
2550int i40e_napi_poll(struct napi_struct *napi, int budget)
2551{
2552 struct i40e_q_vector *q_vector =
2553 container_of(napi, struct i40e_q_vector, napi);
2554 struct i40e_vsi *vsi = q_vector->vsi;
2555 struct i40e_ring *ring;
2556 bool clean_complete = true;
2557 bool arm_wb = false;
2558 int budget_per_ring;
2559 int work_done = 0;
2560
2561 if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
2562 napi_complete(napi);
2563 return 0;
2564 }
2565
2566 /* Since the actual Tx work is minimal, we can give the Tx a larger
2567 * budget and be more aggressive about cleaning up the Tx descriptors.
2568 */
2569 i40e_for_each_ring(ring, q_vector->tx) {
2570 if (!i40e_clean_tx_irq(vsi, ring, budget)) {
2571 clean_complete = false;
2572 continue;
2573 }
2574 arm_wb |= ring->arm_wb;
2575 ring->arm_wb = false;
2576 }
2577
2578 /* Handle case where we are called by netpoll with a budget of 0 */
2579 if (budget <= 0)
2580 goto tx_only;
2581
2582 /* We attempt to distribute budget to each Rx queue fairly, but don't
2583 * allow the budget to go below 1 because that would exit polling early.
2584 */
2585 budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
2586
2587 i40e_for_each_ring(ring, q_vector->rx) {
2588 int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);
2589
2590 work_done += cleaned;
2591 /* if we clean as many as budgeted, we must not be done */
2592 if (cleaned >= budget_per_ring)
2593 clean_complete = false;
2594 }
2595
2596 /* If work not completed, return budget and polling will return */
2597 if (!clean_complete) {
2598 int cpu_id = smp_processor_id();
2599
2600 /* It is possible that the interrupt affinity has changed but,
2601 * if the cpu is pegged at 100%, polling will never exit while
2602 * traffic continues and the interrupt will be stuck on this
2603 * cpu. We check to make sure affinity is correct before we
2604 * continue to poll, otherwise we must stop polling so the
2605 * interrupt can move to the correct cpu.
2606 */
2607 if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
2608 /* Tell napi that we are done polling */
2609 napi_complete_done(napi, work_done);
2610
2611 /* Force an interrupt */
2612 i40e_force_wb(vsi, q_vector);
2613
2614 /* Return budget-1 so that polling stops */
2615 return budget - 1;
2616 }
2617tx_only:
2618 if (arm_wb) {
2619 q_vector->tx.ring[0].tx_stats.tx_force_wb++;
2620 i40e_enable_wb_on_itr(vsi, q_vector);
2621 }
2622 return budget;
2623 }
2624
2625 if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
2626 q_vector->arm_wb_state = false;
2627
2628 /* Work is done so exit the polling mode and re-enable the interrupt */
2629 napi_complete_done(napi, work_done);
2630
2631 i40e_update_enable_itr(vsi, q_vector);
2632
2633 return min(work_done, budget - 1);
2634}
2635
2636/**
2637 * i40e_atr - Add a Flow Director ATR filter
2638 * @tx_ring: ring to add programming descriptor to
2639 * @skb: send buffer
2640 * @tx_flags: send tx flags
2641 **/
2642static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
2643 u32 tx_flags)
2644{
2645 struct i40e_filter_program_desc *fdir_desc;
2646 struct i40e_pf *pf = tx_ring->vsi->back;
2647 union {
2648 unsigned char *network;
2649 struct iphdr *ipv4;
2650 struct ipv6hdr *ipv6;
2651 } hdr;
2652 struct tcphdr *th;
2653 unsigned int hlen;
2654 u32 flex_ptype, dtype_cmd;
2655 int l4_proto;
2656 u16 i;
2657
2658 /* make sure ATR is enabled */
2659 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
2660 return;
2661
2662 if (test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
2663 return;
2664
2665 /* if sampling is disabled do nothing */
2666 if (!tx_ring->atr_sample_rate)
2667 return;
2668
2669 /* Currently only IPv4/IPv6 with TCP is supported */
2670 if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
2671 return;
2672
2673 /* snag network header to get L4 type and address */
2674 hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ?
2675 skb_inner_network_header(skb) : skb_network_header(skb);
2676
2677 /* Note: tx_flags gets modified to reflect inner protocols in
2678 * tx_enable_csum function if encap is enabled.
2679 */
2680 if (tx_flags & I40E_TX_FLAGS_IPV4) {
2681 /* access ihl as u8 to avoid unaligned access on ia64 */
2682 hlen = (hdr.network[0] & 0x0F) << 2;
2683 l4_proto = hdr.ipv4->protocol;
2684 } else {
2685 /* find the start of the innermost ipv6 header */
2686 unsigned int inner_hlen = hdr.network - skb->data;
2687 unsigned int h_offset = inner_hlen;
2688
2689 /* this function updates h_offset to the end of the header */
2690 l4_proto =
2691 ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL);
2692 /* hlen will contain our best estimate of the tcp header */
2693 hlen = h_offset - inner_hlen;
2694 }
2695
2696 if (l4_proto != IPPROTO_TCP)
2697 return;
2698
2699 th = (struct tcphdr *)(hdr.network + hlen);
2700
2701 /* Due to lack of space, no more new filters can be programmed */
2702 if (th->syn && test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
2703 return;
2704 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) {
2705 /* HW ATR eviction will take care of removing filters on FIN
2706 * and RST packets.
2707 */
2708 if (th->fin || th->rst)
2709 return;
2710 }
2711
2712 tx_ring->atr_count++;
2713
2714 /* sample on all syn/fin/rst packets or once every atr sample rate */
2715 if (!th->fin &&
2716 !th->syn &&
2717 !th->rst &&
2718 (tx_ring->atr_count < tx_ring->atr_sample_rate))
2719 return;
2720
2721 tx_ring->atr_count = 0;
2722
2723 /* grab the next descriptor */
2724 i = tx_ring->next_to_use;
2725 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
2726
2727 i++;
2728 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2729
2730 flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
2731 I40E_TXD_FLTR_QW0_QINDEX_MASK;
2732 flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
2733 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
2734 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
2735 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
2736 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
2737
2738 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
2739
2740 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
2741
2742 dtype_cmd |= (th->fin || th->rst) ?
2743 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
2744 I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
2745 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
2746 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
2747
2748 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
2749 I40E_TXD_FLTR_QW1_DEST_SHIFT;
2750
2751 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
2752 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
2753
2754 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
2755 if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
2756 dtype_cmd |=
2757 ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
2758 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2759 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2760 else
2761 dtype_cmd |=
2762 ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
2763 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2764 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2765
2766 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED)
2767 dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
2768
2769 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
2770 fdir_desc->rsvd = cpu_to_le32(0);
2771 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
2772 fdir_desc->fd_id = cpu_to_le32(0);
2773}
2774
2775/**
2776 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
2777 * @skb: send buffer
2778 * @tx_ring: ring to send buffer on
2779 * @flags: the tx flags to be set
2780 *
2781 * Checks the skb and sets up the corresponding generic transmit flags
2782 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
2783 *
2784 * Returns an error code to indicate the frame should be dropped upon error,
2785 * otherwise returns 0 to indicate the flags have been set properly.
2786 **/
2787static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2788 struct i40e_ring *tx_ring,
2789 u32 *flags)
2790{
2791 __be16 protocol = skb->protocol;
2792 u32 tx_flags = 0;
2793
2794 if (protocol == htons(ETH_P_8021Q) &&
2795 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
2796 /* When HW VLAN acceleration is turned off by the user the
2797 * stack sets the protocol to 8021q so that the driver
2798 * can take any steps required to support the SW only
2799 * VLAN handling. In our case the driver doesn't need
2800 * to take any further steps so just set the protocol
2801 * to the encapsulated ethertype.
2802 */
2803 skb->protocol = vlan_get_protocol(skb);
2804 goto out;
2805 }
2806
2807 /* if we have a HW VLAN tag being added, default to the HW one */
2808 if (skb_vlan_tag_present(skb)) {
2809 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
2810 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2811 /* else if it is a SW VLAN, check the next protocol and store the tag */
2812 } else if (protocol == htons(ETH_P_8021Q)) {
2813 struct vlan_hdr *vhdr, _vhdr;
2814
2815 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
2816 if (!vhdr)
2817 return -EINVAL;
2818
2819 protocol = vhdr->h_vlan_encapsulated_proto;
2820 tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
2821 tx_flags |= I40E_TX_FLAGS_SW_VLAN;
2822 }
2823
2824 if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2825 goto out;
2826
2827 /* Insert 802.1p priority into VLAN header */
2828 if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
2829 (skb->priority != TC_PRIO_CONTROL)) {
2830 tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
2831 tx_flags |= (skb->priority & 0x7) <<
2832 I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
2833 if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
2834 struct vlan_ethhdr *vhdr;
2835 int rc;
2836
2837 rc = skb_cow_head(skb, 0);
2838 if (rc < 0)
2839 return rc;
2840 vhdr = (struct vlan_ethhdr *)skb->data;
2841 vhdr->h_vlan_TCI = htons(tx_flags >>
2842 I40E_TX_FLAGS_VLAN_SHIFT);
2843 } else {
2844 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2845 }
2846 }
2847
2848out:
2849 *flags = tx_flags;
2850 return 0;
2851}
2852
2853/**
2854 * i40e_tso - set up the tso context descriptor
2855 * @first: pointer to first Tx buffer for xmit
2856 * @hdr_len: ptr to the size of the packet header
2857 * @cd_type_cmd_tso_mss: Quad Word 1
2858 *
2859 * Returns 0 if no TSO can happen, 1 if tso is going, or error
2860 **/
2861static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
2862 u64 *cd_type_cmd_tso_mss)
2863{
2864 struct sk_buff *skb = first->skb;
2865 u64 cd_cmd, cd_tso_len, cd_mss;
2866 union {
2867 struct iphdr *v4;
2868 struct ipv6hdr *v6;
2869 unsigned char *hdr;
2870 } ip;
2871 union {
2872 struct tcphdr *tcp;
2873 struct udphdr *udp;
2874 unsigned char *hdr;
2875 } l4;
2876 u32 paylen, l4_offset;
2877 u16 gso_segs, gso_size;
2878 int err;
2879
2880 if (skb->ip_summed != CHECKSUM_PARTIAL)
2881 return 0;
2882
2883 if (!skb_is_gso(skb))
2884 return 0;
2885
2886 err = skb_cow_head(skb, 0);
2887 if (err < 0)
2888 return err;
2889
2890 ip.hdr = skb_network_header(skb);
2891 l4.hdr = skb_transport_header(skb);
2892
2893 /* initialize outer IP header fields */
2894 if (ip.v4->version == 4) {
2895 ip.v4->tot_len = 0;
2896 ip.v4->check = 0;
2897 } else {
2898 ip.v6->payload_len = 0;
2899 }
2900
2901 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
2902 SKB_GSO_GRE_CSUM |
2903 SKB_GSO_IPXIP4 |
2904 SKB_GSO_IPXIP6 |
2905 SKB_GSO_UDP_TUNNEL |
2906 SKB_GSO_UDP_TUNNEL_CSUM)) {
2907 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2908 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
2909 l4.udp->len = 0;
2910
2911 /* determine offset of outer transport header */
2912 l4_offset = l4.hdr - skb->data;
2913
2914 /* remove payload length from outer checksum */
2915 paylen = skb->len - l4_offset;
2916 csum_replace_by_diff(&l4.udp->check,
2917 (__force __wsum)htonl(paylen));
2918 }
2919
2920 /* reset pointers to inner headers */
2921 ip.hdr = skb_inner_network_header(skb);
2922 l4.hdr = skb_inner_transport_header(skb);
2923
2924 /* initialize inner IP header fields */
2925 if (ip.v4->version == 4) {
2926 ip.v4->tot_len = 0;
2927 ip.v4->check = 0;
2928 } else {
2929 ip.v6->payload_len = 0;
2930 }
2931 }
2932
2933 /* determine offset of inner transport header */
2934 l4_offset = l4.hdr - skb->data;
2935
2936 /* remove payload length from inner checksum */
2937 paylen = skb->len - l4_offset;
2938 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
2939
2940 /* compute length of segmentation header */
2941 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
2942
2943 /* pull values out of skb_shinfo */
2944 gso_size = skb_shinfo(skb)->gso_size;
2945 gso_segs = skb_shinfo(skb)->gso_segs;
2946
2947 /* update GSO size and bytecount with header size */
2948 first->gso_segs = gso_segs;
2949 first->bytecount += (first->gso_segs - 1) * *hdr_len;
2950
2951 /* find the field values */
2952 cd_cmd = I40E_TX_CTX_DESC_TSO;
2953 cd_tso_len = skb->len - *hdr_len;
2954 cd_mss = gso_size;
2955 *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
2956 (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
2957 (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
2958 return 1;
2959}
2960
2961/**
2962 * i40e_tsyn - set up the tsyn context descriptor
2963 * @tx_ring: ptr to the ring to send
2964 * @skb: ptr to the skb we're sending
2965 * @tx_flags: the collected send information
2966 * @cd_type_cmd_tso_mss: Quad Word 1
2967 *
2968 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
2969 **/
2970static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
2971 u32 tx_flags, u64 *cd_type_cmd_tso_mss)
2972{
2973 struct i40e_pf *pf;
2974
2975 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
2976 return 0;
2977
2978 /* Tx timestamps cannot be sampled when doing TSO */
2979 if (tx_flags & I40E_TX_FLAGS_TSO)
2980 return 0;
2981
2982 /* only timestamp the outbound packet if the user has requested it and
2983 * we are not already transmitting a packet to be timestamped
2984 */
2985 pf = i40e_netdev_to_pf(tx_ring->netdev);
2986 if (!(pf->flags & I40E_FLAG_PTP))
2987 return 0;
2988
2989 if (pf->ptp_tx &&
2990 !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) {
2991 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2992 pf->ptp_tx_start = jiffies;
2993 pf->ptp_tx_skb = skb_get(skb);
2994 } else {
2995 pf->tx_hwtstamp_skipped++;
2996 return 0;
2997 }
2998
2999 *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
3000 I40E_TXD_CTX_QW1_CMD_SHIFT;
3001
3002 return 1;
3003}
3004
3005/**
3006 * i40e_tx_enable_csum - Enable Tx checksum offloads
3007 * @skb: send buffer
3008 * @tx_flags: pointer to Tx flags currently set
3009 * @td_cmd: Tx descriptor command bits to set
3010 * @td_offset: Tx descriptor header offsets to set
3011 * @tx_ring: Tx descriptor ring
3012 * @cd_tunneling: ptr to context desc bits
3013 **/
3014static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
3015 u32 *td_cmd, u32 *td_offset,
3016 struct i40e_ring *tx_ring,
3017 u32 *cd_tunneling)
3018{
3019 union {
3020 struct iphdr *v4;
3021 struct ipv6hdr *v6;
3022 unsigned char *hdr;
3023 } ip;
3024 union {
3025 struct tcphdr *tcp;
3026 struct udphdr *udp;
3027 unsigned char *hdr;
3028 } l4;
3029 unsigned char *exthdr;
3030 u32 offset, cmd = 0;
3031 __be16 frag_off;
3032 u8 l4_proto = 0;
3033
3034 if (skb->ip_summed != CHECKSUM_PARTIAL)
3035 return 0;
3036
3037 ip.hdr = skb_network_header(skb);
3038 l4.hdr = skb_transport_header(skb);
3039
3040 /* compute outer L2 header size */
3041 offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
3042
3043 if (skb->encapsulation) {
3044 u32 tunnel = 0;
3045 /* define outer network header type */
3046 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
3047 tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
3048 I40E_TX_CTX_EXT_IP_IPV4 :
3049 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
3050
3051 l4_proto = ip.v4->protocol;
3052 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
3053 tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
3054
3055 exthdr = ip.hdr + sizeof(*ip.v6);
3056 l4_proto = ip.v6->nexthdr;
3057 if (l4.hdr != exthdr)
3058 ipv6_skip_exthdr(skb, exthdr - skb->data,
3059 &l4_proto, &frag_off);
3060 }
3061
3062 /* define outer transport */
3063 switch (l4_proto) {
3064 case IPPROTO_UDP:
3065 tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
3066 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3067 break;
3068 case IPPROTO_GRE:
3069 tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
3070 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3071 break;
3072 case IPPROTO_IPIP:
3073 case IPPROTO_IPV6:
3074 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3075 l4.hdr = skb_inner_network_header(skb);
3076 break;
3077 default:
3078 if (*tx_flags & I40E_TX_FLAGS_TSO)
3079 return -1;
3080
3081 skb_checksum_help(skb);
3082 return 0;
3083 }
3084
3085 /* compute outer L3 header size */
3086 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
3087 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
3088
3089 /* switch IP header pointer from outer to inner header */
3090 ip.hdr = skb_inner_network_header(skb);
3091
3092 /* compute tunnel header size */
3093 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
3094 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
3095
3096 /* indicate if we need to offload outer UDP header */
3097 if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
3098 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
3099 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
3100 tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
3101
3102 /* record tunnel offload values */
3103 *cd_tunneling |= tunnel;
3104
3105 /* switch L4 header pointer from outer to inner */
3106 l4.hdr = skb_inner_transport_header(skb);
3107 l4_proto = 0;
3108
3109 /* reset type as we transition from outer to inner headers */
3110 *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
3111 if (ip.v4->version == 4)
3112 *tx_flags |= I40E_TX_FLAGS_IPV4;
3113 if (ip.v6->version == 6)
3114 *tx_flags |= I40E_TX_FLAGS_IPV6;
3115 }

	/* Enable IP checksum offloads */
	if (*tx_flags & I40E_TX_FLAGS_IPV4) {
		l4_proto = ip.v4->protocol;
		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
		       I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
		       I40E_TX_DESC_CMD_IIPT_IPV4;
	} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
		cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;

		exthdr = ip.hdr + sizeof(*ip.v6);
		l4_proto = ip.v6->nexthdr;
		if (l4.hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto, &frag_off);
	}

	/* compute inner L3 header size */
	offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;

	/* Enable L4 checksum offloads */
	switch (l4_proto) {
	case IPPROTO_TCP:
		/* enable checksum offloads */
		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
		offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_SCTP:
		/* enable SCTP checksum offload */
		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
		offset |= (sizeof(struct sctphdr) >> 2) <<
			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_UDP:
		/* enable UDP checksum offload */
		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
		offset |= (sizeof(struct udphdr) >> 2) <<
			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	default:
		if (*tx_flags & I40E_TX_FLAGS_TSO)
			return -1;
		skb_checksum_help(skb);
		return 0;
	}

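	/* e.g. for an untagged IPv4/TCP frame with no IP or TCP options the
	 * packed offsets are MACLEN = 14 / 2 = 7 words, IPLEN = 20 / 4 = 5
	 * dwords and L4LEN = doff = 5 dwords.
	 */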
	*td_cmd |= cmd;
	*td_offset |= offset;

	return 1;
}

/**
 * i40e_create_tx_ctx - Build the Tx context descriptor
 * @tx_ring: ring to create the descriptor on
 * @cd_type_cmd_tso_mss: Quad Word 1
 * @cd_tunneling: Quad Word 0 - bits 0-31
 * @cd_l2tag2: Quad Word 0 - bits 32-63
 **/
static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
			       const u64 cd_type_cmd_tso_mss,
			       const u32 cd_tunneling, const u32 cd_l2tag2)
{
	struct i40e_tx_context_desc *context_desc;
	int i = tx_ring->next_to_use;

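	/* a context descriptor carries no information if only the base
	 * DTYPE is set and there is no tunneling or outer L2 tag, so skip
	 * writing one in that case
	 */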
	if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
	    !cd_tunneling && !cd_l2tag2)
		return;

	/* grab the next descriptor */
	context_desc = I40E_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* cpu_to_le32 and assign to struct fields */
	context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
	context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
	context_desc->rsvd = cpu_to_le16(0);
	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
}

/**
 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 **/
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in a case another CPU has just made room available. */
	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}

/**
 * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
 * @skb: send buffer
 *
 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
 * and so we need to figure out the cases where we need to linearize the skb.
 *
 * For TSO we need to count the TSO header and segment payload separately.
 * As such we need to check cases where we have 7 fragments or more as we
 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
 * the segment payload in the first descriptor, and another 7 for the
 * fragments.
 **/
bool __i40e_chk_linearize(struct sk_buff *skb)
{
	const struct skb_frag_struct *frag, *stale;
	int nr_frags, sum;

	/* no need to check if number of frags is less than 7 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
		return false;

	/* We need to walk through the list and validate that each group
	 * of 6 fragments totals at least gso_size.
	 */
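	/* e.g. if gso_size is 4096 and six consecutive fragments hold only
	 * 512 bytes each (3072 bytes total), one segment would span the TSO
	 * header, the tail of the previous fragment and seven further
	 * fragments (9 descriptors in total), so the skb must be linearized.
	 */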
	nr_frags -= I40E_MAX_BUFFER_TXD - 2;
	frag = &skb_shinfo(skb)->frags[0];

	/* Initialize sum to one minus gso_size. We use this as the worst
	 * case scenario in which the frag ahead of us only provides one
	 * byte, which is why we are limited to 6 descriptors for a single
	 * transmit as the header and previous fragment are already
	 * consuming 2 descriptors.
	 */
	sum = 1 - skb_shinfo(skb)->gso_size;

	/* Add size of frags 0 through 4 to create our initial sum */
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);

	/* Walk through fragments adding latest fragment, testing it, and
	 * then removing stale fragments from the sum.
	 */
	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
		int stale_size = skb_frag_size(stale);

		sum += skb_frag_size(frag++);

		/* The stale fragment may present us with a smaller
		 * descriptor than the actual fragment size. To account
		 * for that we need to remove all the data on the front and
		 * figure out what the remainder would be in the last
		 * descriptor associated with the fragment.
		 */
		if (stale_size > I40E_MAX_DATA_PER_TXD) {
			int align_pad = -(stale->page_offset) &
					(I40E_MAX_READ_REQ_SIZE - 1);

			sum -= align_pad;
			stale_size -= align_pad;

			do {
				sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
				stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
			} while (stale_size > I40E_MAX_DATA_PER_TXD);
		}

		/* if sum is negative we failed to make sufficient progress */
		if (sum < 0)
			return true;

		if (!nr_frags--)
			break;

		sum -= stale_size;
	}

	return false;
}

/**
 * i40e_tx_map - Build the Tx descriptor
 * @tx_ring: ring to send buffer on
 * @skb: send buffer
 * @first: first buffer info buffer to use
 * @tx_flags: collected send information
 * @hdr_len: size of the packet header
 * @td_cmd: the command field in the descriptor
 * @td_offset: offset for checksum or crc
 *
 * Returns 0 on success, -1 on failure to DMA
 **/
static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
			      struct i40e_tx_buffer *first, u32 tx_flags,
			      const u8 hdr_len, u32 td_cmd, u32 td_offset)
{
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	struct skb_frag_struct *frag;
	struct i40e_tx_buffer *tx_bi;
	struct i40e_tx_desc *tx_desc;
	u16 i = tx_ring->next_to_use;
	u32 td_tag = 0;
	dma_addr_t dma;
	u16 desc_count = 1;

	if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
		td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
		td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
			 I40E_TX_FLAGS_VLAN_SHIFT;
	}

	first->tx_flags = tx_flags;

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_bi = first;

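	/* The linear part of the skb is mapped first; each pass of the loop
	 * then writes one or more data descriptors for the current buffer
	 * (splitting buffers larger than I40E_MAX_DATA_PER_TXD on read
	 * request boundaries) before mapping the next page fragment.
	 */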
	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;

		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_bi, len, size);
		dma_unmap_addr_set(tx_bi, dma, dma);

		/* align size to end of page */
		max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
		tx_desc->buffer_addr = cpu_to_le64(dma);

		while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				build_ctob(td_cmd, td_offset,
					   max_data, td_tag);

			tx_desc++;
			i++;
			desc_count++;

			if (i == tx_ring->count) {
				tx_desc = I40E_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += max_data;
			size -= max_data;

			max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
			tx_desc->buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
							  size, td_tag);

		tx_desc++;
		i++;
		desc_count++;

		if (i == tx_ring->count) {
			tx_desc = I40E_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_bi = &tx_ring->tx_bi[i];
	}

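	/* account the bytes of this frame with byte queue limits (BQL) */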
	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* write last descriptor with EOP bit */
	td_cmd |= I40E_TX_DESC_CMD_EOP;

	/* We OR these values together to check both against 4 (WB_STRIDE)
	 * below. This is safe since we don't re-use desc_count afterwards.
	 */
	desc_count |= ++tx_ring->packet_stride;

	if (desc_count >= WB_STRIDE) {
		/* write last descriptor with RS bit set */
		td_cmd |= I40E_TX_DESC_CMD_RS;
		tx_ring->packet_stride = 0;
	}

	tx_desc->cmd_type_offset_bsz =
			build_ctob(td_cmd, td_offset, size, td_tag);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 *
	 * We also use this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	/* notify HW of packet: only ring the doorbell if the queue has been
	 * stopped or the stack has no further frames queued (xmit_more is
	 * false), so tail writes are batched across a burst of packets
	 */
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
		writel(i, tx_ring->tail);

		/* we need this if more than one processor can write to our tail
		 * at a time, it synchronizes IO on IA64/Altix systems
		 */
		mmiowb();
	}

	return 0;

dma_error:
	dev_info(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_bi map */
	for (;;) {
		tx_bi = &tx_ring->tx_bi[i];
		i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
		if (tx_bi == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;

	return -1;
}

/**
 * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
 * @xdp: data to transmit
 * @xdp_ring: XDP Tx ring
 *
 * Returns I40E_XDP_TX on success, or I40E_XDP_CONSUMED if the ring is
 * full or the buffer could not be DMA mapped.
 **/
static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
			      struct i40e_ring *xdp_ring)
{
	u32 size = xdp->data_end - xdp->data;
	u16 i = xdp_ring->next_to_use;
	struct i40e_tx_buffer *tx_bi;
	struct i40e_tx_desc *tx_desc;
	dma_addr_t dma;

	if (unlikely(!I40E_DESC_UNUSED(xdp_ring))) {
		xdp_ring->tx_stats.tx_busy++;
		return I40E_XDP_CONSUMED;
	}

	dma = dma_map_single(xdp_ring->dev, xdp->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(xdp_ring->dev, dma))
		return I40E_XDP_CONSUMED;

	tx_bi = &xdp_ring->tx_bi[i];
	tx_bi->bytecount = size;
	tx_bi->gso_segs = 1;
	tx_bi->raw_buf = xdp->data;

	/* record length, and DMA address */
	dma_unmap_len_set(tx_bi, len, size);
	dma_unmap_addr_set(tx_bi, dma, dma);

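	/* a single data descriptor describes the whole frame, so EOP and RS
	 * are set right away via I40E_TXD_CMD
	 */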
	tx_desc = I40E_TX_DESC(xdp_ring, i);
	tx_desc->buffer_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC
						  | I40E_TXD_CMD,
						  0, size, 0);

	/* Make certain all of the status bits have been updated
	 * before next_to_watch is written.
	 */
	smp_wmb();

	i++;
	if (i == xdp_ring->count)
		i = 0;

	tx_bi->next_to_watch = tx_desc;
	xdp_ring->next_to_use = i;

	return I40E_XDP_TX;
}

/**
 * i40e_xmit_frame_ring - Sends buffer on Tx ring
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
					struct i40e_ring *tx_ring)
{
	u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
	u32 cd_tunneling = 0, cd_l2tag2 = 0;
	struct i40e_tx_buffer *first;
	u32 td_offset = 0;
	u32 tx_flags = 0;
	__be16 protocol;
	u32 td_cmd = 0;
	u8 hdr_len = 0;
	int tso, count;
	int tsyn;

	/* prefetch the data, we'll need it later */
	prefetch(skb->data);

	i40e_trace(xmit_frame_ring, skb, tx_ring);

	count = i40e_xmit_descriptor_count(skb);
	if (i40e_chk_linearize(skb, count)) {
		if (__skb_linearize(skb)) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		count = i40e_txd_use_count(skb->len);
		tx_ring->tx_stats.tx_linearize++;
	}

	/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
	 *       + 4 desc gap to avoid the cache line where head is,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_bi[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	/* prepare the xmit flags */
	if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
		goto out_drop;

	/* obtain protocol of skb */
	protocol = vlan_get_protocol(skb);

	/* setup IPv4/IPv6 offloads */
	if (protocol == htons(ETH_P_IP))
		tx_flags |= I40E_TX_FLAGS_IPV4;
	else if (protocol == htons(ETH_P_IPV6))
		tx_flags |= I40E_TX_FLAGS_IPV6;

	tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);

	if (tso < 0)
		goto out_drop;
	else if (tso)
		tx_flags |= I40E_TX_FLAGS_TSO;

	/* Always offload the checksum, since it's in the data descriptor */
	tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
				  tx_ring, &cd_tunneling);
	if (tso < 0)
		goto out_drop;

	tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);

	if (tsyn)
		tx_flags |= I40E_TX_FLAGS_TSYN;

	skb_tx_timestamp(skb);

	/* always enable CRC insertion offload */
	td_cmd |= I40E_TX_DESC_CMD_ICRC;

	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
			   cd_tunneling, cd_l2tag2);

	/* Add Flow Director ATR if it's enabled.
	 *
	 * NOTE: this must always be directly before the data descriptor.
	 */
	i40e_atr(tx_ring, skb, tx_flags);

	if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
			td_cmd, td_offset))
		goto cleanup_tx_tstamp;

	return NETDEV_TX_OK;

out_drop:
	i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;
cleanup_tx_tstamp:
	if (unlikely(tx_flags & I40E_TX_FLAGS_TSYN)) {
		struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev);

		dev_kfree_skb_any(pf->ptp_tx_skb);
		pf->ptp_tx_skb = NULL;
		clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
	}

	return NETDEV_TX_OK;
}

/**
 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];

	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
	if (skb_put_padto(skb, I40E_MIN_TX_LEN))
		return NETDEV_TX_OK;

	return i40e_xmit_frame_ring(skb, tx_ring);
}

/**
 * i40e_xdp_xmit - Implements ndo_xdp_xmit
 * @dev: netdev
 * @xdp: XDP buffer
 *
 * Returns 0 if sent, else an error code
 **/
int i40e_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	unsigned int queue_index = smp_processor_id();
	struct i40e_vsi *vsi = np->vsi;
	int err;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return -ENETDOWN;

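	/* the XDP Tx ring is selected by the current CPU id; bail out if
	 * this CPU has no ring (more CPUs than queue pairs) or XDP is not
	 * enabled on the VSI
	 */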
	if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs)
		return -ENXIO;

	err = i40e_xmit_xdp_ring(xdp, vsi->xdp_rings[queue_index]);
	if (err != I40E_XDP_TX)
		return -ENOSPC;

	return 0;
}

/**
 * i40e_xdp_flush - Implements ndo_xdp_flush
 * @dev: netdev
 **/
void i40e_xdp_flush(struct net_device *dev)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	unsigned int queue_index = smp_processor_id();
	struct i40e_vsi *vsi = np->vsi;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs)
		return;

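	/* write the tail register once for all frames queued on this CPU's
	 * XDP ring since the last tail bump
	 */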
	i40e_xdp_ring_update_tail(vsi->xdp_rings[queue_index]);
}